// BarryServer : Git / Orion / blob / master / mem / pagefault.c

/*
 * This is the page fault handler.  It handles and dispatches all handlers for
 * page faults, including various tasking functions.
 */
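
/*
 * Handlers in this file:
 *   copy_on_write()            - present write faults (copy-on-write)
 *   not_present_read()         - not-present read faults
 *   not_present_write()        - not-present write faults
 *   page_fault_handler()       - dispatcher; reads CR2 and the error code
 *   early_page_fault_handler() - used before the VFS and tasking are up
 */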

#include <stdint.h>
#include <signal.h>
#include "paging.h"
#include "../vfs/cache.h"
#include "../vfs/inode.h"
#include "../vfs/tmpfs/fs.h"
#include "../mem/heap.h"
#include "../mem/mem.h"
#include "../task/task.h"
#include "../proc/proc.h"
#include "../screen.h"

extern size_t numFrames, usedFrames;

void copy_page_frame(void *src, void *dest);

/* Copy-On-Write */
static void
copy_on_write(VMRegion *region, uintptr_t addr)
{
	Page *page = NULL;
	File *front = region->front,
	     *back  = region->back;
	off_t offset = ((addr & ~0xFFF) - region->start) + region->offset;
	page_t *pg = get_page((void *) addr);
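	/*
	 * Note: addr & ~0xFFF above is the page-aligned fault address, so
	 * offset is where the faulting page lies within the mapped file
	 * (region->start and region->offset are assumed to be page-aligned).
	 */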

	/* Create front if it doesn't exist and is needed */
	uint8_t private = region->flags & MAP_PRIVATE;
	uint8_t sharedanon = (region->flags & MAP_SHARED) &&
	                     (region->flags & MAP_ANONYMOUS);
	if (!front && (private || sharedanon)) {
		/*
		 * A private mapping will always write to the front.  A shared
		 * mapping will write to the back.  If a shared mapping is
		 * anonymous, then the back is the front.  The front must
		 * therefore be created whenever the mapping is private, or
		 * shared and anonymous.
		 */
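		/*
		 * In short:
		 *   MAP_PRIVATE              -> faulting writes land in front
		 *   MAP_SHARED               -> writes go through to back
		 *   MAP_SHARED|MAP_ANONYMOUS -> the back is the front
		 */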
		front = kmalloc(sizeof(File));
		front->inode = inode_get(kmalloc(sizeof(Inode)));
		front->ops = &tmpfsFileOps;
		region->front = file_get(front);
	}

	/* Find original page frame */
	Inode *inode;
	if (!page && front) {
		inode = front->inode;
		ASSERT(inode);
		page = page_find(inode, offset);
	}
	if (!page && back) {
		inode = back->inode;
		ASSERT(inode);
		page = page_find(inode, offset);
	}
	ASSERT(page);

	/*
	 * The frame is referenced only once and is not the shared zero frame:
	 * any copy has already happened, so just make the mapping writable.
	 */
	if (page->usage == 1 && page->frame != zeroFrame) {
		*pg |= PTE_WRITE;
		return;
	}
	/* Otherwise copy into a fresh frame and replace the old cache entry */
	*pg = 0;
	alloc_page(pg, PTE_PRESENT | PTE_USER | PTE_WRITE, -1);
	copy_page_frame((void *) PG_ADDR(page->frame),
	                (void *) PG_ADDR(*pg));
	page_remove(inode, page);
	page = page_create(front->inode, PG_ADDR(*pg), offset);
}

/* Handle a not-present read page fault */
static void
not_present_read(VMRegion *region, uintptr_t addr)
{
	Page *page;
	File *front = region->front,
	     *back  = region->back;
	off_t offset = ((addr & ~0xFFF) - region->start) + region->offset;
	page_t *pg = get_page((void *) addr);

	/* Handle uninitialised anonymous regions */
	if (!front && (region->flags & MAP_ANONYMOUS)) {
		front = kmalloc(sizeof(File));
		front->inode = inode_get(kmalloc(sizeof(Inode)));
		front->ops = &tmpfsFileOps;
		region->front = file_get(front);
	}

	/* Attempt to use front */
	if (front) {
		page = page_find(front->inode, offset);
		if (page) {
			page_get(page);
			alloc_page(pg, PTE_PRESENT | PTE_USER, page->frame);
			return;
		}
		if (region->flags & MAP_ANONYMOUS) {
			/* Must be anonymous, zero-fill */
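			/*
			 * zeroFrame appears to be a single shared all-zero
			 * frame.  It is mapped read-only here, so the first
			 * write to this page faults again and gets its own
			 * copy via copy_on_write().
			 */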
			alloc_page(pg, PTE_PRESENT | PTE_USER, zeroFrame);
			page_create(front->inode, zeroFrame, offset);
			return;
		}
	}

	/* Use back */
	ASSERT(back);
	page = page_find(back->inode, offset);
	if (page) {
		page_get(page);
		alloc_page(pg, PTE_PRESENT | PTE_USER, page->frame);
		return;
	}
	/* Create new block cache entry */
	alloc_page(pg, PTE_PRESENT | PTE_USER, -1);
	file_mmap(back, (void *) PG_ADDR(addr), 0x1000, offset);
	page_create(back->inode, PG_ADDR(*pg), offset);
}

/* Handle a not-present write page fault */
static void
not_present_write(VMRegion *region, uintptr_t addr)
{
	Page *page = NULL;
	File *front = region->front,
	     *back  = region->back;
	off_t offset = ((addr & ~0xFFF) - region->start) + region->offset;
	page_t *pg = get_page((void *) addr);

	/* Handle uninitialised anonymous regions */
	if (!front && ((region->flags & MAP_PRIVATE)
	 || (region->flags & MAP_ANONYMOUS))) {
		/*
		 * This applies to all private regions, anonymous or not.
		 * Unless the region is shared, the process should write to the
		 * front, which will be the private copy.  If the region is
		 * shared, and also anonymous, then the write will occur to the
		 * front too.
		 */
		front = kmalloc(sizeof(File));
		front->inode = inode_get(kmalloc(sizeof(Inode)));
		front->ops = &tmpfsFileOps;
		region->front = file_get(front);
	}
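	/*
	 * From here on, shared mappings are written through to the back (or to
	 * the front when anonymous, since the back is then the front), while
	 * private mappings get their own copy in the front.
	 */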

	/* Shared region, write-through to back */
	if (region->flags & MAP_SHARED) {
		if (region->flags & MAP_ANONYMOUS)
			back = front;
		ASSERT(back);
		page = page_find(back->inode, offset);
		if (page) {
			page_get(page);
			alloc_page(pg, PTE_PRESENT | PTE_USER | PTE_WRITE,
			           page->frame);
			return;
		}
		*pg = 0;
		alloc_page(pg, PTE_PRESENT | PTE_USER | PTE_WRITE, -1);
		memset((void *) PG_ADDR(addr), 0, 0x1000);
		page_create(back->inode, PG_ADDR(*pg), offset);
		return;
	}

	/* Private region, copy to front */
	alloc_page(pg, PTE_PRESENT | PTE_USER | PTE_WRITE, -1);
	if (front)
		page = page_find(front->inode, offset);
	if (page) {
		copy_page_frame((void *) PG_ADDR(page->frame),
		                (void *) PG_ADDR(*pg));
		page_remove(front->inode, page);
		page_create(front->inode, PG_ADDR(*pg), offset);
		return;
	}

	/* Anonymous region, zero-fill */
	if (region->flags & MAP_ANONYMOUS) {
		memset((void *) PG_ADDR(addr), 0, 0x1000);
		page_create(front->inode, PG_ADDR(*pg), offset);
		return;
	}

	/* Use back */
	ASSERT(back);
	page = page_find(back->inode, offset);
	if (page) {
		copy_page_frame((void *) PG_ADDR(page->frame),
		                (void *) PG_ADDR(*pg));
		page_remove(back->inode, page);
	} else {
		file_mmap(back, (void *) PG_ADDR(addr), 0x1000, offset);
	}
	page_create(front->inode, PG_ADDR(*pg), offset);
}

/* Page fault handler */
void
page_fault_handler(InterruptFrame *frame)
{
	uintptr_t addr;
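	/*
	 * CR2 holds the faulting linear address; the low bits of the pushed
	 * error code describe the fault: bit 0 = page was present, bit 1 =
	 * write access, bit 2 = fault occurred in user mode.
	 */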
	asm volatile("mov %%cr2, %0" : "=r" (addr));
	uint8_t present = frame->errCode & (1 << 0);
	uint8_t write   = frame->errCode & (1 << 1);
	uint8_t user    = frame->errCode & (1 << 2);

	/* Find the VM region containing the faulting address */
	VMRegion *region;
	for (region = current->vm->regions; region; region = region->next) {
		if (region->start <= addr && region->end > addr)
			break;
	}
	if (!region && current->stack) {
		region = current->stack;
		if (region->start > addr || region->end <= addr)
			region = NULL;
	}
	if (!region && current->tls) {
		region = current->tls;
		if (region->start > addr || region->end <= addr)
			region = NULL;
	}
	/* Not in a region */
	if (!region) {
		page_t *pg = get_page((void *) addr);
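		/*
		 * (addr >> 12) / 1024 and % 1024 below are the page directory
		 * and page table indices for 32-bit non-PAE paging, where each
		 * table holds 1024 entries.
		 */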
		panic("Page Fault [%d:%d] (%#.8x -> %#.8x [tbl:%d, pg:%d][%#.8x]), %s, %s, %s",
		      current->tgid, current->tid, frame->eip,
		      addr, (addr>>12) / 1024, (addr>>12) % 1024, *pg,
		      present ? "present" : "not present",
		      write ? "write" : "read",
		      user ? "user" : "kernel");
	}

	if (user && write && !(region->prot & PROT_WRITE))
		return (void) kill(current->tgid, SIGSEGV);

	if (present && write)
		return copy_on_write(region, addr);

	if (write)
		return not_present_write(region, addr);
	else
		return not_present_read(region, addr);
}

/* Early (pre-VFS/tasking) page fault handler */
void
early_page_fault_handler(InterruptFrame *frame)
{
	uintptr_t addr;
	asm volatile("mov %%cr2, %0" : "=r" (addr));
	if (!PG_ADDR(addr))
		panic("Null dereference @ %#.8x", frame->eip);
	alloc_page(get_page((void *) addr),
	           PTE_PRESENT | PTE_WRITE | PTE_GLOBAL, -1);
}