/*
 * This file contains all the functions used to manipulate the virtual address
 * spaces. It controls all of the system's paging and virtual memory from a low
 * level perspective. On initialisation, a page directory is allocated for the
 * kernel that identity maps the first 8 MB of memory.
 */
#include <stdint.h>
#include <string.h>
#include <nucleus/memory.h>
#include "paging.h"
/* Copies the contents of one physical page frame to another; defined elsewhere */
void copy_page_frame(uintptr_t src, uintptr_t dest);
page_t zeroFrame;
static page_dir_t kernelDir;
/* Switch page directory */
static void
switch_dir(page_dir_t dir)
{
	asm volatile("mov %0, %%cr3" :: "r" (dir));
}
/* Get a page mapping */
page_t
get_page(uintptr_t vaddr)
{
	page_t *mappings = (void *) 0xFFC00000;
	page_table_t *tables = (void *) 0xFFFFF000;
	uintptr_t address = vaddr >> 12;
	uint32_t tbl = address / 1024;

	if (!(tables[tbl] & PDE_PRESENT))
		return 0x00000000;
	return mappings[address];
}
/* Set a page mapping */
void
set_page(uintptr_t vaddr, page_t page)
{
	page_t *mappings = (void *) 0xFFC00000;
	page_table_t *tables = (void *) 0xFFFFF000;
	uintptr_t address = vaddr >> 12;
	uint32_t tbl = address / 1024;

	/* Create table if not present */
	if (!(tables[tbl] & PDE_PRESENT)) {
		tables[tbl] = alloc_frame() | PDE_PRESENT | PDE_WRITE;
		memset(mappings + (tbl * 1024), 0, PAGE_SIZE);
	}
	mappings[address] = page;
}
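/*
 * Usage sketch (not part of the original file): map a freshly allocated,
 * writable frame at a chosen virtual address and read the mapping back.
 * set_page() creates the backing page table on demand, so callers never
 * touch the page directory themselves; the kernel stack loop in
 * init_paging() below follows the same pattern.
 */
static inline void
map_example(uintptr_t vaddr)
{
	page_t mapped;

	set_page(vaddr, alloc_frame() | PTE_PRESENT | PTE_WRITE);
	flush_tlb(vaddr);

	/* mapped now holds the frame address plus its attribute bits */
	mapped = get_page(vaddr);
	(void) mapped;
}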
/* Clone an entire page directory */
page_dir_t
clone_dir(void)
{
	page_table_t *oldTables = (void *) 0xFFFFF000;
	page_table_t *newTables = (void *) 0xFFFFE000;
	page_t *oldTable, *newTable;
	page_dir_t dir = alloc_frame();
	uint16_t tbl, pg;

	/* Temporarily link new paging structures into current directory */
	page_table_t restore = oldTables[1022];
	oldTables[1022] = dir | PDE_PRESENT | PDE_WRITE;
	flush_tlb((uintptr_t) newTables);

	/* Iterate tables */
	for (tbl = 0; tbl < 1022; tbl++) {
		if (!(oldTables[tbl] & PDE_PRESENT)) {
			/*
			 * alloc_frame() does not hand back zeroed memory (the
			 * page loop below clears absent entries explicitly),
			 * so clear entries for absent tables as well.
			 */
			newTables[tbl] = 0;
			continue;
		}

		/* Link kernel tables */
		if (tbl < 2 || tbl >= 1008) {
			newTables[tbl] = oldTables[tbl];
			continue;
		}

		/* Copy everything else */
		newTables[tbl] = alloc_frame() | PAGE_ATTR(oldTables[tbl]);
		oldTable = (page_t *) 0xFFC00000 + (tbl * 1024);
		newTable = (page_t *) 0xFF800000 + (tbl * 1024);
		flush_tlb((uintptr_t) newTable);
		for (pg = 0; pg < 1024; pg++) {
			if (!(oldTable[pg] & PTE_PRESENT)) {
				newTable[pg] = 0;
				continue;
			}

			/* Link the pages for Copy-On-Write */
			if (tbl < 960) { /* below 0xF0000000 */
				oldTable[pg] &= ~PTE_WRITE;
				flush_tlb(((uintptr_t) tbl * 1024 + pg) << 12);
				newTable[pg] = oldTable[pg];
			} else {
				/* Copy the kernel stack area immediately */
				newTable[pg] = alloc_frame()
					| PAGE_ATTR(oldTable[pg]);
				flush_tlb(((uintptr_t) tbl * 1024 + pg) << 12);
				copy_page_frame(PAGE_ADDR(oldTable[pg]),
					PAGE_ADDR(newTable[pg]));
			}
		}
	}
	/* Entry 1022 is only ever used as a temporary link, so keep it clear */
	newTables[1022] = 0;
	/* Point the new directory's recursive entry at itself */
	newTables[1023] = oldTables[1022];

	/* Unlink paging structures from current directory */
	oldTables[1022] = restore;
	flush_tlb((uintptr_t) newTables);
	return dir;
}
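/*
 * Illustrative sketch (not part of the original file): the read-only links
 * created by clone_dir() only work if the page fault handler resolves write
 * faults on present, read-only pages by copying the shared frame. The real
 * handler is registered elsewhere (early_page_fault_handler); the outline
 * below is an assumption of what that resolution step looks like, and it
 * ignores reference counting of shared frames.
 */
static inline void
resolve_cow_fault(uintptr_t vaddr)
{
	page_t old = get_page(vaddr);
	page_t copy;

	/* Only present, read-only pages are copy-on-write candidates */
	if (!(old & PTE_PRESENT) || (old & PTE_WRITE))
		return;

	/* Give the faulting address space its own writable copy */
	copy = alloc_frame() | PAGE_ATTR(old) | PTE_WRITE;
	set_page(vaddr, copy);
	flush_tlb(vaddr & ~(uintptr_t) (PAGE_SIZE - 1));
	copy_page_frame(PAGE_ADDR(old), PAGE_ADDR(copy));
}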
/* Initialise paging */
void
init_paging(void)
{
	uint16_t tbl, pg;
	kernelDir = alloc_frame();
	page_table_t *kernelTables = (page_table_t *) kernelDir;
	page_t *table;

	/* Mark every directory entry as not present */
	for (tbl = 0; tbl < 1024; tbl++)
		kernelTables[tbl] = 0x00000000 | PDE_WRITE;

	/* Identity map the first 8 MB, keeping the mappings global */
	for (tbl = 0; tbl < 2; tbl++) {
		table = (page_t *) alloc_frame();
		kernelTables[tbl] = ((page_table_t) table)
			| PDE_WRITE | PDE_PRESENT;
		for (pg = 0; pg < 1024; pg++) {
			/* Skip bottom page - catches NULL dereferences */
			if (!tbl && !pg)
				continue;
			table[pg] = (((tbl * 1024) + pg) << 12)
				| PTE_WRITE | PTE_PRESENT | PTE_GLOBAL;
		}
	}
	/* Map the directory into itself */
	kernelTables[1023] = kernelDir | PDE_WRITE | PDE_PRESENT;
	/*
	 * By mapping the page directory as the last page table, the page
	 * directory entries are read as page table entries, and the page
	 * table entries become the pages. This means that each page contains
	 * the contents of a page table, and the region in memory represented
	 * by the last page table contains a contiguous list of all pages in
	 * memory. The very last page contains the contents of the page
	 * directory itself. This means that each virtual address space
	 * contains its own paging structures (see the illustrative helpers
	 * after this function).
	 */
	/* Use kernel directory */
	register_exception(14, early_page_fault_handler);
	cpu_load_paging();

	/* Allocate a kernel stack */
	uintptr_t stk;
	for (stk = 0xF0400000; stk < 0xF0800000; stk += PAGE_SIZE)
		set_page(stk, alloc_frame() | PTE_PRESENT | PTE_WRITE);
	zeroFrame = alloc_frame();
}
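/*
 * Illustrative helpers (not part of the original file) that spell out the
 * address arithmetic behind the recursive mapping described above, assuming
 * 4-byte page_t and page_table_t entries. With the directory installed as
 * its own entry 1023, the PTE and PDE covering any virtual address sit at
 * fixed virtual addresses; get_page() and set_page() rely on this layout.
 */
static inline page_t *
pte_location(uintptr_t vaddr)
{
	/* 0xFFC00000 + 4 * (vaddr >> 12): one PTE per page of the space */
	return (page_t *) 0xFFC00000 + (vaddr >> 12);
}

static inline page_table_t *
pde_location(uintptr_t vaddr)
{
	/* 0xFFFFF000 + 4 * (vaddr >> 22): the directory maps itself last */
	return (page_table_t *) 0xFFFFF000 + (vaddr >> 22);
}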
/* Enable paging on the current CPU */
void
cpu_load_paging(void)
{
	switch_dir(kernelDir);

	/*
	 * Set CR0.PG to enable paging. PTE_GLOBAL mappings only take effect
	 * once CR4.PGE is also set, which is assumed to be done elsewhere.
	 */
	asm volatile (
		"movl %%cr0, %%eax;"
		"orl $0x80000000, %%eax;"
		"movl %%eax, %%cr0"
		::: "%eax"
	);
}
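/*
 * Usage sketch (not part of the original file): cloning the current address
 * space and switching into the clone. Loading a new directory with a CR3
 * write drops all non-global TLB entries, which is why the kernel's
 * identity mappings above are marked PTE_GLOBAL.
 */
static inline void
clone_and_switch_example(void)
{
	page_dir_t child = clone_dir();

	switch_dir(child);
}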