/*
 * This file deals with the Global Descriptor Table. It creates a simple GDT,
 * suitable for use in the kernel, and attaches a TSS to the end. The TSS,
 * which is used to switch from ring 3 to ring 0, allows processes to access
 * all IO ports. Every logical processor requires its own TSS, and each is
 * also given its own GDT. Since the number of logical processors is not
 * known until load time, these per-CPU structures are allocated at runtime
 * and packed together to save space.
 */

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Project headers providing MAX_CPUS, CPUID, PAGE_SIZE, alloc_frame() and
 * ASSERT() are also needed here. */
#include "desc.h"

/* GDT Entry Indices */
enum GDTIndex {
    GDT_NULL,
    GDT_KERN_CODE,
    GDT_KERN_DATA,
    GDT_USER_CODE,
    GDT_USER_DATA,
    GDT_FS,
    GDT_GS,
    GDT_TSS,
};

/* Structure for a GDT Entry */
static struct GDTEntry {
    uint16_t limitLower, baseLower;
    uint8_t baseMiddle, access, gran, baseHigher;
} __attribute__((packed)) *gdt[MAX_CPUS]; /* Per CPU */

#define GDT_OFFSET(i) ((i) * sizeof(struct GDTEntry))

/* Structure for a TSS Entry */
static struct TSSEntry {
    uint32_t prevTSS;
    uint32_t esp0, ss0;
    uint32_t esp1, ss1;
    uint32_t esp2, ss2;
    uint32_t cr3, eip, eflags;
    uint32_t eax, ecx, edx, ebx;
    uint32_t esp, ebp, esi, edi;
    uint32_t es, cs, ss, ds, fs, gs;
    uint32_t ldt;
    uint16_t trap, iomapBase;
} __attribute__((packed)) *tss[MAX_CPUS]; /* Per CPU */

/* Set a gate of the GDT */
static void gdt_set_gate(uint8_t num, uint32_t base, uint32_t limit,
                         uint8_t access, uint8_t gran)
{
    gdt[CPUID][num].baseLower  = (base & 0xFFFF);
    gdt[CPUID][num].baseMiddle = (base >> 16) & 0xFF;
    gdt[CPUID][num].baseHigher = (base >> 24) & 0xFF;

    gdt[CPUID][num].limitLower = (limit & 0xFFFF);
    gdt[CPUID][num].gran       = (limit >> 16) & 0x0F;

    gdt[CPUID][num].gran  |= gran & 0xF0;
    gdt[CPUID][num].access = access;
}

/* Load the GDT */
void cpu_load_gdt(void)
{
    /*
     * Each page can fit multiple GDT/TSS structures. If there is space in
     * an existing page, the new structures should be allocated in there.
     * Otherwise, a new page frame should be allocated.
     */
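    /*
     * Worked example of the packing (assuming 4 KiB pages; PAGE_SIZE is not
     * defined in this file): each packed GDTEntry is 8 bytes, so the 8
     * entries take 64 bytes, and the packed TSSEntry takes 104 bytes, giving
     * size = 168. One 4096-byte frame then holds 4096 / 168 = 24 CPUs' worth
     * of GDT+TSS pairs; idx below is this CPU's slot within its frame, and
     * block is the first CPU sharing that frame.
     */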
    size_t size = (sizeof(struct GDTEntry) * (GDT_TSS + 1)) + sizeof(struct TSSEntry);
    off_t idx = CPUID % (PAGE_SIZE / size);
    off_t block = (CPUID / (PAGE_SIZE / size)) * (PAGE_SIZE / size);

    if (idx == 0)
        gdt[CPUID] = (void *) alloc_frame();
    else
        gdt[CPUID] = (void *) gdt[block] + (size * idx);

    memset(gdt[CPUID], 0, size);

    /* Create gate entries */
    gdt_set_gate(GDT_NULL,      0x00000000, 0x00000000, 0x00, 0x00);
    gdt_set_gate(GDT_KERN_CODE, 0x00000000, 0xFFFFFFFF, 0x9A, 0xCF); /* ring 0 code */
    gdt_set_gate(GDT_KERN_DATA, 0x00000000, 0xFFFFFFFF, 0x92, 0xCF); /* ring 0 data */
    gdt_set_gate(GDT_USER_CODE, 0x00000000, 0xFFFFFFFF, 0xFA, 0xCF); /* ring 3 code */
    gdt_set_gate(GDT_USER_DATA, 0x00000000, 0xFFFFFFFF, 0xF2, 0xCF); /* ring 3 data */
    gdt_set_gate(GDT_FS,        0x00000000, 0xFFFFFFFF, 0xF2, 0xCF);
    gdt_set_gate(GDT_GS,        0x00000000, 0xFFFFFFFF, 0xF2, 0xCF);

    /* Create TSS entry; the TSS lives immediately after the GDT entries */
    uint32_t addr = (uint32_t) (gdt[CPUID] + (GDT_TSS + 1));
    gdt_set_gate(GDT_TSS, addr, sizeof(struct TSSEntry) - 1, 0xE9, 0); /* ring 3, 32-bit TSS */

    /* TSS */
    tss[CPUID] = (void *) addr;
    tss[CPUID]->ss0 = 0x10; /* kernel data selector */
    tss[CPUID]->cs = GDT_OFFSET(GDT_KERN_CODE) | 0;
    tss[CPUID]->ds = GDT_OFFSET(GDT_KERN_DATA) | 0;
    tss[CPUID]->es = tss[CPUID]->ss = tss[CPUID]->ds;
    tss[CPUID]->fs = tss[CPUID]->gs = GDT_OFFSET(GDT_FS) | 3;
    tss[CPUID]->iomapBase = sizeof(struct TSSEntry);

    /* Load table */
    struct DescRecord ptr = {
        .limit = GDT_OFFSET(GDT_TSS + 1) - 1, /* limit is table size minus one */
        .base = (uintptr_t) gdt[CPUID],
    };

    asm volatile("lgdt %0" :: "m" (ptr));
    asm volatile("ltr %w0" :: "q" (GDT_OFFSET(GDT_TSS) | 3));
}

/* Set FS base */
void set_fs_base(uintptr_t base)
{
    struct GDTEntry *fs = gdt[CPUID] + GDT_FS;
    fs->baseLower  = (base & 0xFFFF);
    fs->baseMiddle = (base >> 16) & 0xFF;
    fs->baseHigher = (base >> 24) & 0xFF;

    asm volatile("mov %0, %%fs" :: "r" (GDT_OFFSET(GDT_FS) | 3));
}

/* Set GS base */
void set_gs_base(uintptr_t base)
{
    struct GDTEntry *gs = gdt[CPUID] + GDT_GS;
    gs->baseLower  = (base & 0xFFFF);
    gs->baseMiddle = (base >> 16) & 0xFF;
    gs->baseHigher = (base >> 24) & 0xFF;

    asm volatile("mov %0, %%gs" :: "r" (GDT_OFFSET(GDT_GS) | 3));
}

/* Set the used kernel stack */
void set_kernel_stack(uintptr_t top)
{
    ASSERT((top & 0xF) == 0);
    tss[cpu->id]->esp0 = top;
}
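
/*
 * Usage sketch (illustrative only; cpu_early_init and kstackTop are
 * hypothetical names, not part of this file): a per-CPU bring-up path would
 * call cpu_load_gdt() once on each logical processor, then point the TSS at
 * that CPU's kernel stack before entering user space. The stack top must be
 * 16-byte aligned, as set_kernel_stack() asserts.
 *
 *     void cpu_early_init(uintptr_t kstackTop)
 *     {
 *         cpu_load_gdt();              // build and load this CPU's GDT/TSS
 *         set_kernel_stack(kstackTop); // esp0 used on ring 3 -> ring 0 entry
 *     }
 */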