simplify kernel mapping calls

commit 54178ad94d
parent 55bc96d419
Robert Morris  2019-07-23 12:17:17 -04:00
8 changed files with 53 additions and 45 deletions


@@ -193,7 +193,7 @@ uint64 walkaddr(pagetable_t, uint64);
 int copyout(pagetable_t, uint64, char *, uint64);
 int copyin(pagetable_t, char *, uint64, uint64);
 int copyinstr(pagetable_t, char *, uint64, uint64);
-char* mapkstack(uint64);
+void kmap(uint64, uint64, uint64, int);
 uint64 kernelpa(uint64);
 void clearpteu(pagetable_t, uint64);


@@ -53,6 +53,9 @@
 // map the trampoline page to the highest address,
 // in both user and kernel space.
 #define TRAMPOLINE (MAXVA - PGSIZE)
+// map kernel stacks beneath the trampoline,
+// each surrounded by invalid guard pages.
 #define KSTACK(p) (TRAMPOLINE - ((p)+1)* 2*PGSIZE)
 
 // User memory layout.
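As a concrete illustration of the layout these macros describe, here is a small standalone sketch (not part of the commit); the MAXVA and PGSIZE values are assumptions taken from xv6's usual Sv39 definitions in riscv.h:

    #include <stdio.h>

    #define PGSIZE 4096L                        // assumed, as in xv6's riscv.h
    #define MAXVA (1L << (9 + 9 + 9 + 12 - 1))  // assumed Sv39 half-range
    #define TRAMPOLINE (MAXVA - PGSIZE)
    #define KSTACK(p) (TRAMPOLINE - ((p)+1)* 2*PGSIZE)

    int
    main(void)
    {
      // Each process gets one mapped stack page at KSTACK(p); the pages
      // on either side stay unmapped, so overflowing a kernel stack
      // faults instead of silently corrupting a neighboring stack.
      for(int p = 0; p < 3; p++)
        printf("proc %d: stack page %#lx, guard page below %#lx\n",
               p, (unsigned long)KSTACK(p), (unsigned long)(KSTACK(p) - PGSIZE));
      return 0;
    }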


@@ -28,12 +28,18 @@ procinit(void)
   initlock(&pid_lock, "nextpid");
   for(p = proc; p < &proc[NPROC]; p++) {
       initlock(&p->lock, "proc");
-      // Allocate a page for the kernel stack.
-      uint64 kstack = KSTACK((int) (p - proc));
-      if((p->kstack = mapkstack(kstack)) == 0) {
-        panic("procinit");
-      }
+
+      // Allocate a page for the process's kernel stack.
+      // Map it high in memory, followed by an invalid
+      // guard page.
+      char *pa = kalloc();
+      if(pa == 0)
+        panic("kalloc");
+      uint64 va = KSTACK((int) (p - proc));
+      kmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
+      p->kstack = va;
   }
+  kvminithart();
 }
 
 // Must be called with interrupts disabled,
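Why the extra kvminithart() at the end of procinit(): by the time procinit() runs, paging is already on, so the new stack mappings only become visible once satp is rewritten and stale TLB entries are flushed. A rough sketch of the relevant boot ordering in main.c (existing xv6 code, not changed by this commit; unrelated calls elided):

    // ...
    kinit();         // physical page allocator
    kvminit();       // build the kernel page table (uses kmap)
    kvminithart();   // install it in satp: paging is now on
    procinit();      // maps each kernel stack, then calls kvminithart() again
    // ...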
@@ -113,7 +119,7 @@ found:
   // which returns to user space.
   memset(&p->context, 0, sizeof p->context);
   p->context.ra = (uint64)forkret;
-  p->context.sp = (uint64)p->kstack + PGSIZE;
+  p->context.sp = p->kstack + PGSIZE;
 
   return p;
 }


@@ -96,7 +96,7 @@ struct proc {
   int pid; // Process ID
 
   // these are private to the process, so p->lock need not be held.
-  char *kstack; // Bottom of kernel stack for this process
+  uint64 kstack; // Bottom of kernel stack for this process
   uint64 sz; // Size of process memory (bytes)
   pagetable_t pagetable; // Page table
   struct trapframe *tf; // data page for trampoline.S


@@ -101,7 +101,7 @@ usertrapret(void)
   // set up values that trampoline.S will need when
   // the process next re-enters the kernel.
   p->tf->kernel_satp = r_satp();
-  p->tf->kernel_sp = (uint64)p->kstack + PGSIZE;
+  p->tf->kernel_sp = p->kstack + PGSIZE;
   p->tf->kernel_trap = (uint64)usertrap;
   p->tf->kernel_hartid = r_tp();


@@ -199,6 +199,8 @@ virtio_disk_rw(struct buf *b)
   buf0.reserved = 0;
   buf0.sector = sector;
 
+  // buf0 is on a kernel stack, which is not direct mapped,
+  // thus the call to kernelpa().
   desc[idx[0]].addr = (uint64) kernelpa((uint64) &buf0);
   desc[idx[0]].len = sizeof(buf0);
   desc[idx[0]].flags = VRING_DESC_F_NEXT;


@@ -27,33 +27,26 @@ kvminit()
   memset(kernel_pagetable, 0, PGSIZE);
 
   // uart registers
-  mappages(kernel_pagetable, UART0, PGSIZE,
-           UART0, PTE_R | PTE_W);
+  kmap(UART0, UART0, PGSIZE, PTE_R | PTE_W);
 
   // virtio mmio disk interface
-  mappages(kernel_pagetable, VIRTIO0, PGSIZE,
-           VIRTIO0, PTE_R | PTE_W);
+  kmap(VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
 
   // CLINT
-  mappages(kernel_pagetable, CLINT, 0x10000,
-           CLINT, PTE_R | PTE_W);
+  kmap(CLINT, CLINT, 0x10000, PTE_R | PTE_W);
 
   // PLIC
-  mappages(kernel_pagetable, PLIC, 0x4000000,
-           PLIC, PTE_R | PTE_W);
+  kmap(PLIC, PLIC, 0x400000, PTE_R | PTE_W);
 
   // map kernel text executable and read-only.
-  mappages(kernel_pagetable, KERNBASE, (uint64)etext-KERNBASE,
-           KERNBASE, PTE_R | PTE_X);
+  kmap(KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
 
   // map kernel data and the physical RAM we'll make use of.
-  mappages(kernel_pagetable, (uint64)etext, PHYSTOP-(uint64)etext,
-           (uint64)etext, PTE_R | PTE_W);
+  kmap((uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
 
   // map the trampoline for trap entry/exit to
   // the highest virtual address in the kernel.
-  mappages(kernel_pagetable, TRAMPOLINE, PGSIZE,
-           (uint64)trampout, PTE_R | PTE_X);
+  kmap(TRAMPOLINE, (uint64)trampout, PGSIZE, PTE_R | PTE_X);
 }
 
 // Switch h/w page table register to the kernel's page table,
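For reference, the kvminithart() that both the boot path and the new procinit() code rely on looks roughly like this in vm.c (a sketch of existing xv6 code, not part of this diff):

    // Install the kernel page table in satp and flush stale TLB entries,
    // enabling paging on this hart.
    void
    kvminithart()
    {
      w_satp(MAKE_SATP(kernel_pagetable));
      sfence_vma();
    }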
@@ -117,6 +110,15 @@ walkaddr(pagetable_t pagetable, uint64 va)
   return pa;
 }
 
+// add a mapping to the kernel page table.
+// only used when booting.
+// does not flush TLB or enable paging.
+void
+kmap(uint64 va, uint64 pa, uint64 sz, int perm)
+{
+  if(mappages(kernel_pagetable, va, sz, pa, perm) != 0)
+    panic("kmap");
+}
+
 // Create PTEs for virtual addresses starting at va that refer to
 // physical addresses starting at pa. va and size might not
@@ -405,25 +407,13 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
   }
 }
 
-char *
-mapkstack(uint64 kstack)
-{
-  char *k = kalloc();
-  if(k == 0) {
-    return 0;
-  }
-  if (mappages(kernel_pagetable, kstack, PGSIZE,
-      (uint64) k, PTE_R | PTE_W) == 0) {
-    kvminithart();
-    return (char *) kstack;
-  }
-  kfree(k);
-  return 0;
-}
-
-// assumes va is page aligned
+// translate a kernel virtual address to
+// a physical address. only needed for
+// addresses on the stack.
+// assumes va is page aligned.
 uint64
-kernelpa(uint64 va) {
+kernelpa(uint64 va)
+{
   uint64 off = va % PGSIZE;
   pte_t *pte;
   uint64 pa;
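The hunk above shows only the new header and opening lines of kernelpa(). For orientation, here is a sketch of the whole function as it stands in vm.c around this commit; the lines below the hunk are reconstructed from the surrounding code, so treat them as an approximation:

    uint64
    kernelpa(uint64 va)
    {
      uint64 off = va % PGSIZE;
      pte_t *pte;
      uint64 pa;

      // look up the kernel page table entry for va and add back the
      // page offset; panic if the address isn't mapped.
      pte = walk(kernel_pagetable, va, 0);
      if(pte == 0)
        panic("kernelpa");
      if((*pte & PTE_V) == 0)
        panic("kernelpa");
      pa = PTE2PA(*pte);
      return pa+off;
    }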
@@ -437,8 +427,11 @@ kernelpa(uint64 va) {
   return pa+off;
 }
 
+// mark a PTE invalid for user access.
+// used by exec for the user stack guard page.
 void
-clearpteu(pagetable_t pagetable, uint64 va) {
+clearpteu(pagetable_t pagetable, uint64 va)
+{
   pte_t *pte;
 
   pte = walk(pagetable, va, 0);

@@ -1884,26 +1884,30 @@ rand()
   return randstate;
 }
 
+// check that there's an invalid page beneath
+// the user stack, to catch stack overflow.
 void
 stacktest()
 {
   int pid;
+  int ppid = getpid();
 
-  printf(1, "stack test\n");
+  printf(1, "stack guard test\n");
   pid = fork();
   if(pid == 0) {
     char *sp = (char *) r_sp();
-    printf(1, "%p\n", sp);
     sp -= 4096;
+    // the *sp should cause a trap.
     printf(1, "stacktest: read below stack %p\n", *sp);
     printf(1, "stacktest: test FAILED\n");
+    kill(ppid);
     exit();
   } else if(pid < 0){
     printf (1, "fork failed\n");
     exit();
   }
   wait();
-  printf(1, "stack test done\n");
+  printf(1, "stack guard test ok\n");
 }
 
 int