One way of supporting a guard page below each kstack: allocate kstacks in
procinit() and map them high up (just below TRAMPOLINE), leaving an unmapped
guard page below each stack.  Never free a kernel stack.

Another way would be to allocate and map them dynamically, but then we
need to reload page table when switching processes in scheduler()
and/or have a kernel pagetable per proc (if we want k->stack to be the
same virtual address in each process).

One gotcha: for these stack pages, kernel virtual addresses are no longer
equal to physical addresses.  A stack address must therefore be translated
whenever its physical address is needed (e.g., virtio passes a stack
address to the disk).
This commit is contained in:
Frans Kaashoek 2019-07-19 08:38:51 -04:00
parent b924e44f06
commit 9156632701
5 changed files with 42 additions and 14 deletions

View file

@ -193,6 +193,8 @@ uint64 walkaddr(pagetable_t, uint64);
int copyout(pagetable_t, uint64, char *, uint64); int copyout(pagetable_t, uint64, char *, uint64);
int copyin(pagetable_t, char *, uint64, uint64); int copyin(pagetable_t, char *, uint64, uint64);
int copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max); int copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max);
char* map_kstack();
uint64 kernelpa(uint64);
// plic.c // plic.c
void plicinit(void); void plicinit(void);

View file

@ -53,3 +53,4 @@
// map the trampoline page to the highest address, // map the trampoline page to the highest address,
// in both user and kernel space. // in both user and kernel space.
#define TRAMPOLINE (MAXVA - PGSIZE) #define TRAMPOLINE (MAXVA - PGSIZE)
#define KSTACK(p) ((TRAMPOLINE-PGSIZE)-p*2*PGSIZE)

View file

@ -26,8 +26,14 @@ procinit(void)
struct proc *p; struct proc *p;
initlock(&pid_lock, "nextpid"); initlock(&pid_lock, "nextpid");
for(p = proc; p < &proc[NPROC]; p++) for(p = proc; p < &proc[NPROC]; p++) {
initlock(&p->lock, "proc"); initlock(&p->lock, "proc");
// Allocate a page for the kernel stack.
char *kstack = (char *) KSTACK((int) (p - proc));
if((p->kstack = map_kstack(kstack)) == 0) {
panic("procinit");
}
}
} }
// Must be called with interrupts disabled, // Must be called with interrupts disabled,
@ -94,16 +100,8 @@ allocproc(void)
found: found:
p->pid = allocpid(); p->pid = allocpid();
// Allocate a page for the kernel stack.
if((p->kstack = kalloc()) == 0){
release(&p->lock);
return 0;
}
// Allocate a trapframe page. // Allocate a trapframe page.
if((p->tf = (struct trapframe *)kalloc()) == 0){ if((p->tf = (struct trapframe *)kalloc()) == 0){
kfree(p->kstack);
p->kstack = 0;
release(&p->lock); release(&p->lock);
return 0; return 0;
} }
@ -126,9 +124,6 @@ found:
static void static void
freeproc(struct proc *p) freeproc(struct proc *p)
{ {
if(p->kstack)
kfree(p->kstack);
p->kstack = 0;
if(p->tf) if(p->tf)
kfree((void*)p->tf); kfree((void*)p->tf);
p->tf = 0; p->tf = 0;
@ -651,4 +646,3 @@ procdump(void)
printf("\n"); printf("\n");
} }
} }

View file

@ -199,7 +199,7 @@ virtio_disk_rw(struct buf *b)
buf0.reserved = 0; buf0.reserved = 0;
buf0.sector = sector; buf0.sector = sector;
desc[idx[0]].addr = (uint64) &buf0; desc[idx[0]].addr = (uint64) kernelpa((uint64) &buf0);
desc[idx[0]].len = sizeof(buf0); desc[idx[0]].len = sizeof(buf0);
desc[idx[0]].flags = VRING_DESC_F_NEXT; desc[idx[0]].flags = VRING_DESC_F_NEXT;
desc[idx[0]].next = idx[1]; desc[idx[0]].next = idx[1];

View file

@ -404,3 +404,34 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
return -1; return -1;
} }
} }
// Map one freshly allocated physical page at the kernel-stack virtual
// address kstack in the kernel page table.  The page below kstack is
// left unmapped so it acts as a guard page.
// Returns the stack's virtual address on success, 0 if the physical
// page cannot be allocated or the mapping fails.
char *
map_kstack(uint64 kstack)
{
  char *mem = kalloc();
  if(mem == 0)
    return 0;
  // mappages() returns 0 on success.
  if(mappages(kernel_pagetable, kstack, PGSIZE,
              (uint64) mem, PTE_R | PTE_W) != 0){
    // Mapping failed; give the physical page back.
    kfree(mem);
    return 0;
  }
  // Re-install the kernel page table so the hart's TLB
  // picks up the new mapping.
  kvminithart();
  return (char *) kstack;
}
// Translate a kernel virtual address to a physical address by walking
// the kernel page table.  Handles any offset within the page (the
// page offset is carried over to the result).  Needed because kernel
// stacks live at high virtual addresses that are not identity-mapped.
// Panics if va is not mapped in the kernel page table.
uint64
kernelpa(uint64 va)
{
  pte_t *pte = walk(kernel_pagetable, va, 0);
  if(pte == 0 || (*pte & PTE_V) == 0)
    panic("kernelpa");
  return PTE2PA(*pte) + va % PGSIZE;
}