xv6-65oo2/kernel/vm.c
One way of supporting a guard page below each kstack: allocate kstacks in
procinit() and map them high up (below TRAMPOLINE) with an empty
mapping below each stack.  Never free a kernel stack.  (A sketch of this
scheme follows these notes.)

Another way would be to allocate and map them dynamically, but then we
would need to reload the page table when switching processes in
scheduler(), and/or keep a kernel page table per proc (if we want the
kstack to be at the same virtual address in each process).

One gotcha: under this scheme, kernel stack addresses are no longer
identity-mapped, so a stack address must be translated whenever its
physical address is needed (e.g., virtio passes a stack address to the
disk); see kernelpa() at the end of this file.
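
A minimal sketch of the procinit() approach, assuming a hypothetical
KSTACK(i) macro in memlayout.h that places stack i two pages below
stack i-1 (so the page under each stack stays unmapped), plus the
proc[] array and NPROC from proc.c:

// hypothetical layout: #define KSTACK(i) (TRAMPOLINE - ((i)+1) * 2*PGSIZE)
void
procinit(void)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++){
    char *pa = kalloc();
    if(pa == 0)
      panic("procinit: kalloc");
    uint64 va = KSTACK((int)(p - proc));
    // map only the stack page itself; the page below it stays
    // invalid, so an overflow faults instead of corrupting memory.
    mappages(kernel_pagetable, va, PGSIZE, (uint64)pa, PTE_R | PTE_W);
    p->kstack = va;
  }
}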

#include "param.h"
#include "types.h"
#include "memlayout.h"
#include "elf.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
/*
 * the kernel's page table.
 */
pagetable_t kernel_pagetable;
extern char etext[]; // kernel.ld sets this to end of kernel code.
extern char trampout[]; // trampoline.S
/*
 * create a direct-map page table for the kernel and
 * turn on paging. called early, in supervisor mode.
 * the page allocator is already initialized.
 */
void
kvminit()
{
  kernel_pagetable = (pagetable_t) kalloc();
  memset(kernel_pagetable, 0, PGSIZE);

  // uart registers
  mappages(kernel_pagetable, UART0, PGSIZE,
           UART0, PTE_R | PTE_W);

  // virtio mmio disk interface
  mappages(kernel_pagetable, VIRTIO0, PGSIZE,
           VIRTIO0, PTE_R | PTE_W);

  // CLINT
  mappages(kernel_pagetable, CLINT, 0x10000,
           CLINT, PTE_R | PTE_W);

  // PLIC
  mappages(kernel_pagetable, PLIC, 0x4000000,
           PLIC, PTE_R | PTE_W);

  // map kernel text executable and read-only.
  mappages(kernel_pagetable, KERNBASE, (uint64)etext-KERNBASE,
           KERNBASE, PTE_R | PTE_X);

  // map kernel data and the physical RAM we'll make use of.
  mappages(kernel_pagetable, (uint64)etext, PHYSTOP-(uint64)etext,
           (uint64)etext, PTE_R | PTE_W);

  // map the trampoline for trap entry/exit to
  // the highest virtual address in the kernel.
  mappages(kernel_pagetable, TRAMPOLINE, PGSIZE,
           (uint64)trampout, PTE_R | PTE_X);
}
// Switch h/w page table register to the kernel's page table,
// and enable paging.
void
kvminithart()
{
  // install the kernel page table, then flush any stale
  // entries from the TLB.
  w_satp(MAKE_SATP(kernel_pagetable));
  sfence_vma();
}
// Return the address of the PTE in page table pagetable
// that corresponds to virtual address va. If alloc!=0,
// create any required page-table pages.
//
// The risc-v Sv39 scheme has three levels of page-table
// pages. A page-table page contains 512 64-bit PTEs.
// A 64-bit virtual address is split into five fields:
//   39..63 -- must be zero.
//   30..38 -- 9 bits of level-2 index.
//   21..29 -- 9 bits of level-1 index.
//   12..20 -- 9 bits of level-0 index.
//    0..11 -- 12 bits of byte offset within the page.
static pte_t *
walk(pagetable_t pagetable, uint64 va, int alloc)
{
  if(va >= MAXVA)
    panic("walk");

  for(int level = 2; level > 0; level--) {
    pte_t *pte = &pagetable[PX(level, va)];
    if(*pte & PTE_V) {
      pagetable = (pagetable_t)PTE2PA(*pte);
    } else {
      if(!alloc || (pagetable = (pde_t*)kalloc()) == 0)
        return 0;
      memset(pagetable, 0, PGSIZE);
      *pte = PA2PTE(pagetable) | PTE_V;
    }
  }
  return &pagetable[PX(0, va)];
}
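// For reference, the index-extraction macros used above are
// (assuming the standard xv6 definitions in riscv.h):
//
//   #define PGSHIFT 12
//   #define PXMASK 0x1FF // 9 bits
//   #define PXSHIFT(level) (PGSHIFT + (9*(level)))
//   #define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)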
// Look up a virtual address, return the physical address,
// or 0 if not mapped.
// Can only be used to look up user pages.
uint64
walkaddr(pagetable_t pagetable, uint64 va)
{
  pte_t *pte;
  uint64 pa;

  pte = walk(pagetable, va, 0);
  if(pte == 0)
    return 0;
  if((*pte & PTE_V) == 0)
    return 0;
  if((*pte & PTE_U) == 0)
    return 0;
  pa = PTE2PA(*pte);
  return pa;
}
// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned. Returns 0 on success, -1 if walk() couldn't
// allocate a needed page-table page.
int
mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
  uint64 a, last;
  pte_t *pte;

  a = PGROUNDDOWN(va);
  last = PGROUNDDOWN(va + size - 1);
  for(;;){
    if((pte = walk(pagetable, a, 1)) == 0)
      return -1;
    if(*pte & PTE_V)
      panic("remap");
    *pte = PA2PTE(pa) | perm | PTE_V;
    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}
// Remove mappings from a page table. The mappings in
// the given range must exist. Optionally free the
// physical memory.
void
unmappages(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
{
  uint64 a, last;
  pte_t *pte;
  uint64 pa;

  a = PGROUNDDOWN(va);
  last = PGROUNDDOWN(va + size - 1);
  for(;;){
    if((pte = walk(pagetable, a, 0)) == 0)
      panic("unmappages: walk");
    if((*pte & PTE_V) == 0){
      printf("va=%p pte=%p\n", a, *pte);
      panic("unmappages: not mapped");
    }
    if(PTE_FLAGS(*pte) == PTE_V)
      panic("unmappages: not a leaf");
    if(do_free){
      // pa is recomputed from the PTE on each iteration,
      // so there is no need to advance it separately.
      pa = PTE2PA(*pte);
      kfree((void*)pa);
    }
    *pte = 0;
    if(a == last)
      break;
    a += PGSIZE;
  }
}
// create an empty user page table.
pagetable_t
uvmcreate()
{
  pagetable_t pagetable;
  pagetable = (pagetable_t) kalloc();
  if(pagetable == 0)
    panic("uvmcreate: out of memory");
  memset(pagetable, 0, PGSIZE);
  return pagetable;
}
// Load the user initcode into address 0 of pagetable,
// for the very first process.
// sz must be less than a page.
void
uvminit(pagetable_t pagetable, uchar *src, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("uvminit: more than a page");
  mem = kalloc();
  memset(mem, 0, PGSIZE);
  mappages(pagetable, 0, PGSIZE, (uint64)mem, PTE_W|PTE_R|PTE_X|PTE_U);
  memmove(mem, src, sz);
}
// Allocate PTEs and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
uint64
uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
  char *mem;
  uint64 a;

  if(newsz < oldsz)
    return oldsz;

  oldsz = PGROUNDUP(oldsz);
  a = oldsz;
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      uvmdealloc(pagetable, a, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_W|PTE_X|PTE_R|PTE_U) != 0){
      kfree(mem);
      uvmdealloc(pagetable, a, oldsz);
      return 0;
    }
  }
  return newsz;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
uint64
uvmdealloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
  if(newsz >= oldsz)
    return oldsz;
  unmappages(pagetable, newsz, oldsz - newsz, 1);
  return newsz;
}
// Recursively free page table pages.
// All leaf mappings must already have been removed.
static void
freewalk(pagetable_t pagetable)
{
  // there are 2^9 = 512 PTEs in a page table.
  for(int i = 0; i < 512; i++){
    pte_t pte = pagetable[i];
    if((pte & PTE_V) && (pte & (PTE_R|PTE_W|PTE_X)) == 0){
      // this PTE points to a lower-level page table.
      uint64 child = PTE2PA(pte);
      freewalk((pagetable_t)child);
      pagetable[i] = 0;
    } else if(pte & PTE_V){
      panic("freewalk: leaf");
    }
  }
  kfree((void*)pagetable);
}
// Free user memory pages,
// then free page table pages.
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
  unmappages(pagetable, 0, sz, 1);
  freewalk(pagetable);
}
// Given a parent process's page table, copy
// its memory into a child's page table.
// Copies both the page table and the
// physical memory.
// returns 0 on success, -1 on failure.
// frees any allocated pages on failure.
int
uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
{
  pte_t *pte;
  uint64 pa, i;
  uint flags;
  char *mem;

  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walk(old, i, 0)) == 0)
      panic("uvmcopy: pte should exist");
    if((*pte & PTE_V) == 0)
      panic("uvmcopy: page not present");
    pa = PTE2PA(*pte);
    flags = PTE_FLAGS(*pte);
    if((mem = kalloc()) == 0)
      goto err;
    memmove(mem, (char*)pa, PGSIZE);
    if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
      kfree(mem);
      goto err;
    }
  }
  return 0;

 err:
  unmappages(new, 0, i, 1);
  return -1;
}
// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
int
copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
  uint64 n, va0, pa0;

  while(len > 0){
    // don't truncate the address: PGROUNDDOWN yields a 64-bit value.
    va0 = PGROUNDDOWN(dstva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (dstva - va0);
    if(n > len)
      n = len;
    memmove((void *)(pa0 + (dstva - va0)), src, n);

    len -= n;
    src += n;
    dstva = va0 + PGSIZE;
  }
  return 0;
}
// Copy from user to kernel.
// Copy len bytes to dst from virtual address srcva in a given page table.
// Return 0 on success, -1 on error.
int
copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
  uint64 n, va0, pa0;

  while(len > 0){
    va0 = PGROUNDDOWN(srcva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (srcva - va0);
    if(n > len)
      n = len;
    memmove(dst, (void *)(pa0 + (srcva - va0)), n);

    len -= n;
    dst += n;
    srcva = va0 + PGSIZE;
  }
  return 0;
}
// Copy a null-terminated string from user to kernel.
// Copy bytes to dst from virtual address srcva in a given page table,
// until a '\0', or max.
// Return 0 on success, -1 on error.
int
copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
{
  uint64 n, va0, pa0;
  int got_null = 0;

  while(got_null == 0 && max > 0){
    va0 = PGROUNDDOWN(srcva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (srcva - va0);
    if(n > max)
      n = max;

    char *p = (char *) (pa0 + (srcva - va0));
    while(n > 0){
      if(*p == '\0'){
        *dst = '\0';
        got_null = 1;
        break;
      } else {
        *dst = *p;
      }
      --n;
      --max;
      p++;
      dst++;
    }

    srcva = va0 + PGSIZE;
  }
  if(got_null){
    return 0;
  } else {
    return -1;
  }
}
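// Allocate a physical page for a kernel stack and map it in the
// kernel page table at virtual address kstack, then reload satp to
// flush the TLB so the new mapping takes effect.
// Returns (char *) kstack on success, 0 if out of memory.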
char *
map_kstack(uint64 kstack)
{
  char *k = kalloc();
  if(k == 0){
    return 0;
  }
  if(mappages(kernel_pagetable, kstack, PGSIZE,
              (uint64) k, PTE_R | PTE_W) == 0){
    kvminithart();
    return (char *) kstack;
  }
  kfree(k);
  return 0;
}
// Translate a kernel virtual address to a physical address.
// Needed for kernel addresses that are not identity-mapped,
// such as kernel stack addresses.  va need not be page-aligned.
uint64
kernelpa(uint64 va)
{
  uint64 off = va % PGSIZE;
  pte_t *pte;
  uint64 pa;

  pte = walk(kernel_pagetable, va, 0);
  if(pte == 0)
    panic("kernelpa");
  if((*pte & PTE_V) == 0)
    panic("kernelpa");
  pa = PTE2PA(*pte);
  return pa+off;
}
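
// Example of the gotcha described in the notes at the top of this
// file: a hypothetical call site in the virtio driver, where buf0 is
// a local (kernel-stack) variable, must translate the address before
// handing it to the disk:
//
//   disk.desc[idx[0]].addr = (uint64) kernelpa((uint64) &buf0);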