rationalize some vm function names

Commit 9a817bd134 by Robert Morris, 2019-07-24 15:28:37 -04:00
Parent: da898a11b6
5 changed files with 60 additions and 60 deletions

View file

@@ -180,21 +180,21 @@ int uartgetc(void);
// vm.c
// NOTE(review): diff view — this prototype list shows old and new names
// side by side (kernelpa -> kvmpa, kmap -> kvmmap, unmappages -> uvmunmap,
// clearpteu -> uvmclear), which is why some declarations (e.g. mappages)
// appear twice. In the post-commit header only the new names remain.
void kvminit(void);
void kvminithart(void);
uint64 kvmpa(uint64);
void kvmmap(uint64, uint64, uint64, int);
int mappages(pagetable_t, uint64, uint64, uint64, int);
pagetable_t uvmcreate(void);
void uvminit(pagetable_t, uchar *, uint);
uint64 uvmalloc(pagetable_t, uint64, uint64);
uint64 uvmdealloc(pagetable_t, uint64, uint64);
int uvmcopy(pagetable_t, pagetable_t, uint64);
void uvmfree(pagetable_t, uint64);
int mappages(pagetable_t, uint64, uint64, uint64, int);
void unmappages(pagetable_t, uint64, uint64, int);
void uvmunmap(pagetable_t, uint64, uint64, int);
void uvmclear(pagetable_t, uint64);
uint64 walkaddr(pagetable_t, uint64);
int copyout(pagetable_t, uint64, char *, uint64);
int copyin(pagetable_t, char *, uint64, uint64);
int copyinstr(pagetable_t, char *, uint64, uint64);
void kmap(uint64, uint64, uint64, int);
uint64 kernelpa(uint64);
void clearpteu(pagetable_t, uint64);
// plic.c
void plicinit(void);

View file

@@ -68,7 +68,7 @@ exec(char *path, char **argv)
sz = PGROUNDUP(sz);
if((sz = uvmalloc(pagetable, sz, sz + 2*PGSIZE)) == 0)
goto bad;
clearpteu(pagetable, sz-2*PGSIZE);
uvmclear(pagetable, sz-2*PGSIZE);
sp = sz;
stackbase = sp - PGSIZE;

View file

@@ -36,7 +36,7 @@ procinit(void)
if(pa == 0)
panic("kalloc");
uint64 va = KSTACK((int) (p - proc));
kmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
kvmmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
p->kstack = va;
}
kvminithart();
@@ -173,8 +173,8 @@ proc_pagetable(struct proc *p)
// Free a process's page table, and free the physical
// memory it refers to.
// NOTE(review): diff view — both the old calls (unmappages) and the
// renamed calls (uvmunmap) appear on adjacent lines; only the uvmunmap
// pair exists in the post-commit source.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
// do_free == 0: unmap TRAMPOLINE and TRAPFRAME without freeing the
// underlying physical pages (they are not owned by this page table).
unmappages(pagetable, TRAMPOLINE, PGSIZE, 0);
unmappages(pagetable, TRAPFRAME, PGSIZE, 0);
uvmunmap(pagetable, TRAMPOLINE, PGSIZE, 0);
uvmunmap(pagetable, TRAPFRAME, PGSIZE, 0);
// free the user address space only if the process actually has one.
if(sz > 0)
uvmfree(pagetable, sz);
}

View file

@@ -200,8 +200,8 @@ virtio_disk_rw(struct buf *b)
buf0.sector = sector;
// buf0 is on a kernel stack, which is not direct mapped,
// thus the call to kernelpa().
desc[idx[0]].addr = (uint64) kernelpa((uint64) &buf0);
// thus the call to kvmpa().
desc[idx[0]].addr = (uint64) kvmpa((uint64) &buf0);
desc[idx[0]].len = sizeof(buf0);
desc[idx[0]].flags = VRING_DESC_F_NEXT;
desc[idx[0]].next = idx[1];

View file

@@ -27,26 +27,26 @@ kvminit()
memset(kernel_pagetable, 0, PGSIZE);
// uart registers
kmap(UART0, UART0, PGSIZE, PTE_R | PTE_W);
kvmmap(UART0, UART0, PGSIZE, PTE_R | PTE_W);
// virtio mmio disk interface
kmap(VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
kvmmap(VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
// CLINT
kmap(CLINT, CLINT, 0x10000, PTE_R | PTE_W);
kvmmap(CLINT, CLINT, 0x10000, PTE_R | PTE_W);
// PLIC
kmap(PLIC, PLIC, 0x400000, PTE_R | PTE_W);
kvmmap(PLIC, PLIC, 0x400000, PTE_R | PTE_W);
// map kernel text executable and read-only.
kmap(KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
kvmmap(KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
// map kernel data and the physical RAM we'll make use of.
kmap((uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
kvmmap((uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
// map the trampoline for trap entry/exit to
// the highest virtual address in the kernel.
kmap(TRAMPOLINE, (uint64)trampout, PGSIZE, PTE_R | PTE_X);
kvmmap(TRAMPOLINE, (uint64)trampout, PGSIZE, PTE_R | PTE_X);
}
// Switch h/w page table register to the kernel's page table,
@@ -114,10 +114,30 @@ walkaddr(pagetable_t pagetable, uint64 va)
// add a mapping to the kernel page table.
// only used when booting.
// does not flush TLB or enable paging.
// NOTE(review): diff view — the old name (kmap) and the new name (kvmmap)
// appear on adjacent lines for both the signature and the panic message;
// only the kvmmap lines exist in the post-commit source.
void
kmap(uint64 va, uint64 pa, uint64 sz, int perm)
kvmmap(uint64 va, uint64 pa, uint64 sz, int perm)
{
// install PTEs for [va, va+sz) -> [pa, pa+sz) with permissions perm;
// mappages returns nonzero on failure, which is fatal during boot.
if(mappages(kernel_pagetable, va, sz, pa, perm) != 0)
panic("kmap");
panic("kvmmap");
}
// Translate a kernel virtual address to a physical address.
// Only needed for addresses on the (kernel) stack, which is not
// direct-mapped.
// NOTE(review): the original comment says "assumes va is page aligned",
// but the code handles an arbitrary offset via va % PGSIZE — confirm
// which is intended.
// Panics if va is not mapped in the kernel page table.
uint64
kvmpa(uint64 va)
{
  uint64 off = va % PGSIZE;
  pte_t *pte;
  uint64 pa;

  // walk() returns the leaf PTE for va, or 0 if the page-table
  // path does not exist (alloc == 0, so nothing is created).
  pte = walk(kernel_pagetable, va, 0);
  if(pte == 0)
    panic("kvmpa");          // fixed: panic string said "kernelpa", the pre-rename name
  if((*pte & PTE_V) == 0)
    panic("kvmpa");          // fixed: ditto
  pa = PTE2PA(*pte);
  return pa+off;
}
// Create PTEs for virtual addresses starting at va that refer to
@@ -150,7 +170,7 @@ mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
// the given range must exist. Optionally free the
// physical memory.
void
unmappages(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
uvmunmap(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
{
uint64 a, last;
pte_t *pte;
@@ -160,13 +180,13 @@ unmappages(pagetable_t pagetable, uint64 va, uint64 size, int do_free)
last = PGROUNDDOWN(va + size - 1);
for(;;){
if((pte = walk(pagetable, a, 0)) == 0)
panic("unmappages: walk");
panic("uvmunmap: walk");
if((*pte & PTE_V) == 0){
printf("va=%p pte=%p\n", a, *pte);
panic("unmappages: not mapped");
panic("uvmunmap: not mapped");
}
if(PTE_FLAGS(*pte) == PTE_V)
panic("unmappages: not a leaf");
panic("uvmunmap: not a leaf");
if(do_free){
pa = PTE2PA(*pte);
kfree((void*)pa);
@@ -245,7 +265,7 @@ uvmdealloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
if(newsz >= oldsz)
return oldsz;
unmappages(pagetable, newsz, oldsz - newsz, 1);
uvmunmap(pagetable, newsz, oldsz - newsz, 1);
return newsz;
}
@@ -274,7 +294,7 @@ freewalk(pagetable)
// Free user memory pages, then free the page-table pages.
// NOTE(review): diff view — the old call (unmappages) and the renamed
// call (uvmunmap) both appear; only uvmunmap exists post-commit.
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
// unmap and free (do_free == 1) the first sz bytes of user memory.
unmappages(pagetable, 0, sz, 1);
uvmunmap(pagetable, 0, sz, 1);
// freewalk presumably reclaims the page-table pages themselves —
// name-based inference; body not visible here.
freewalk(pagetable);
}
@@ -310,10 +330,23 @@ uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
return 0;
err:
unmappages(new, 0, i, 1);
uvmunmap(new, 0, i, 1);
return -1;
}
// Mark a PTE invalid for user access by clearing its PTE_U bit.
// Used by exec for the user stack guard page.
// Panics if va has no PTE in the given page table.
void
uvmclear(pagetable_t pagetable, uint64 va)
{
  pte_t *pte;

  // look up the leaf PTE for va; alloc == 0, so nothing is created.
  pte = walk(pagetable, va, 0);
  if(pte == 0)
    panic("uvmclear");       // fixed: panic string said "clearpteu", the pre-rename name
  *pte &= ~PTE_U;
}
// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
@@ -406,36 +439,3 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
return -1;
}
}
// Translate a kernel virtual address to a physical address.
// Only needed for addresses on the stack.
// Panics if va has no valid mapping in the kernel page table.
uint64
kernelpa(uint64 va)
{
  // Find the leaf PTE for va without allocating page-table pages;
  // a missing or invalid entry is fatal.
  pte_t *entry = walk(kernel_pagetable, va, 0);
  if(entry == 0 || (*entry & PTE_V) == 0)
    panic("kernelpa");

  // Combine the frame's physical base with the offset within the page.
  return PTE2PA(*entry) + va % PGSIZE;
}
// Revoke user access to the page at va by clearing PTE_U in its PTE.
// Used by exec for the user stack guard page.
// Panics if va is not present in the page table.
void
clearpteu(pagetable_t pagetable, uint64 va)
{
  pte_t *guard = walk(pagetable, va, 0);

  if(!guard)
    panic("clearpteu");

  *guard &= ~PTE_U;
}