#include "param.h"
#include "types.h"
#include "memlayout.h"
#include "elf.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"

/*
 * the kernel's page table.
 */
pagetable_t kernel_pagetable;

extern char etext[];  // kernel.ld sets this to end of kernel code.

extern char trampoline[]; // trampoline.S

// Make a direct-map page table for the kernel.
pagetable_t
kvmmake(void)
{
  pagetable_t kpgtbl;

  kpgtbl = (pagetable_t) kalloc();
  memset(kpgtbl, 0, PGSIZE);

  // uart registers
  kvmmap(kpgtbl, UART0, UART0, PGSIZE, PTE_R | PTE_W);

  // virtio mmio disk interface
  kvmmap(kpgtbl, VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);

  // PLIC
  kvmmap(kpgtbl, PLIC, PLIC, 0x400000, PTE_R | PTE_W);

  // map kernel text executable and read-only.
  kvmmap(kpgtbl, KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);

  // map kernel data and the physical RAM we'll make use of.
  kvmmap(kpgtbl, (uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);

  // map the trampoline for trap entry/exit to
  // the highest virtual address in the kernel.
  kvmmap(kpgtbl, TRAMPOLINE, (uint64)trampoline, PGSIZE, PTE_R | PTE_X);

  // allocate and map a kernel stack for each process.
  proc_mapstacks(kpgtbl);

  return kpgtbl;
}

// Initialize the one kernel_pagetable
void
kvminit(void)
{
  kernel_pagetable = kvmmake();
}

// Switch h/w page table register to the kernel's page table,
// and enable paging.
void
kvminithart()
{
  // wait for any previous writes to the page table memory to finish.
  sfence_vma();

  w_satp(MAKE_SATP(kernel_pagetable));

  // flush stale entries from the TLB.
  sfence_vma();
}
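
// Note (illustrative, not part of the original source): MAKE_SATP is
// expected to come from riscv.h and to build the satp value by OR-ing
// the Sv39 mode bits with the page table's physical page number,
// roughly:
//
//   w_satp(SATP_SV39 | (((uint64)kernel_pagetable) >> 12));
//
// so the single w_satp() call above both selects Sv39 translation and
// points the MMU at kernel_pagetable.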

// Return the address of the PTE in page table pagetable
// that corresponds to virtual address va. If alloc!=0,
// create any required page-table pages.
//
// The risc-v Sv39 scheme has three levels of page-table
// pages. A page-table page contains 512 64-bit PTEs.
// A 64-bit virtual address is split into five fields:
//   39..63 -- must be zero.
//   30..38 -- 9 bits of level-2 index.
//   21..29 -- 9 bits of level-1 index.
//   12..20 -- 9 bits of level-0 index.
//    0..11 -- 12 bits of byte offset within the page.
pte_t *
walk(pagetable_t pagetable, uint64 va, int alloc)
{
  if(va >= MAXVA)
    panic("walk");

  for(int level = 2; level > 0; level--) {
    pte_t *pte = &pagetable[PX(level, va)];
    if(*pte & PTE_V) {
      pagetable = (pagetable_t)PTE2PA(*pte);
    } else {
      if(!alloc || (pagetable = (pde_t*)kalloc()) == 0)
        return 0;
      memset(pagetable, 0, PGSIZE);
      *pte = PA2PTE(pagetable) | PTE_V;
    }
  }
  return &pagetable[PX(0, va)];
}
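
// Worked example (illustrative, not part of the original source): for
// va = 0x12345678, PX() from riscv.h extracts
//   PX(2, va) = (va >> 30) & 0x1FF = 0
//   PX(1, va) = (va >> 21) & 0x1FF = 145
//   PX(0, va) = (va >> 12) & 0x1FF = 325
// and the low 12 bits (0x678) are the byte offset within the page, so
// walk() indexes slot 0 of the root page, slot 145 of the level-1 page,
// and returns a pointer to slot 325 of the level-0 page.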

// Look up a virtual address, return the physical address,
// or 0 if not mapped.
// Can only be used to look up user pages.
uint64
walkaddr(pagetable_t pagetable, uint64 va)
{
  pte_t *pte;
  uint64 pa;

  if(va >= MAXVA)
    return 0;

  pte = walk(pagetable, va, 0);
  if(pte == 0)
    return 0;
  if((*pte & PTE_V) == 0)
    return 0;
  if((*pte & PTE_U) == 0)
    return 0;
  pa = PTE2PA(*pte);
  return pa;
}

// add a mapping to the kernel page table.
// only used when booting.
// does not flush TLB or enable paging.
void
kvmmap(pagetable_t kpgtbl, uint64 va, uint64 pa, uint64 sz, int perm)
{
  if(mappages(kpgtbl, va, sz, pa, perm) != 0)
    panic("kvmmap");
}

// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned. Returns 0 on success, -1 if walk() couldn't
// allocate a needed page-table page.
int
mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
  uint64 a, last;
  pte_t *pte;

  if(size == 0)
    panic("mappages: size");

  a = PGROUNDDOWN(va);
  last = PGROUNDDOWN(va + size - 1);
  for(;;){
    if((pte = walk(pagetable, a, 1)) == 0)
      return -1;
    if(*pte & PTE_V)
      panic("mappages: remap");
    *pte = PA2PTE(pa) | perm | PTE_V;
    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}
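
// Example (illustrative, not part of the original source): a call such as
//
//   mappages(pagetable, 0x1234, 0x2000, pa, PTE_R | PTE_W);
//
// rounds va down to 0x1000 and last down to 0x3000
// (PGROUNDDOWN(0x1234 + 0x2000 - 1) = PGROUNDDOWN(0x3233)), so three
// PTEs are installed, covering virtual pages 0x1000, 0x2000, and
// 0x3000; pa here stands for an assumed page-aligned physical address.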

// Remove npages of mappings starting from va. va must be
// page-aligned. The mappings must exist.
// Optionally free the physical memory.
void
uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
{
  uint64 a;
  pte_t *pte;

  if((va % PGSIZE) != 0)
    panic("uvmunmap: not aligned");

  for(a = va; a < va + npages*PGSIZE; a += PGSIZE){
    if((pte = walk(pagetable, a, 0)) == 0)
      panic("uvmunmap: walk");
    if((*pte & PTE_V) == 0)
      panic("uvmunmap: not mapped");
    if(PTE_FLAGS(*pte) == PTE_V)
      panic("uvmunmap: not a leaf");
    if(do_free){
      uint64 pa = PTE2PA(*pte);
      kfree((void*)pa);
    }
    *pte = 0;
  }
}

// create an empty user page table.
// returns 0 if out of memory.
pagetable_t
uvmcreate()
{
  pagetable_t pagetable;
  pagetable = (pagetable_t) kalloc();
  if(pagetable == 0)
    return 0;
  memset(pagetable, 0, PGSIZE);
  return pagetable;
}

// Load the user initcode into address 0 of pagetable,
// for the very first process.
// sz must be less than a page.
void
uvmfirst(pagetable_t pagetable, uchar *src, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("uvmfirst: more than a page");
  mem = kalloc();
  memset(mem, 0, PGSIZE);
  mappages(pagetable, 0, PGSIZE, (uint64)mem, PTE_W|PTE_R|PTE_X|PTE_U);
  memmove(mem, src, sz);
}

// Allocate PTEs and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
uint64
uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz, int xperm)
{
  char *mem;
  uint64 a;

  if(newsz < oldsz)
    return oldsz;

  oldsz = PGROUNDUP(oldsz);
  for(a = oldsz; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      uvmdealloc(pagetable, a, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
      kfree(mem);
      uvmdealloc(pagetable, a, oldsz);
      return 0;
    }
  }
  return newsz;
}

// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
uint64
uvmdealloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
  if(newsz >= oldsz)
    return oldsz;

  if(PGROUNDUP(newsz) < PGROUNDUP(oldsz)){
    int npages = (PGROUNDUP(oldsz) - PGROUNDUP(newsz)) / PGSIZE;
    uvmunmap(pagetable, PGROUNDUP(newsz), npages, 1);
  }

  return newsz;
}
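
// Usage sketch (illustrative; the exact code lives in proc.c, not here):
// growproc(), which backs sbrk(), is expected to grow or shrink the
// current process roughly like this, where p is the current struct proc
// and sz starts as p->sz:
//
//   if(n > 0){
//     if((sz = uvmalloc(p->pagetable, sz, sz + n, PTE_W)) == 0)
//       return -1;
//   } else if(n < 0){
//     sz = uvmdealloc(p->pagetable, sz, sz + n);
//   }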

// Recursively free page-table pages.
// All leaf mappings must already have been removed.
void
freewalk(pagetable_t pagetable)
{
  // there are 2^9 = 512 PTEs in a page table.
  for(int i = 0; i < 512; i++){
    pte_t pte = pagetable[i];
    if((pte & PTE_V) && (pte & (PTE_R|PTE_W|PTE_X)) == 0){
      // this PTE points to a lower-level page table.
      uint64 child = PTE2PA(pte);
      freewalk((pagetable_t)child);
      pagetable[i] = 0;
    } else if(pte & PTE_V){
      panic("freewalk: leaf");
    }
  }
  kfree((void*)pagetable);
}

// Free user memory pages,
// then free page-table pages.
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
  if(sz > 0)
    uvmunmap(pagetable, 0, PGROUNDUP(sz)/PGSIZE, 1);
  freewalk(pagetable);
}

// Given a parent process's page table, copy
// its memory into a child's page table.
// Copies both the page table and the
// physical memory.
// returns 0 on success, -1 on failure.
// frees any allocated pages on failure.
int
uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
{
  pte_t *pte;
  uint64 pa, i;
  uint flags;
  char *mem;

  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walk(old, i, 0)) == 0)
      panic("uvmcopy: pte should exist");
    if((*pte & PTE_V) == 0)
      panic("uvmcopy: page not present");
    pa = PTE2PA(*pte);
    flags = PTE_FLAGS(*pte);
    if((mem = kalloc()) == 0)
      goto err;
    memmove(mem, (char*)pa, PGSIZE);
    if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
      kfree(mem);
      goto err;
    }
  }
  return 0;

 err:
  uvmunmap(new, 0, i / PGSIZE, 1);
  return -1;
}
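
// Usage sketch (illustrative; the real call site is fork() in proc.c):
// the parent's whole user address space is duplicated into the child's
// fresh page table, and the child is torn down if copying fails. Here p
// is the parent, np the child; freeproc() and np->lock are assumed from
// proc.c:
//
//   if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
//     freeproc(np);
//     release(&np->lock);
//     return -1;
//   }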

// mark a PTE invalid for user access.
// used by exec for the user stack guard page.
void
uvmclear(pagetable_t pagetable, uint64 va)
{
  pte_t *pte;

  pte = walk(pagetable, va, 0);
  if(pte == 0)
    panic("uvmclear");
  *pte &= ~PTE_U;
}

// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
int
copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
  uint64 n, va0, pa0;

  while(len > 0){
    va0 = PGROUNDDOWN(dstva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (dstva - va0);
    if(n > len)
      n = len;
    memmove((void *)(pa0 + (dstva - va0)), src, n);

    len -= n;
    src += n;
    dstva = va0 + PGSIZE;
  }
  return 0;
}
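
// Usage sketch (illustrative; based on how filestat() in file.c is
// expected to return a struct stat to user space, with addr a user
// virtual address supplied by the system call):
//
//   struct stat st;
//   stati(f->ip, &st);
//   if(copyout(myproc()->pagetable, addr, (char *)&st, sizeof(st)) < 0)
//     return -1;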

// Copy from user to kernel.
// Copy len bytes to dst from virtual address srcva in a given page table.
// Return 0 on success, -1 on error.
int
copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
  uint64 n, va0, pa0;

  while(len > 0){
    va0 = PGROUNDDOWN(srcva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (srcva - va0);
    if(n > len)
      n = len;
    memmove(dst, (void *)(pa0 + (srcva - va0)), n);

    len -= n;
    dst += n;
    srcva = va0 + PGSIZE;
  }
  return 0;
}

// Copy a null-terminated string from user to kernel.
// Copy bytes to dst from virtual address srcva in a given page table,
// until a '\0', or max.
// Return 0 on success, -1 on error.
int
copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
{
  uint64 n, va0, pa0;
  int got_null = 0;

  while(got_null == 0 && max > 0){
    va0 = PGROUNDDOWN(srcva);
    pa0 = walkaddr(pagetable, va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (srcva - va0);
    if(n > max)
      n = max;

    char *p = (char *) (pa0 + (srcva - va0));
    while(n > 0){
      if(*p == '\0'){
        *dst = '\0';
        got_null = 1;
        break;
      } else {
        *dst = *p;
      }
      --n;
      --max;
      p++;
      dst++;
    }

    srcva = va0 + PGSIZE;
  }
  if(got_null){
    return 0;
  } else {
    return -1;
  }
}
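
// Usage sketch (illustrative; the real helper is fetchstr() in
// syscall.c): a system call fetches a NUL-terminated string argument,
// such as a pathname, from a user address uaddr like this, where
// MAXPATH is assumed from param.h:
//
//   char path[MAXPATH];
//   if(copyinstr(myproc()->pagetable, path, uaddr, MAXPATH) < 0)
//     return -1;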