conservatively call sfence.vma before every satp load.

Robert Morris 2019-07-16 17:02:21 -04:00
parent 6bbc2b2245
commit ebc3937209
4 changed files with 16 additions and 2 deletions
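The change itself is small: issue sfence.vma immediately before every write of the satp register, so the MMU is guaranteed to observe earlier stores to page-table entries and holds no stale TLB entries. As a reading aid, a minimal C sketch of the pattern, using the names sfence_vma, w_satp, MAKE_SATP, and pagetable_t from xv6's riscv.h; the helper switch_pagetable is hypothetical, not part of this commit:

    // Hypothetical helper (not in this commit) showing the pattern the
    // commit applies at every satp load: fence first, then switch.
    static void
    switch_pagetable(pagetable_t pt)
    {
      // order earlier PTE stores ahead of future page-table walks,
      // and conservatively flush all TLB entries.
      sfence_vma();
      // install the new page-table root.
      w_satp(MAKE_SATP(pt));
    }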

proc.c

@@ -544,7 +544,7 @@ sleep(void *chan, struct spinlock *lk)
 }

 //PAGEBREAK!
-// Wake up p, used by exit().
+// Wake up p if it is sleeping in wait(); used by exit().
 // Caller must hold p->lock.
 static void
 wakeup1(struct proc *p)

riscv.h

@@ -312,6 +312,17 @@ r_ra()
   return x;
 }

+// tell the machine to finish any previous writes to
+// PTEs, so that a subsequent use of a virtual
+// address or load of the SATP will see those writes.
+// perhaps this also flushes the TLB.
+static inline void
+sfence_vma()
+{
+  // the zero, zero means flush all TLB entries.
+  asm volatile("sfence.vma zero, zero");
+}
+
 #define PGSIZE 4096 // bytes per page
 #define PGSHIFT 12  // bits of offset within a page
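A note on the hedged comment above: per the RISC-V privileged specification, sfence.vma with both operands zero does both things for certain; it orders all prior stores to the page tables ahead of subsequent implicit address-translation reads, and it invalidates all TLB entries for all address spaces. The instruction also takes finer-grained operands. A sketch of a per-address variant (hypothetical, not in xv6; uint64 is xv6's typedef):

    // Hypothetical per-address fence: rs1 = virtual address, rs2 = zero
    // (all address spaces). Invalidates only translations for va.
    static inline void
    sfence_vma_addr(uint64 va)
    {
      asm volatile("sfence.vma %0, zero" : : "r" (va) : "memory");
    }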

trampoline.S

@@ -17,7 +17,8 @@ trampout:
         # a0: p->tf in user page table
         # a1: new value for satp, for user page table

-        # switch to user page table
+        # switch to user page table.
+        sfence.vma zero, zero
         csrw satp, a1

         # put the saved user a0 in sscratch, so we
@@ -128,6 +129,7 @@ trampin:
         # restore kernel page table from p->tf->kernel_satp
         ld t1, 0(a0)
+        sfence.vma zero, zero
         csrw satp, t1

         # a0 is no longer valid, since the kernel page
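Both trampoline paths now fence right before their csrw satp: trampout before switching to the user page table, and trampin before switching back to the kernel page table read from p->tf->kernel_satp. For intuition only, a rough C rendering of trampout's switch; the real code must remain in assembly, since the page table changes underneath the executing code, and trampout_sketch and user_satp are illustrative names, not code from this commit:

    // Illustrative C rendering of trampout's page-table switch.
    void
    trampout_sketch(uint64 user_satp)
    {
      sfence_vma();        // make earlier PTE writes visible; flush the TLB
      w_satp(user_satp);   // switch to the user page table
      // ...restore user registers from the trapframe, then sret...
    }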

vm.c

@@ -61,6 +61,7 @@ kvminit()
 void
 kvminithart()
 {
+  sfence_vma();
   w_satp(MAKE_SATP(kernel_pagetable));
 }
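kvminithart() installs the kernel page table on the executing hart, now fencing first. For context, a simplified sketch of how it is reached during boot, loosely following xv6's main(); details elided, and not part of this diff:

    // Simplified boot path: build the kernel page table once, then
    // have each hart fence and load it into its own satp.
    void
    main(void)
    {
      // ...console, physical page allocator...
      kvminit();      // create the kernel page table
      kvminithart();  // sfence_vma(), then w_satp(MAKE_SATP(...))
      // ...rest of boot...
    }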