spinlocks using gcc intrinsics
push_off() / pop_off(); set up per-hart PLIC enable/priority registers so all harts get device interrupts
parent f1a727b971
commit 3113643768

Makefile (2 changed lines)

@@ -182,7 +182,7 @@ QEMUGDB = $(shell if $(QEMU) -help | grep -q '^-gdb'; \
 	then echo "-gdb tcp::$(GDBPORT)"; \
 	else echo "-s -p $(GDBPORT)"; fi)
 ifndef CPUS
-CPUS := 1
+CPUS := 2
 endif
 QEMUOPTS = -machine virt -kernel kernel -m 3G -smp $(CPUS) -nographic
 QEMUOPTS += -initrd fs.img

defs.h (8 changed lines)

@@ -130,12 +130,11 @@ void swtch(struct context*, struct context*);
 
 // spinlock.c
 void acquire(struct spinlock*);
-void getcallerpcs(void*, uint64*);
 int holding(struct spinlock*);
 void initlock(struct spinlock*, char*);
 void release(struct spinlock*);
-void pushcli(void);
-void popcli(void);
+void push_off(void);
+void pop_off(void);
 
 // sleeplock.c
 void acquiresleep(struct sleeplock*);
@@ -168,6 +167,7 @@ void timerinit(void);
 // trap.c
 extern uint ticks;
 void trapinit(void);
+void trapinithart(void);
 extern struct spinlock tickslock;
 void usertrapret(void);
 
@@ -179,6 +179,7 @@ int uartgetc(void);
 
 // vm.c
 void kvminit(void);
+void kvminithart(void);
 pagetable_t uvmcreate(void);
 void uvminit(pagetable_t, char *, uint);
 uint64 uvmalloc(pagetable_t, uint64, uint64);
@@ -194,6 +195,7 @@ int copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64
 
 // plic.c
 void plicinit(void);
+void plicinithart(void);
 uint64 plic_pending(void);
 int plic_claim(void);
 void plic_complete(int);

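The spinlock interface itself is unchanged for callers: initlock() once, then acquire()/release() around any shared data, with push_off()/pop_off() handling interrupt state underneath. A minimal usage sketch (the helper below is hypothetical; ticks and tickslock are the extern declarations from the trap.c section of this header):

    void
    bump_ticks_example(void)
    {
      acquire(&tickslock);   // spin until this hart owns the lock
      ticks += 1;            // shared data is safe to touch while holding it
      release(&tickslock);   // unlock; pop_off() restores interrupt state
    }
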
main.c (42 changed lines)

@@ -4,25 +4,39 @@
 #include "riscv.h"
 #include "defs.h"
 
+volatile static int started = 0;
+
 // Bootstrap processor starts running C code here.
 // Allocate a real stack and switch to it, first
 // doing some setup required for memory allocator to work.
 void
-main(int hartid)
+main()
 {
-  w_tp(hartid);    // save hartid where cpuid() can find it
-  uartinit();      // serial port
-  consoleinit();
-  printf("entering main() on hart %d\n", hartid);
-  kinit();         // physical page allocator
-  kvminit();       // kernel page table
-  procinit();      // process table
-  trapinit();      // trap vectors
-  plicinit();      // set up interrupt controller
-  binit();         // buffer cache
-  fileinit();      // file table
-  ramdiskinit();   // disk
-  userinit();      // first user process
+  if(cpuid() == 0){
+    uartinit();      // serial port
+    consoleinit();
+    printf("hart %d starting\n", cpuid());
+    kinit();         // physical page allocator
+    kvminit();       // create kernel page table
+    kvminithart();   // turn on paging
+    procinit();      // process table
+    trapinit();      // trap vectors
+    trapinithart();  // install kernel trap vector
+    plicinit();      // set up interrupt controller
+    plicinithart();  // ask PLIC for device interrupts
+    binit();         // buffer cache
+    fileinit();      // file table
+    ramdiskinit();   // disk
+    userinit();      // first user process
+    started = 1;
+  } else {
+    while(started == 0)
+      ;
+    printf("hart %d starting\n", cpuid());
+    kvminithart();    // turn on paging
+    trapinithart();   // install kernel trap vector
+    plicinithart();   // ask PLIC for device interrupts
+  }
 
   scheduler();
 }

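main() now runs on every hart: hart 0 does the one-time, machine-wide initialization and then publishes the volatile started flag; the other harts spin on that flag and then repeat only the per-hart steps (kvminithart, trapinithart, plicinithart). The sketch below condenses that handoff; the explicit __sync_synchronize() fences are an assumption for illustration, not something this diff adds (it relies on volatile):

    volatile static int started = 0;     // written by hart 0, read by the rest

    void
    boot_handoff_sketch(void)
    {
      if(cpuid() == 0){
        // ... whole-machine init: kinit(), kvminit(), procinit(), ...
        __sync_synchronize();            // publish the init before the flag
        started = 1;
      } else {
        while(started == 0)
          ;                              // wait for hart 0 to finish
        __sync_synchronize();            // don't read shared state early
        // ... per-hart init: kvminithart(), trapinithart(), plicinithart() ...
      }
    }
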
memlayout.h

@@ -28,6 +28,14 @@
 
 // qemu puts programmable interrupt controller here.
 #define PLIC 0x0c000000L
+#define PLIC_PRIORITY (PLIC + 0x0)
+#define PLIC_PENDING (PLIC + 0x1000)
+#define PLIC_MENABLE(hart) (PLIC + 0x2000 + (hart)*0x100)
+#define PLIC_SENABLE(hart) (PLIC + 0x2080 + (hart)*0x100)
+#define PLIC_MPRIORITY(hart) (PLIC + 0x200000 + (hart)*0x2000)
+#define PLIC_SPRIORITY(hart) (PLIC + 0x201000 + (hart)*0x2000)
+#define PLIC_MCLAIM(hart) (PLIC + 0x200004 + (hart)*0x2000)
+#define PLIC_SCLAIM(hart) (PLIC + 0x201004 + (hart)*0x2000)
 
 #define RAMDISK 0x88000000L

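Each hart has its own bank of PLIC registers, which is what the new macros index: a supervisor-mode enable word, a priority threshold, and a claim/complete register per hart. A sketch of how plicinithart() can use them (plic.c is not part of this diff, so the body, the uint32 casts, and UART0_IRQ are assumptions):

    void
    plicinithart_sketch(void)
    {
      int hart = cpuid();

      // let this hart take the UART's interrupt in supervisor mode.
      *(uint32*)PLIC_SENABLE(hart) = (1 << UART0_IRQ);

      // a priority threshold of 0 means no enabled interrupt is masked.
      *(uint32*)PLIC_SPRIORITY(hart) = 0;
    }
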
proc.c (5 changed lines)

@@ -360,7 +360,7 @@ scheduler(void)
 {
   struct proc *p;
   struct cpu *c = mycpu();
-
+
   c->proc = 0;
   for(;;){
     // Enable interrupts on this processor.
@@ -385,7 +385,6 @@ scheduler(void)
       c->proc = 0;
     }
     release(&ptable.lock);
-
   }
 }
 
@@ -393,7 +392,7 @@ scheduler(void)
 // and have changed proc->state. Saves and restores
 // intena because intena is a property of this
 // kernel thread, not this CPU. It should
-// be proc->intena and proc->ncli, but that would
+// be proc->intena and proc->noff, but that would
 // break in the few places where a lock is held but
 // there's no process.
 void

proc.h (5 changed lines)

@@ -22,9 +22,8 @@ struct context {
 struct cpu {
   struct proc *proc;         // The process running on this cpu or null
   struct context scheduler;  // swtch() here to enter scheduler
-  volatile uint started;     // Has the CPU started?
-  int ncli;                  // Depth of pushcli nesting.
-  int intena;                // Were interrupts enabled before pushcli?
+  int noff;                  // Depth of push_off() nesting.
+  int intena;                // Were interrupts enabled before push_off()?
 };
 
 extern struct cpu cpus[NCPU];

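noff counts how many push_off() calls are outstanding on this CPU, and intena remembers whether interrupts were on before the outermost one; intena is only meaningful while noff > 0. A hypothetical checker makes the invariant explicit (illustration only, not part of the diff):

    void
    check_noff_example(void)
    {
      struct cpu *c = mycpu();

      // whenever any push_off() is outstanding, interrupts must be off.
      if(c->noff > 0 && intr_get())
        panic("noff > 0 but interrupts are on");
    }
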
spinlock.c (95 changed lines)

@@ -5,6 +5,7 @@
 #include "memlayout.h"
 #include "spinlock.h"
 #include "riscv.h"
+#include "proc.h"
 #include "defs.h"
 
 void
@@ -15,27 +16,6 @@ initlock(struct spinlock *lk, char *name)
   lk->cpu = 0;
 }
 
-void
-acquire(struct spinlock *lk)
-{
-  lk->locked = 1;
-  lk->cpu = mycpu();
-}
-
-void
-release(struct spinlock *lk)
-{
-  lk->locked = 0;
-  lk->cpu = 0;
-}
-
-int
-holding(struct spinlock *lk)
-{
-  return lk->locked && lk->cpu == mycpu();
-}
-
-#if 0
 // Acquire the lock.
 // Loops (spins) until the lock is acquired.
 // Holding a lock for a long time may cause
@@ -43,12 +23,14 @@ holding(struct spinlock *lk)
 void
 acquire(struct spinlock *lk)
 {
-  pushcli(); // disable interrupts to avoid deadlock.
+  push_off(); // disable interrupts to avoid deadlock.
   if(holding(lk))
     panic("acquire");
 
   // The xchg is atomic.
-  while(xchg(&lk->locked, 1) != 0)
+  //while(xchg(&lk->locked, 1) != 0)
+  //  ;
+  while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
     ;
 
   // Tell the C compiler and the processor to not move loads or stores
@@ -58,7 +40,6 @@ acquire(struct spinlock *lk)
 
   // Record info about lock acquisition for holding() and debugging.
   lk->cpu = mycpu();
-  getcallerpcs(&lk, lk->pcs);
 }
 
 // Release the lock.
@@ -68,7 +49,6 @@ release(struct spinlock *lk)
   if(!holding(lk))
     panic("release");
 
-  lk->pcs[0] = 0;
   lk->cpu = 0;
 
   // Tell the C compiler and the processor to not move loads or stores
@@ -81,27 +61,10 @@ release(struct spinlock *lk)
   // Release the lock, equivalent to lk->locked = 0.
   // This code can't use a C assignment, since it might
   // not be atomic. A real OS would use C atomics here.
-  asm volatile("movl $0, %0" : "+m" (lk->locked) : );
+  //asm volatile("movl $0, %0" : "+m" (lk->locked) : );
+  __sync_lock_release(&lk->locked);
 
-  popcli();
-}
-
-// Record the current call stack in pcs[] by following the %ebp chain.
-void
-getcallerpcs(void *v, uint64 pcs[])
-{
-  uint64 *ebp;
-  int i;
-
-  asm volatile("mov %%rbp, %0" : "=r" (ebp));
-  for(i = 0; i < 10; i++){
-    if(ebp == 0 || ebp < (uint64*)KERNBASE || ebp == (uint64*)0xffffffff)
-      break;
-    pcs[i] = ebp[1]; // saved %eip
-    ebp = (uint64*)ebp[0]; // saved %ebp
-  }
-  for(; i < 10; i++)
-    pcs[i] = 0;
+  pop_off();
 }
 
 // Check whether this cpu is holding the lock.
@@ -109,37 +72,37 @@ int
 holding(struct spinlock *lk)
 {
   int r;
-  pushcli();
+  push_off();
   r = lk->locked && lk->cpu == mycpu();
-  popcli();
+  pop_off();
   return r;
 }
 
 
-// Pushcli/popcli are like cli/sti except that they are matched:
-// it takes two popcli to undo two pushcli. Also, if interrupts
-// are off, then pushcli, popcli leaves them off.
+// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
+// it takes two pop_off to undo two push_off. Also, if interrupts
+// are initially off, then push_off, pop_off leaves them off.
 
 void
-pushcli(void)
+push_off(void)
 {
-  int eflags;
+  struct cpu *c = mycpu();
+  int old = intr_get();
 
-  eflags = readeflags();
-  cli();
-  if(mycpu()->ncli == 0)
-    mycpu()->intena = eflags & FL_IF;
-  mycpu()->ncli += 1;
+  intr_off();
+  if(c->noff == 0)
+    c->intena = old;
+  c->noff += 1;
 }
 
 void
-popcli(void)
+pop_off(void)
 {
-  if(readeflags()&FL_IF)
-    panic("popcli - interruptible");
-  if(--mycpu()->ncli < 0)
-    panic("popcli");
-  if(mycpu()->ncli == 0 && mycpu()->intena)
-    sti();
+  struct cpu *c = mycpu();
+  if(intr_get())
+    panic("pop_off - interruptible");
+  c->noff -= 1;
+  if(c->noff < 0)
+    panic("pop_off");
+  if(c->noff == 0 && c->intena)
+    intr_on();
 }
-#endif

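The gcc builtins replace the x86 xchg/movl pair: GCC documents __sync_lock_test_and_set() as an atomic exchange with acquire semantics and __sync_lock_release() as a release-ordered store of 0, and on RISC-V they typically compile to amoswap.w plus the appropriate ordering. Stripped of the push_off()/holding() bookkeeping above, the core of the lock is just this (illustrative sketch, not the committed code):

    void
    raw_spin_sketch(volatile uint *locked)
    {
      while(__sync_lock_test_and_set(locked, 1) != 0)
        ;                             // another hart holds it; keep spinning
      // ... critical section ...
      __sync_lock_release(locked);    // store 0, ordered after prior writes
    }
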
spinlock.h

@@ -5,7 +5,5 @@ struct spinlock {
   // For debugging:
   char *name;        // Name of lock.
   struct cpu *cpu;   // The cpu holding the lock.
-  uint64 pcs[10];    // The call stack (an array of program counters)
-                     // that locked the lock.
 };
 
start.c (14 changed lines)

@@ -9,12 +9,12 @@ void main();
 // entry.S needs one stack per CPU.
 __attribute__ ((aligned (16))) char stack0[4096 * NCPU];
 
-// assembly code in kernelvec for machine-mode timer interrupt.
-extern void machinevec();
-
 // scratch area for timer interrupt, one per CPU.
 uint64 mscratch0[NCPU * 32];
 
+// assembly code in kernelvec for machine-mode timer interrupt.
+extern void machinevec();
+
 // entry.S jumps here in machine mode on stack0.
 void
 mstart()
@@ -48,7 +48,9 @@ mstart()
   w_mstatus(r_mstatus() | MSTATUS_MIE);
   w_mie(r_mie() | MIE_MTIE);
 
-  // call main(hartid) in supervisor mode.
-  asm("csrr a0, mhartid ; \
-       mret");
+  // keep each CPU's hartid in its tp register, for cpuid().
+  w_tp(id);
+
+  // call main() in supervisor mode.
+  asm("mret");
 }

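w_tp(id) parks the hartid in the tp register before mret drops into main() in supervisor mode, so cpuid() can recover it without a CSR read. A sketch of the reading side (the r_tp() helper is assumed to live in riscv.h; the real cpuid() is defined elsewhere):

    // read the thread-pointer register, where mstart() stored the hartid.
    static inline uint64
    r_tp()
    {
      uint64 x;
      asm volatile("mv %0, tp" : "=r" (x) );
      return x;
    }

    int
    cpuid_sketch(void)
    {
      return r_tp();
    }
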
trap.c (12 changed lines)

@@ -19,14 +19,16 @@ extern int devintr();
 void
 trapinit(void)
 {
-  int i;
-
-  // set up to take exceptions and traps while in the kernel.
-  w_stvec((uint64)kernelvec);
-
   initlock(&tickslock, "time");
 }
 
+// set up to take exceptions and traps while in the kernel.
+void
+trapinithart(void)
+{
+  w_stvec((uint64)kernelvec);
+}
+
 //
 // handle an interrupt, exception, or system call from user space.
 // called from trampoline.S

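The split mirrors main(): trapinit() now only initializes the shared tickslock, while trapinithart() does the step every hart must repeat, pointing its own stvec CSR at kernelvec. For reference, w_stvec() presumably wraps a csrw like this (riscv.h is not shown in this diff):

    static inline void
    w_stvec(uint64 x)
    {
      // stvec holds this hart's supervisor-mode trap vector address.
      asm volatile("csrw stvec, %0" : : "r" (x));
    }
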
vm.c (8 changed lines)

@@ -54,9 +54,13 @@ kvminit()
   // the highest virtual address in the kernel.
   mappages(kernel_pagetable, TRAMPOLINE, PGSIZE,
            (uint64)trampout, PTE_R | PTE_X);
+}
 
-  // Switch h/w page table register to the kernel's page table,
-  // and enable paging.
+// Switch h/w page table register to the kernel's page table,
+// and enable paging.
+void
+kvminithart()
+{
   w_satp(MAKE_SATP(kernel_pagetable));
 }
 
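kvminit() builds the single kernel page table once; kvminithart() is the per-hart half, loading that table into each hart's own satp. A satp switch is normally paired with an sfence.vma to flush stale TLB entries; the sketch below shows that fuller sequence, which this diff does not include:

    void
    kvminithart_sketch(void)
    {
      // point this hart's satp at the shared kernel page table ...
      w_satp(MAKE_SATP(kernel_pagetable));
      // ... and flush any stale translations cached by this hart.
      asm volatile("sfence.vma zero, zero");
    }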