943fd378a1
Dropped cmpxchg in favor of xchg, to match the lecture notes. Use xchg to release the lock, as future-proofing and to keep gcc from getting clever.
118 lines
2.4 KiB
C
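
The xchg() relied on below is not defined in this file; in xv6 it is an inline wrapper in x86.h around the lock-prefixed xchgl instruction. A minimal sketch of such a wrapper, assuming GCC inline assembly (the exact header contents are not part of this commit):

static inline uint
xchg(volatile uint *addr, uint newval)
{
  uint result;

  // The lock prefix makes the swap atomic across CPUs; asm volatile
  // keeps gcc from reordering, caching, or deleting the exchange.
  asm volatile("lock; xchgl %0, %1" :
               "+m" (*addr), "=a" (result) :
               "1" (newval) :
               "cc");
  return result;
}
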
// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

extern int use_console_lock;

void
initlock(struct spinlock *lock, char *name)
{
  lock->name = name;
  lock->locked = 0;
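  // 0xffffffff is a sentinel meaning "held by no CPU"; see the +10
  // comment in acquire() for how held-by-cpu-0 stays distinguishable.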
  lock->cpu = 0xffffffff;
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lock)
{
  pushcli();
  if(holding(lock))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lock->locked, 1) == 1)
    ;
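  // (Each spin atomically swaps a 1 into lock->locked; a return of 0
  //  means the lock was free and this CPU has just taken it.)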

  // Record info about lock acquisition for debugging.
  // The +10 is only so that we can tell the difference
  // between forgetting to initialize lock->cpu
  // and holding a lock on cpu 0.
  lock->cpu = cpu() + 10;
  getcallerpcs(&lock, lock->pcs);
}
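
// A typical caller (hypothetical names) brackets every access to the
// data a lock protects:
//
//   acquire(&lk);
//   shared_count++;
//   release(&lk);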

// Release the lock.
void
release(struct spinlock *lock)
{
  if(!holding(lock))
    panic("release");

  lock->pcs[0] = 0;
  lock->cpu = 0xffffffff;

  // The xchg serializes, so that reads before release are
  // not reordered after it. (This reordering would be allowed
  // by the Intel manuals, but does not happen on current
  // Intel processors. The xchg being asm volatile also keeps
  // gcc from delaying the above assignments.)
  xchg(&lock->locked, 0);

  popcli();
}

// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;
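  // v is the address of the caller's first argument; in the x86 calling
  // convention, the caller's saved %ebp sits two words below that argument.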
  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu() + 10;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.
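//
// For example (hypothetical sequence, starting with interrupts enabled):
//   pushcli();   // interrupts off, ncli = 1, intena records FL_IF
//   pushcli();   // still off, ncli = 2
//   popcli();    // still off, ncli = 1
//   popcli();    // ncli = 0 and intena set, so sti() re-enables them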

void
pushcli(void)
{
  int eflags;

  eflags = read_eflags();
  cli();
  if(cpus[cpu()].ncli++ == 0)
    cpus[cpu()].intena = eflags & FL_IF;
}

void
popcli(void)
{
  if(read_eflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpus[cpu()].ncli < 0)
    panic("popcli");
  if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
    sti();
}