New scheduler.

Removed cli and sti stack in favor of tracking
number of locks held on each CPU and explicit
conditionals in spinlock.c.
commit 65bd8e139a
parent 40a2a08319
Author: rsc
Date:   2006-07-16 01:15:28 +00:00

11 changed files with 249 additions and 230 deletions
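The discipline that replaces the cli/sti nesting stack is small enough to sketch stand-alone: each CPU counts the spinlocks it currently holds, disables interrupts when the count goes from zero to one, and re-enables them when it drops back to zero. Below is a minimal user-space sketch of the pattern; the printf stubs stand in for the real cli/sti instructions, and lock_enter/lock_exit are invented names (the real code is the acquire1/release1 pair in spinlock.c further down).

#include <stdio.h>

// Stand-ins for the cli/sti instructions (see the x86.h hunk below).
static void cli(void) { printf("interrupts off\n"); }
static void sti(void) { printf("interrupts on\n"); }

// Per-CPU count of held locks, as in the new struct cpu (proc.h below).
static int nlock;

// First acquire on a CPU turns interrupts off...
static void
lock_enter(void)
{
  if(nlock++ == 0)
    cli();
}

// ...and the matching last release turns them back on.
static void
lock_exit(void)
{
  if(--nlock == 0)
    sti();
}

int
main(void)
{
  lock_enter();  // interrupts off
  lock_enter();  // nested acquire: no change
  lock_exit();   // still holding one lock
  lock_exit();   // interrupts back on
  return 0;
}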

console.c

@@ -113,7 +113,7 @@ void
 cprintf(char *fmt, ...)
 {
   int i, state = 0, c;
-  unsigned int *ap = (unsigned int *) &fmt + 1;
+  unsigned int *ap = (unsigned int *)(void*)&fmt + 1;
 
   if(use_console_lock)
     acquire(&console_lock);

defs.h (3 changes)

@@ -13,7 +13,6 @@ struct proc;
 struct jmpbuf;
 void setupsegs(struct proc *);
 struct proc * newproc(void);
-void swtch(int);
 struct spinlock;
 void sleep(void *, struct spinlock *);
 void wakeup(void *);
@@ -22,8 +21,6 @@ void proc_exit(void);
 int proc_kill(int);
 int proc_wait(void);
 void yield(void);
-void cli(void);
-void sti(void);
 
 // swtch.S
 struct jmpbuf;

dot-bochsrc

@@ -107,7 +107,7 @@ romimage: file=$BXSHARE/BIOS-bochs-latest, address=0xf0000
 # 650Mhz Athlon K-7 with Linux 2.4.4/egcs-2.91.66   2 to 2.5 Mips
 # 400Mhz Pentium II with Linux 2.0.36/egcs-1.0.3    1 to 1.8 Mips
 #=======================================================================
-cpu: count=2, ips=10000000
+cpu: count=2, ips=10000000, reset_on_triple_fault=0
 
 #=======================================================================
 # MEGS

main.c (10 changes)

@@ -18,19 +18,19 @@ extern uint8_t _binary_userfs_start[], _binary_userfs_size[];
 extern int use_console_lock;
 
+struct spinlock sillylock;  // hold this to keep interrupts disabled
+
 int
 main()
 {
   struct proc *p;
 
   if (acpu) {
-    cpus[cpu()].clis = 1;
     cprintf("an application processor\n");
     idtinit();  // CPU's idt
     lapic_init(cpu());
     lapic_timerinit();
     lapic_enableintr();
-    sti();
     scheduler();
   }
   acpu = 1;
@@ -40,10 +40,9 @@ main()
   mp_init();  // collect info about this machine
 
+  acquire(&sillylock);
   use_console_lock = 1;
-  cpus[cpu()].clis = 1;  // cpu starts as if we had called cli()
 
   lapic_init(mp_bcpu());
 
   cprintf("\nxV6\n\n");
@@ -56,7 +55,7 @@ main()
   // create fake process zero
   p = &proc[0];
   memset(p, 0, sizeof *p);
-  p->state = WAITING;
+  p->state = SLEEPING;
   p->sz = 4 * PAGE;
   p->mem = kalloc(p->sz);
   memset(p->mem, 0, p->sz);
@@ -88,6 +87,7 @@ main()
   //load_icode(p, _binary_userfs_start, (unsigned) _binary_userfs_size);
   p->state = RUNNABLE;
   cprintf("loaded userfs\n");
+  release(&sillylock);
 
   scheduler();

proc.c (351 changes)

@@ -12,6 +12,7 @@ struct spinlock proc_table_lock;
 struct proc proc[NPROC];
 struct proc *curproc[NCPU];
 int next_pid = 1;
+extern void forkret(void);
 
 /*
  * set up a process's task state and segment descriptors
@@ -96,12 +97,14 @@ newproc()
   *(np->tf) = *(op->tf);
   np->tf->tf_regs.reg_eax = 0;  // so fork() returns 0 in child
 
-  // set up new jmpbuf to start executing at trapret with esp pointing at tf
+  // Set up new jmpbuf to start executing forkret (see trapasm.S)
+  // with esp pointing at tf.  Forkret will call forkret1 (below) to release
+  // the proc_table_lock and then jump into the usual trap return code.
   memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
-  np->jmpbuf.jb_eip = (unsigned) trapret;
+  np->jmpbuf.jb_eip = (unsigned) forkret;
   np->jmpbuf.jb_esp = (unsigned) np->tf - 4;  // -4 for the %eip that isn't actually there
 
-  // copy file descriptors
+  // Copy file descriptors
   for(fd = 0; fd < NOFILE; fd++){
     np->fds[fd] = op->fds[fd];
     if(np->fds[fd])
@@ -111,128 +114,153 @@ newproc()
   return np;
 }
 
+void
+forkret1(void)
+{
+  release(&proc_table_lock);
+}
+
+// Per-CPU process scheduler.
+// Each CPU calls scheduler() after setting itself up.
+// Scheduler never returns.  It loops, doing:
+//  - choose a process to run
+//  - longjmp to start running that process
+//  - eventually that process transfers control back
+//      via longjmp back to the top of scheduler.
 void
 scheduler(void)
 {
-  struct proc *op, *np;
+  struct proc *p;
   int i;
 
   cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
   cpus[cpu()].lastproc = &proc[0];
 
-  setjmp(&cpus[cpu()].jmpbuf);
-
-  op = curproc[cpu()];
-
-  if(op == 0 || op->mtx != &proc_table_lock)
-    acquire1(&proc_table_lock, op);
-
-  if(op){
-    if(op->newstate <= 0 || op->newstate > ZOMBIE)
-      panic("scheduler");
-    op->state = op->newstate;
-    op->newstate = -1;
-    if(op->mtx){
-      struct spinlock *mtx = op->mtx;
-      op->mtx = 0;
-      if(mtx != &proc_table_lock)
-        release1(mtx, op);
-    }
-  }
-
-  // find a runnable process and switch to it
-  curproc[cpu()] = 0;
-  np = cpus[cpu()].lastproc + 1;
-  while(1){
-    for(i = 0; i < NPROC; i++){
-      if(np >= &proc[NPROC])
-        np = &proc[0];
-      if(np->state == RUNNABLE)
-        break;
-      np++;
-    }
-
-    if(i < NPROC){
-      np->state = RUNNING;
-      release1(&proc_table_lock, op);
-      break;
-    }
-
-    release1(&proc_table_lock, op);
-    op = 0;
+  for(;;){
+    // Loop over process table looking for process to run.
     acquire(&proc_table_lock);
-    np = &proc[0];
-  }
+    for(i = 0; i < NPROC; i++){
+      p = &proc[i];
+      if(p->state != RUNNABLE)
+        continue;
 
-  cpus[cpu()].lastproc = np;
-  curproc[cpu()] = np;
-
-  // Run this process.
-  // XXX move this into swtch or trapret or something.
-  // It can run on the other stack.
-
-  // h/w sets busy bit in TSS descriptor sometimes, and faults
-  // if it's set in LTR. so clear tss descriptor busy bit.
-  np->gdt[SEG_TSS].sd_type = STS_T32A;
+      // h/w sets busy bit in TSS descriptor sometimes, and faults
+      // if it's set in LTR. so clear tss descriptor busy bit.
+      p->gdt[SEG_TSS].sd_type = STS_T32A;
 
-  // XXX should probably have an lgdt() function in x86.h
-  // to confine all the inline assembly.
-  // XXX probably ought to lgdt on trap return too, in case
-  // a system call has moved a program or changed its size.
-  asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
-  ltr(SEG_TSS << 3);
+      // XXX should probably have an lgdt() function in x86.h
+      // to confine all the inline assembly.
+      // XXX probably ought to lgdt on trap return too, in case
+      // a system call has moved a program or changed its size.
+      asm volatile("lgdt %0" : : "g" (p->gdt_pd.pd_lim));
+      ltr(SEG_TSS << 3);
 
-  if(0) cprintf("cpu%d: run %d esp=%p callerpc=%p\n", cpu(), np-proc);
-  longjmp(&np->jmpbuf);
+      // Switch to chosen process.  It is the process's job
+      // to release proc_table_lock and then reacquire it
+      // before jumping back to us.
+      if(0) cprintf("cpu%d: run %d\n", cpu(), p-proc);
+      curproc[cpu()] = p;
+      p->state = RUNNING;
+      if(setjmp(&cpus[cpu()].jmpbuf) == 0)
+        longjmp(&p->jmpbuf);
+
+      // Process is done running for now.
+      // It should have changed its p->state before coming back.
+      curproc[cpu()] = 0;
+      if(p->state == RUNNING)
+        panic("swtch to scheduler with state=RUNNING");
+
+      // XXX if not holding proc_table_lock panic.
+    }
+    release(&proc_table_lock);
+
+    if(cpus[cpu()].nlock != 0)
+      panic("holding locks in scheduler");
+
+    // With proc_table_lock released, there are no
+    // locks held on this cpu, so interrupts are enabled.
+    // Hardware interrupts can happen here.
+    // Also, releasing the lock here lets the other CPUs
+    // look for runnable processes too.
+  }
 }
 
-// give up the cpu by switching to the scheduler,
-// which runs on the per-cpu stack.
+// Enter scheduler.  Must already hold proc_table_lock
+// and have changed curproc[cpu()]->state.
 void
-swtch(int newstate)
+sched(void)
 {
-  struct proc *p = curproc[cpu()];
-
-  if(p == 0)
-    panic("swtch no proc");
-  if(p->mtx == 0 && p->locks != 0)
-    panic("swtch w/ locks");
-  if(p->mtx && p->locks != 1)
-    panic("swtch w/ locks 1");
-  if(p->mtx && p->mtx->locked == 0)
-    panic("switch w/ lock but not held");
-  if(p->locks && (read_eflags() & FL_IF))
-    panic("swtch w/ lock but FL_IF");
-
-  p->newstate = newstate;  // basically an argument to scheduler()
-  if(setjmp(&p->jmpbuf) == 0)
+  if(setjmp(&curproc[cpu()]->jmpbuf) == 0)
     longjmp(&cpus[cpu()].jmpbuf);
 }
 
+// Give up the CPU for one scheduling round.
 void
-sleep(void *chan, struct spinlock *mtx)
+yield()
+{
+  struct proc *p;
+
+  if((p=curproc[cpu()]) == 0 || curproc[cpu()]->state != RUNNING)
+    panic("yield");
+  acquire(&proc_table_lock);
+  p->state = RUNNABLE;
+  sched();
+  release(&proc_table_lock);
+}
+
+// Atomically release lock and sleep on chan.
+// Reacquires lock when reawakened.
+void
+sleep(void *chan, struct spinlock *lk)
 {
   struct proc *p = curproc[cpu()];
 
   if(p == 0)
     panic("sleep");
 
+  // Must acquire proc_table_lock in order to
+  // change p->state and then call sched.
+  // Once we hold proc_table_lock, we can be
+  // guaranteed that we won't miss any wakeup
+  // (wakeup runs with proc_table_lock locked),
+  // so it's okay to release lk.
+  if(lk != &proc_table_lock){
+    acquire(&proc_table_lock);
+    release(lk);
+  }
+
+  // Go to sleep.
   p->chan = chan;
-  p->mtx = mtx;  // scheduler will release it
+  p->state = SLEEPING;
+  sched();
 
-  swtch(WAITING);
-
-  if(mtx)
-    acquire(mtx);
+  // Tidy up.
   p->chan = 0;
+
+  // Reacquire original lock.
+  if(lk != &proc_table_lock){
+    release(&proc_table_lock);
+    acquire(lk);
+  }
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock must be held.
 void
 wakeup1(void *chan)
 {
   struct proc *p;
 
   for(p = proc; p < &proc[NPROC]; p++)
-    if(p->state == WAITING && p->chan == chan)
+    if(p->state == SLEEPING && p->chan == chan)
       p->state = RUNNABLE;
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock is acquired and released.
 void
 wakeup(void *chan)
 {
@@ -241,77 +269,9 @@ wakeup(void *chan)
   release(&proc_table_lock);
 }
 
-// give up the CPU but stay marked as RUNNABLE
-void
-yield()
-{
-  if(curproc[cpu()] == 0 || curproc[cpu()]->state != RUNNING)
-    panic("yield");
-  swtch(RUNNABLE);
-}
-
-void
-proc_exit()
-{
-  struct proc *p;
-  struct proc *cp = curproc[cpu()];
-  int fd;
-
-  for(fd = 0; fd < NOFILE; fd++){
-    if(cp->fds[fd]){
-      fd_close(cp->fds[fd]);
-      cp->fds[fd] = 0;
-    }
-  }
-
-  acquire(&proc_table_lock);
-
-  // wake up parent
-  for(p = proc; p < &proc[NPROC]; p++)
-    if(p->pid == cp->ppid)
-      wakeup1(p);
-
-  // abandon children
-  for(p = proc; p < &proc[NPROC]; p++)
-    if(p->ppid == cp->pid)
-      p->pid = 1;
-
-  cp->mtx = &proc_table_lock;
-  swtch(ZOMBIE);
-  panic("a zombie revived");
-}
-
-int
-proc_wait(void)
-{
-  struct proc *p;
-  struct proc *cp = curproc[cpu()];
-  int any, pid;
-
-  acquire(&proc_table_lock);
-
-  while(1){
-    any = 0;
-    for(p = proc; p < &proc[NPROC]; p++){
-      if(p->state == ZOMBIE && p->ppid == cp->pid){
-        kfree(p->mem, p->sz);
-        kfree(p->kstack, KSTACKSIZE);
-        pid = p->pid;
-        p->state = UNUSED;
-        release(&proc_table_lock);
-        return pid;
-      }
-      if(p->state != UNUSED && p->ppid == cp->pid)
-        any = 1;
-    }
-    if(any == 0){
-      release(&proc_table_lock);
-      return -1;
-    }
-    sleep(cp, &proc_table_lock);
-  }
-}
-
+// Kill the process with the given pid.
+// Process won't actually exit until it returns
+// to user space (see trap in trap.c).
 int
 proc_kill(int pid)
 {
@@ -319,9 +279,10 @@ proc_kill(int pid)
   acquire(&proc_table_lock);
   for(p = proc; p < &proc[NPROC]; p++){
-    if(p->pid == pid && p->state != UNUSED){
+    if(p->pid == pid){
       p->killed = 1;
-      if(p->state == WAITING)
+      // Wake process from sleep if necessary.
+      if(p->state == SLEEPING)
         p->state = RUNNABLE;
       release(&proc_table_lock);
       return 0;
@@ -331,26 +292,80 @@ proc_kill(int pid)
   return -1;
 }
 
-// disable interrupts
+// Exit the current process.  Does not return.
+// Exited processes remain in the zombie state
+// until their parent calls wait() to find out they exited.
 void
-cli(void)
+proc_exit()
 {
-  if(cpus[cpu()].clis == 0)
-    __asm __volatile("cli");
-  cpus[cpu()].clis += 1;
-  if((read_eflags() & FL_IF) != 0)
-    panic("cli but enabled");
+  struct proc *p;
+  struct proc *cp = curproc[cpu()];
+  int fd;
+
+  // Close all open files.
+  for(fd = 0; fd < NOFILE; fd++){
+    if(cp->fds[fd]){
+      fd_close(cp->fds[fd]);
+      cp->fds[fd] = 0;
+    }
+  }
+
+  acquire(&proc_table_lock);
+
+  // Wake up our parent.
+  for(p = proc; p < &proc[NPROC]; p++)
+    if(p->pid == cp->ppid)
+      wakeup1(p);
+
+  // Reparent our children to process 1.
+  for(p = proc; p < &proc[NPROC]; p++)
+    if(p->ppid == cp->pid)
+      p->ppid = 1;
+
+  // Jump into the scheduler, never to return.
+  cp->state = ZOMBIE;
+  sched();
+  panic("zombie exit");
 }
 
-// enable interrupts
-void
-sti(void)
+// Wait for a child process to exit and return its pid.
+// Return -1 if this process has no children.
+int
+proc_wait(void)
 {
-  if((read_eflags() & FL_IF) != 0)
-    panic("sti but enabled");
-  if(cpus[cpu()].clis < 1)
-    panic("sti");
-  cpus[cpu()].clis -= 1;
-  if(cpus[cpu()].clis < 1)
-    __asm __volatile("sti");
+  struct proc *p;
+  struct proc *cp = curproc[cpu()];
+  int i, havekids, pid;
+
+  acquire(&proc_table_lock);
+  for(;;){
+    // Scan through table looking zombie children.
+    havekids = 0;
+    for(i = 0; i < NPROC; i++){
+      p = &proc[i];
+      if(p->ppid == cp->pid){
+        if(p->state == ZOMBIE){
+          // Found one.
+          kfree(p->mem, p->sz);
+          kfree(p->kstack, KSTACKSIZE);
+          pid = p->pid;
+          p->state = UNUSED;
+          p->pid = 0;
+          release(&proc_table_lock);
+          return pid;
+        }
+        havekids = 1;
+      }
+    }
+
+    // No point waiting if we don't have any children.
+    if(!havekids){
+      release(&proc_table_lock);
+      return -1;
+    }
+
+    // Wait for children to exit.  (See wakeup1 call in proc_exit.)
+    sleep(cp, &proc_table_lock);
+  }
 }
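The sched()/scheduler() pair above is a coroutine-style handoff: each side saves itself with setjmp and longjmps into the other's saved buffer. It works in xv6 because the two jmpbufs live on different stacks (the process's kernel stack and the CPU's scheduler stack); standard C setjmp/longjmp does not promise that kind of cross-stack round trip, so the stand-alone sketch below substitutes the POSIX ucontext API, which makes the second stack explicit. fake_process and the context names are invented for illustration.

#include <stdio.h>
#include <ucontext.h>

static ucontext_t scheduler_ctx, process_ctx;
static char process_stack[16384];  // the "process" gets its own stack

static void
fake_process(void)
{
  printf("process: running, yielding to scheduler\n");
  swapcontext(&process_ctx, &scheduler_ctx);  // like sched(): save self, resume scheduler
  printf("process: resumed by scheduler\n");
}

int
main(void)
{
  // Build the process context on its own stack, like a kernel stack.
  getcontext(&process_ctx);
  process_ctx.uc_stack.ss_sp = process_stack;
  process_ctx.uc_stack.ss_size = sizeof process_stack;
  process_ctx.uc_link = &scheduler_ctx;  // where to go when the function returns
  makecontext(&process_ctx, fake_process, 0);

  // "scheduler" switches to the chosen process, and is resumed
  // when the process swaps back -- the same shape as the loop above.
  swapcontext(&scheduler_ctx, &process_ctx);
  printf("scheduler: process yielded, resuming it\n");
  swapcontext(&scheduler_ctx, &process_ctx);
  printf("scheduler: process finished\n");
  return 0;
}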

proc.h (5 changes)

@@ -33,7 +33,7 @@ struct jmpbuf {
   int jb_eip;
 };
 
-enum proc_state { UNUSED, EMBRYO, WAITING, RUNNABLE, RUNNING, ZOMBIE };
+enum proc_state { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
 
 struct proc{
   char *mem;  // start of process's physical memory
@@ -46,7 +46,6 @@ struct proc{
   int ppid;
   void *chan;  // sleep
   int killed;
-  int locks;  // # of locks currently held
 
   struct fd *fds[NOFILE];
   struct Taskstate ts;  // only to give cpu address of kernel stack
@@ -71,7 +70,7 @@ struct cpu {
   struct jmpbuf jmpbuf;
   char mpstack[MPSTACK];  // per-cpu start-up stack, only used to get into main()
   struct proc *lastproc;  // last proc scheduled on this cpu (never NULL)
-  int clis;   // cli() nesting depth
+  int nlock;  // # of locks currently held
 };
 
 extern struct cpu cpus[NCPU];

spinlock.c

@@ -6,42 +6,35 @@
 #include "proc.h"
 #include "spinlock.h"
 
-#define DEBUG 0
+// Can't call cprintf from inside these routines,
+// because cprintf uses them itself.
+#define cprintf dont_use_cprintf
 
 extern int use_console_lock;
 
-int getcallerpc(void *v) {
+int
+getcallerpc(void *v)
+{
   return ((int*)v)[-1];
 }
 
 void
 acquire1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf("cpu%d: acquiring at %x\n", cpu(), getcallerpc(&lock));
-  cli();
-  while ( cmpxchg(0, 1, &lock->locked) == 1 ) { ; }
+  if(cpus[cpu()].nlock++ == 0)
+    cli();
+  while(cmpxchg(0, 1, &lock->locked) == 1)
+    ;
+  cpuid(0, 0, 0, 0, 0);  // memory barrier
   lock->locker_pc = getcallerpc(&lock);
-  if(cp)
-    cp->locks += 1;
-  if(DEBUG) cprintf("cpu%d: acquired at %x\n", cpu(), getcallerpc(&lock));
 }
 
 void
 release1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf ("cpu%d: releasing at %x\n", cpu(), getcallerpc(&lock));
-  if(lock->locked != 1)
-    panic("release");
-  if(cp)
-    cp->locks -= 1;
-  cmpxchg(1, 0, &lock->locked);
-  sti();
+  cpuid(0, 0, 0, 0, 0);  // memory barrier
+  lock->locked = 0;
+  if(--cpus[cpu()].nlock == 0)
+    sti();
 }
 
@@ -56,3 +49,4 @@ release(struct spinlock *lock)
 {
   release1(lock, curproc[cpu()]);
 }
+
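Taken together, the proc.c and spinlock.c changes give sleep/wakeup its lost-wakeup guarantee: sleep acquires proc_table_lock before releasing the caller's lock, and wakeup cannot run until it too holds proc_table_lock. Here is a sketch of how a caller might use the new interface, in the classic producer/consumer shape; buf_lock, buf_full, and both function names are invented, and only acquire, release, sleep, and wakeup come from this diff.

// Assumes xv6's kernel headers, as included elsewhere in this diff.
#include "defs.h"
#include "spinlock.h"

struct spinlock buf_lock;
int buf_full;

void
consume(void)
{
  acquire(&buf_lock);
  while(!buf_full)                // recheck: wakeup wakes every sleeper on chan
    sleep(&buf_full, &buf_lock);  // atomically releases buf_lock and sleeps
  buf_full = 0;                   // consume the item; buf_lock held again here
  release(&buf_lock);
}

void
produce(void)
{
  acquire(&buf_lock);
  buf_full = 1;
  wakeup(&buf_full);  // sleepers wake, then reacquire buf_lock inside sleep()
  release(&buf_lock);
}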

syscall.c

@@ -34,8 +34,9 @@ fetchint(struct proc *p, unsigned addr, int *ip)
   return 0;
 }
 
+// This arg is void* so that both int* and uint* can be passed.
 int
-fetcharg(int argno, int *ip)
+fetcharg(int argno, void *ip)
 {
   unsigned esp;

trap.c (11 changes)

@@ -36,11 +36,6 @@ trap(struct Trapframe *tf)
 {
   int v = tf->tf_trapno;
 
-  if(cpus[cpu()].clis){
-    cprintf("cpu %d v %d eip %x\n", cpu(), v, tf->tf_eip);
-    panic("interrupt while interrupts are off");
-  }
-
   if(v == T_SYSCALL){
     struct proc *cp = curproc[cpu()];
     int num = cp->tf->tf_regs.reg_eax;
@@ -56,12 +51,10 @@ trap(struct Trapframe *tf)
       panic("trap ret but not RUNNING");
     if(tf != cp->tf)
       panic("trap ret wrong tf");
-    if(cp->locks){
+    if(cpus[cpu()].nlock){
       cprintf("num=%d\n", num);
       panic("syscall returning locks held");
     }
-    if(cpus[cpu()].clis)
-      panic("syscall returning but clis != 0");
     if((read_eflags() & FL_IF) == 0)
       panic("syscall returning but FL_IF clear");
     if(read_esp() < (unsigned)cp->kstack ||
@@ -75,7 +68,7 @@ trap(struct Trapframe *tf)
   if(v == (IRQ_OFFSET + IRQ_TIMER)){
     struct proc *cp = curproc[cpu()];
     lapic_timerintr();
-    if(cp && cp->locks)
+    if(cpus[cpu()].nlock)
       panic("timer interrupt while holding a lock");
     if(cp){
 #if 1

trapasm.S

@@ -1,8 +1,10 @@
 #include "mmu.h"
 
 .text
-.globl alltraps
 .globl trap
+.globl trapret1
+.globl alltraps
 alltraps:
   /* vectors.S sends all traps here */
   pushl %ds     # build
@@ -16,11 +18,11 @@ alltraps:
   addl $4, %esp
   # return falls through to trapret...
 
-.globl trapret
 /*
  * a forked process RETs here
  * expects ESP to point to a Trapframe
  */
+.globl trapret
 trapret:
   popal
   popl %es
@@ -28,6 +30,10 @@ trapret:
   addl $0x8, %esp  /* trapno and errcode */
   iret
 
+.globl forkret
+forkret:
+  call forkret1
+  jmp trapret
+
 .globl acpu
 acpu:

x86.h (14 changes)

@@ -29,6 +29,8 @@ static __inline uint32_t read_ebp(void) __attribute__((always_inline));
 static __inline uint32_t read_esp(void) __attribute__((always_inline));
 static __inline void cpuid(uint32_t info, uint32_t *eaxp, uint32_t *ebxp, uint32_t *ecxp, uint32_t *edxp);
 static __inline uint64_t read_tsc(void) __attribute__((always_inline));
+static __inline void cli(void) __attribute__((always_inline));
+static __inline void sti(void) __attribute__((always_inline));
 
 static __inline void
 breakpoint(void)
@@ -304,6 +306,18 @@ read_tsc(void)
   return tsc;
 }
 
+static __inline void
+cli(void)
+{
+  __asm__ volatile("cli");
+}
+
+static __inline void
+sti(void)
+{
+  __asm__ volatile("sti");
+}
+
 struct PushRegs {
   /* registers as pushed by pusha */
   uint32_t reg_edi;