2006-06-12 15:22:12 +00:00
|
|
|
#include "types.h"
|
2007-08-27 23:26:33 +00:00
|
|
|
#include "param.h"
|
2011-07-29 11:31:27 +00:00
|
|
|
#include "memlayout.h"
|
2019-05-31 13:45:59 +00:00
|
|
|
#include "riscv.h"
|
2006-07-12 01:48:35 +00:00
|
|
|
#include "spinlock.h"
|
2019-07-02 13:14:47 +00:00
|
|
|
#include "proc.h"
|
2019-05-31 13:45:59 +00:00
|
|
|
#include "defs.h"
|
2006-07-12 01:48:35 +00:00
|
|
|
|
2019-05-31 13:45:59 +00:00
|
|
|
struct cpu cpus[NCPU];
|
|
|
|
|
2019-07-10 12:57:51 +00:00
|
|
|
struct proc proc[NPROC];
|
|
|
|
|
2019-05-31 13:45:59 +00:00
|
|
|
struct proc *initproc;
|
2007-08-23 14:35:28 +00:00
|
|
|
|
2007-08-22 06:01:32 +00:00
|
|
|
int nextpid = 1;
|
2019-07-10 12:57:51 +00:00
|
|
|
struct spinlock pid_lock;
|
2019-07-07 19:20:13 +00:00
|
|
|
|
2006-07-16 01:15:28 +00:00
|
|
|
extern void forkret(void);
|
2019-07-02 13:14:47 +00:00
|
|
|
static void wakeup1(struct proc *chan);
|
2010-09-02 18:30:06 +00:00
|
|
|
|
2019-07-26 08:53:46 +00:00
|
|
|
extern char trampoline[]; // trampoline.S
|
2019-05-31 13:45:59 +00:00
|
|
|
|
2006-08-10 22:08:14 +00:00
|
|
|
void
|
2019-05-31 13:45:59 +00:00
|
|
|
procinit(void)
|
2006-08-10 22:08:14 +00:00
|
|
|
{
|
2019-07-02 23:29:14 +00:00
|
|
|
struct proc *p;
|
|
|
|
|
2019-07-07 19:20:13 +00:00
|
|
|
initlock(&pid_lock, "nextpid");
|
2019-07-19 12:38:51 +00:00
|
|
|
for(p = proc; p < &proc[NPROC]; p++) {
|
2019-07-02 23:29:14 +00:00
|
|
|
initlock(&p->lock, "proc");
|
2019-07-23 16:17:17 +00:00
|
|
|
|
|
|
|
// Allocate a page for the process's kernel stack.
|
|
|
|
// Map it high in memory, followed by an invalid
|
|
|
|
// guard page.
|
|
|
|
char *pa = kalloc();
|
|
|
|
if(pa == 0)
|
|
|
|
panic("kalloc");
|
|
|
|
uint64 va = KSTACK((int) (p - proc));
|
2019-07-24 19:28:37 +00:00
|
|
|
kvmmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
|
2019-07-23 16:17:17 +00:00
|
|
|
p->kstack = va;
|
2019-07-19 12:38:51 +00:00
|
|
|
}
|
2019-07-23 16:17:17 +00:00
|
|
|
kvminithart();
|
2006-08-10 22:08:14 +00:00
|
|
|
}
|
|
|
|
|
2019-06-05 15:42:03 +00:00
|
|
|
// Return this CPU's hart number.
// Must be called with interrupts disabled,
// to prevent race with process being moved
// to a different CPU.
int
cpuid()
{
  // The tp register holds the hart id for kernel code.
  return r_tp();
}
|
|
|
|
|
2019-07-23 15:14:10 +00:00
|
|
|
// Return this CPU's cpu struct.
|
2019-06-05 15:42:03 +00:00
|
|
|
// Interrupts must be disabled.
|
2018-10-10 00:22:48 +00:00
|
|
|
struct cpu*
|
|
|
|
mycpu(void) {
|
2019-06-05 15:42:03 +00:00
|
|
|
int id = cpuid();
|
|
|
|
struct cpu *c = &cpus[id];
|
2018-10-10 00:22:48 +00:00
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2019-07-20 14:17:26 +00:00
|
|
|
// Return the current struct proc *, or zero if none.
|
2017-02-01 01:21:14 +00:00
|
|
|
struct proc*
|
|
|
|
myproc(void) {
|
2019-06-05 18:14:57 +00:00
|
|
|
push_off();
|
2019-06-05 15:42:03 +00:00
|
|
|
struct cpu *c = mycpu();
|
|
|
|
struct proc *p = c->proc;
|
2019-06-05 18:14:57 +00:00
|
|
|
pop_off();
|
2019-06-05 15:42:03 +00:00
|
|
|
return p;
|
2017-01-31 22:47:16 +00:00
|
|
|
}
|
|
|
|
|
2019-07-02 23:29:14 +00:00
|
|
|
int
|
|
|
|
allocpid() {
|
|
|
|
int pid;
|
|
|
|
|
2019-07-07 19:20:13 +00:00
|
|
|
acquire(&pid_lock);
|
2019-07-10 18:54:34 +00:00
|
|
|
pid = nextpid;
|
|
|
|
nextpid = nextpid + 1;
|
2019-07-07 19:20:13 +00:00
|
|
|
release(&pid_lock);
|
2019-07-10 18:54:34 +00:00
|
|
|
|
2019-07-02 23:29:14 +00:00
|
|
|
return pid;
|
|
|
|
}
|
|
|
|
|
2007-08-22 06:01:32 +00:00
|
|
|
// Look in the process table for an UNUSED proc.
|
2019-07-08 15:11:00 +00:00
|
|
|
// If found, initialize state required to run in the kernel,
|
2019-07-07 18:57:16 +00:00
|
|
|
// and return with p->lock held.
|
2019-07-10 14:13:08 +00:00
|
|
|
// If there are no free procs, return 0.
|
2007-08-22 06:01:32 +00:00
|
|
|
static struct proc*
|
|
|
|
allocproc(void)
|
2006-06-12 15:22:12 +00:00
|
|
|
{
|
2007-08-22 06:01:32 +00:00
|
|
|
struct proc *p;
|
2006-06-12 15:22:12 +00:00
|
|
|
|
2019-07-07 19:20:13 +00:00
|
|
|
for(p = proc; p < &proc[NPROC]; p++) {
|
2019-07-02 23:29:14 +00:00
|
|
|
acquire(&p->lock);
|
|
|
|
if(p->state == UNUSED) {
|
2009-05-31 00:28:45 +00:00
|
|
|
goto found;
|
2019-07-02 23:29:14 +00:00
|
|
|
} else {
|
|
|
|
release(&p->lock);
|
|
|
|
}
|
|
|
|
}
|
2007-08-22 06:01:32 +00:00
|
|
|
return 0;
|
2009-05-31 00:28:45 +00:00
|
|
|
|
|
|
|
found:
|
2019-07-02 23:29:14 +00:00
|
|
|
p->pid = allocpid();
|
2009-05-31 00:28:45 +00:00
|
|
|
|
2019-05-31 13:45:59 +00:00
|
|
|
// Allocate a trapframe page.
|
|
|
|
if((p->tf = (struct trapframe *)kalloc()) == 0){
|
2019-07-10 14:13:08 +00:00
|
|
|
release(&p->lock);
|
2019-05-31 13:45:59 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2018-09-29 12:30:50 +00:00
|
|
|
|
2019-05-31 13:45:59 +00:00
|
|
|
// An empty user page table.
|
2019-05-31 16:43:20 +00:00
|
|
|
p->pagetable = proc_pagetable(p);
|
|
|
|
|
|
|
|
// Set up new context to start executing at forkret,
|
|
|
|
// which returns to user space.
|
2019-10-16 16:27:08 +00:00
|
|
|
memset(&p->context, 0, sizeof(p->context));
|
2019-05-31 16:43:20 +00:00
|
|
|
p->context.ra = (uint64)forkret;
|
2019-07-23 16:17:17 +00:00
|
|
|
p->context.sp = p->kstack + PGSIZE;
|
2019-05-31 16:43:20 +00:00
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2019-07-01 21:46:06 +00:00
|
|
|
// free a proc structure and the data hanging from it,
|
|
|
|
// including user pages.
|
2019-07-07 18:57:16 +00:00
|
|
|
// p->lock must be held.
|
2019-07-01 21:46:06 +00:00
|
|
|
static void
|
|
|
|
freeproc(struct proc *p)
|
|
|
|
{
|
|
|
|
if(p->tf)
|
|
|
|
kfree((void*)p->tf);
|
|
|
|
p->tf = 0;
|
|
|
|
if(p->pagetable)
|
|
|
|
proc_freepagetable(p->pagetable, p->sz);
|
|
|
|
p->pagetable = 0;
|
2019-07-02 15:17:50 +00:00
|
|
|
p->sz = 0;
|
2019-07-01 21:46:06 +00:00
|
|
|
p->pid = 0;
|
|
|
|
p->parent = 0;
|
|
|
|
p->name[0] = 0;
|
2019-07-02 15:17:50 +00:00
|
|
|
p->chan = 0;
|
2019-07-01 21:46:06 +00:00
|
|
|
p->killed = 0;
|
2019-09-10 16:30:10 +00:00
|
|
|
p->xstate = 0;
|
2019-07-01 21:46:06 +00:00
|
|
|
p->state = UNUSED;
|
|
|
|
}
|
|
|
|
|
2019-05-31 16:43:20 +00:00
|
|
|
// Create a page table for a given process,
// with no user pages, but with trampoline pages.
pagetable_t
proc_pagetable(struct proc *p)
{
  pagetable_t pagetable;

  // An empty page table.
  pagetable = uvmcreate();

  // map the trampoline code (for system call return)
  // at the highest user virtual address.
  // only the supervisor uses it, on the way
  // to/from user space, so not PTE_U.
  // NOTE(review): the return value of mappages is ignored here and
  // below; a failure would leave a partially-built page table.
  mappages(pagetable, TRAMPOLINE, PGSIZE,
           (uint64)trampoline, PTE_R | PTE_X);

  // map the trapframe just below TRAMPOLINE, for trampoline.S.
  // The physical page (p->tf) was allocated by allocproc().
  mappages(pagetable, TRAPFRAME, PGSIZE,
           (uint64)(p->tf), PTE_R | PTE_W);

  return pagetable;
}
|
2010-09-13 19:34:44 +00:00
|
|
|
|
2019-05-31 16:43:20 +00:00
|
|
|
// Free a process's page table, and free the
// physical memory it refers to.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  // Unmap the trampoline and trapframe without freeing the
  // underlying physical pages (do_free == 0): the trampoline page
  // is shared by all processes, and the trapframe page is freed
  // separately by freeproc().
  uvmunmap(pagetable, TRAMPOLINE, PGSIZE, 0);
  uvmunmap(pagetable, TRAPFRAME, PGSIZE, 0);
  // Free all user memory up to sz, plus the page-table pages.
  uvmfree(pagetable, sz);
}
|
|
|
|
|
2019-05-31 15:45:42 +00:00
|
|
|
// a user program that calls exec("/init")
// od -t xC initcode
// (RISC-V machine code for user/initcode.S, embedded so the very
// first process can run before the file system is usable.)
uchar initcode[] = {
  0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x05, 0x02,
  0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x05, 0x02,
  0x9d, 0x48, 0x73, 0x00, 0x00, 0x00, 0x89, 0x48,
  0x73, 0x00, 0x00, 0x00, 0xef, 0xf0, 0xbf, 0xff,
  0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x01,
  0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  0x00, 0x00, 0x00
};
|
|
|
|
|
2009-08-08 08:07:30 +00:00
|
|
|
// Set up first user process.
// Called once from main() at boot.
void
userinit(void)
{
  struct proc *p;

  // allocproc() returns with p->lock held.
  // NOTE(review): the result is not checked for 0; at boot time
  // allocation is assumed to succeed.
  p = allocproc();
  initproc = p;

  // allocate one user page and copy init's instructions
  // and data into it.
  uvminit(p->pagetable, initcode, sizeof(initcode));
  p->sz = PGSIZE;

  // prepare for the very first "return" from kernel to user.
  p->tf->epc = 0;      // user program counter
  p->tf->sp = PGSIZE;  // user stack pointer

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  // Make the process visible to the scheduler, then drop the
  // lock acquired by allocproc().
  p->state = RUNNABLE;

  release(&p->lock);
}
|
|
|
|
|
2019-07-10 14:13:08 +00:00
|
|
|
// Grow or shrink user memory by n bytes.
|
2009-05-31 00:28:45 +00:00
|
|
|
// Return 0 on success, -1 on failure.
|
2006-09-08 14:26:51 +00:00
|
|
|
int
|
|
|
|
growproc(int n)
|
|
|
|
{
|
2011-01-11 18:01:13 +00:00
|
|
|
uint sz;
|
2019-05-31 13:45:59 +00:00
|
|
|
struct proc *p = myproc();
|
2016-08-25 13:13:00 +00:00
|
|
|
|
2019-05-31 13:45:59 +00:00
|
|
|
sz = p->sz;
|
2010-08-10 21:08:41 +00:00
|
|
|
if(n > 0){
|
2019-07-02 13:14:47 +00:00
|
|
|
if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
|
2010-08-10 21:08:41 +00:00
|
|
|
return -1;
|
2019-07-02 13:14:47 +00:00
|
|
|
}
|
2010-08-10 21:08:41 +00:00
|
|
|
} else if(n < 0){
|
2019-09-20 15:35:27 +00:00
|
|
|
sz = uvmdealloc(p->pagetable, sz, sz + n);
|
2010-08-10 21:08:41 +00:00
|
|
|
}
|
2019-05-31 13:45:59 +00:00
|
|
|
p->sz = sz;
|
2009-05-31 00:28:45 +00:00
|
|
|
return 0;
|
2006-09-08 14:26:51 +00:00
|
|
|
}
|
|
|
|
|
2019-07-10 14:13:08 +00:00
|
|
|
// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
// Returns the child's pid in the parent, or -1 on failure.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *p = myproc();

  // Allocate process. On success, allocproc() returns with
  // np->lock held.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy user memory from parent to child.
  if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
    freeproc(np);
    release(&np->lock);
    return -1;
  }
  np->sz = p->sz;

  np->parent = p;

  // copy saved user registers.
  *(np->tf) = *(p->tf);

  // Cause fork to return 0 in the child.
  // (a0 is the RISC-V system-call return register.)
  np->tf->a0 = 0;

  // increment reference counts on open file descriptors.
  for(i = 0; i < NOFILE; i++)
    if(p->ofile[i])
      np->ofile[i] = filedup(p->ofile[i]);
  np->cwd = idup(p->cwd);

  safestrcpy(np->name, p->name, sizeof(p->name));

  pid = np->pid;

  // Publish the child to the scheduler, then drop its lock.
  np->state = RUNNABLE;

  release(&np->lock);

  return pid;
}
|
|
|
|
|
2019-07-10 12:57:51 +00:00
|
|
|
// Pass p's abandoned children to init.
// Caller must hold p->lock.
void
reparent(struct proc *p)
{
  struct proc *pp;

  // Scan the whole table; any proc whose parent is p is an
  // abandoned child.
  for(pp = proc; pp < &proc[NPROC]; pp++){
    // this code uses pp->parent without holding pp->lock.
    // acquiring the lock first could cause a deadlock
    // if pp or a child of pp were also in exit()
    // and about to try to lock p.
    if(pp->parent == p){
      // pp->parent can't change between the check and the acquire()
      // because only the parent changes it, and we're the parent.
      acquire(&pp->lock);
      pp->parent = initproc;
      // we should wake up init here, but that would require
      // initproc->lock, which would be a deadlock, since we hold
      // the lock on one of init's children (pp). this is why
      // exit() always wakes init (before acquiring any locks).
      release(&pp->lock);
    }
  }
}
|
|
|
|
|
2010-09-02 08:15:17 +00:00
|
|
|
// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
void
exit(int status)
{
  struct proc *p = myproc();

  if(p == initproc)
    panic("init exiting");

  // Close all open files.
  for(int fd = 0; fd < NOFILE; fd++){
    if(p->ofile[fd]){
      struct file *f = p->ofile[fd];
      fileclose(f);
      p->ofile[fd] = 0;
    }
  }

  // Drop the reference to the current directory.
  // begin_op/end_op bracket the file-system transaction.
  begin_op();
  iput(p->cwd);
  end_op();
  p->cwd = 0;

  // we might re-parent a child to init. we can't be precise about
  // waking up init, since we can't acquire its lock once we've
  // acquired any other proc lock. so wake up init whether that's
  // necessary or not. init may miss this wakeup, but that seems
  // harmless.
  acquire(&initproc->lock);
  wakeup1(initproc);
  release(&initproc->lock);

  // grab a copy of p->parent, to ensure that we unlock the same
  // parent we locked. in case our parent gives us away to init while
  // we're waiting for the parent lock. we may then race with an
  // exiting parent, but the result will be a harmless spurious wakeup
  // to a dead or wrong process; proc structs are never re-allocated
  // as anything else.
  acquire(&p->lock);
  struct proc *original_parent = p->parent;
  release(&p->lock);

  // we need the parent's lock in order to wake it up from wait().
  // the parent-then-child rule says we have to lock it first.
  acquire(&original_parent->lock);

  acquire(&p->lock);

  // Give any children to init.
  reparent(p);

  // Parent might be sleeping in wait().
  wakeup1(original_parent);

  // Record the exit status for wait() and become a zombie;
  // freeproc() happens later, in the parent's wait().
  p->xstate = status;
  p->state = ZOMBIE;

  release(&original_parent->lock);

  // Jump into the scheduler, never to return.
  // p->lock is still held; the scheduler releases it.
  sched();
  panic("zombie exit");
}
|
|
|
|
|
|
|
|
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
// If addr is non-zero, copy the child's exit status to user
// address addr.
int
wait(uint64 addr)
{
  struct proc *np;
  int havekids, pid;
  struct proc *p = myproc();

  // hold p->lock for the whole time to avoid lost
  // wakeups from a child's exit().
  acquire(&p->lock);

  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(np = proc; np < &proc[NPROC]; np++){
      // this code uses np->parent without holding np->lock.
      // acquiring the lock first would cause a deadlock,
      // since np might be an ancestor, and we already hold p->lock.
      if(np->parent == p){
        // np->parent can't change between the check and the acquire()
        // because only the parent changes it, and we're the parent.
        acquire(&np->lock);
        havekids = 1;
        if(np->state == ZOMBIE){
          // Found one.
          pid = np->pid;
          // Copy the exit status out to user space if requested.
          if(addr != 0 && copyout(p->pagetable, addr, (char *)&np->xstate,
                                  sizeof(np->xstate)) < 0) {
            release(&np->lock);
            release(&p->lock);
            return -1;
          }
          freeproc(np);
          release(&np->lock);
          release(&p->lock);
          return pid;
        }
        release(&np->lock);
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || p->killed){
      release(&p->lock);
      return -1;
    }

    // Wait for a child to exit.
    // sleep() releases p->lock and reacquires it on wakeup.
    sleep(p, &p->lock);  //DOC: wait-sleep
  }
}
|
|
|
|
|
2006-09-06 17:27:19 +00:00
|
|
|
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run.
//  - swtch to start running that process.
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();

  c->proc = 0;
  for(;;){
    // Avoid deadlock by ensuring that devices can interrupt.
    intr_on();

    for(p = proc; p < &proc[NPROC]; p++) {
      acquire(&p->lock);
      if(p->state == RUNNABLE) {
        // Switch to chosen process.  It is the process's job
        // to release its lock and then reacquire it
        // before jumping back to us.
        p->state = RUNNING;
        c->proc = p;
        swtch(&c->scheduler, &p->context);

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        c->proc = 0;
      }
      release(&p->lock);
    }
  }
}
|
2006-06-12 15:22:12 +00:00
|
|
|
|
2019-07-10 14:13:08 +00:00
|
|
|
// Switch to scheduler.  Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  // Sanity-check the caller's lock discipline before switching.
  if(!holding(&p->lock))
    panic("sched p->lock");
  if(mycpu()->noff != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(intr_get())
    panic("sched interruptible");

  intena = mycpu()->intena;
  swtch(&p->context, &mycpu()->scheduler);
  mycpu()->intena = intena;
}
|
|
|
|
|
2006-07-16 01:15:28 +00:00
|
|
|
// Give up the CPU for one scheduling round.
void
yield(void)
{
  struct proc *p = myproc();
  // sched() requires p->lock held and p->state already changed.
  acquire(&p->lock);
  p->state = RUNNABLE;
  sched();
  release(&p->lock);
}
|
2006-06-15 19:58:01 +00:00
|
|
|
|
2006-08-29 21:35:30 +00:00
|
|
|
// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
  // first is shared by all processes; only the very first
  // process to run performs file-system initialization.
  static int first = 1;

  // Still holding p->lock from scheduler.
  release(&myproc()->lock);

  if (first) {
    // File system initialization must be run in the context of a
    // regular process (e.g., because it calls sleep), and thus cannot
    // be run from main().
    first = 0;
    fsinit(ROOTDEV);
  }

  // Return to user space as if from a trap.
  usertrapret();
}
|
|
|
|
|
2006-07-16 01:15:28 +00:00
|
|
|
// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  // Must acquire p->lock in order to
  // change p->state and then call sched.
  // Once we hold p->lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup locks p->lock),
  // so it's okay to release lk.
  if(lk != &p->lock){  //DOC: sleeplock0
    acquire(&p->lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  // Give up the CPU; sched() returns when wakeup() has
  // marked us RUNNABLE and the scheduler runs us again.
  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &p->lock){
    release(&p->lock);
    acquire(lk);
  }
}
|
|
|
|
|
2019-07-10 12:57:51 +00:00
|
|
|
// Wake up all processes sleeping on chan.
|
|
|
|
// Must be called without any p->lock.
|
2006-07-15 12:03:57 +00:00
|
|
|
void
|
|
|
|
wakeup(void *chan)
|
|
|
|
{
|
2019-07-02 13:14:47 +00:00
|
|
|
struct proc *p;
|
|
|
|
|
2019-07-07 19:20:13 +00:00
|
|
|
for(p = proc; p < &proc[NPROC]; p++) {
|
2019-07-02 23:29:14 +00:00
|
|
|
acquire(&p->lock);
|
2019-07-02 13:14:47 +00:00
|
|
|
if(p->state == SLEEPING && p->chan == chan) {
|
|
|
|
p->state = RUNNABLE;
|
|
|
|
}
|
2019-07-02 23:29:14 +00:00
|
|
|
release(&p->lock);
|
|
|
|
}
|
2006-06-15 19:58:01 +00:00
|
|
|
}
|
2006-07-11 17:39:45 +00:00
|
|
|
|
2019-07-20 21:07:20 +00:00
|
|
|
// Wake up p if it is sleeping in wait(); used by exit().
// Caller must hold p->lock.
// (wait() sleeps on its own proc pointer, hence the chan == p test.)
static void
wakeup1(struct proc *p)
{
  if(!holding(&p->lock))
    panic("wakeup1");
  if(p->chan == p && p->state == SLEEPING) {
    p->state = RUNNABLE;
  }
}
|
|
|
|
|
2006-07-16 01:15:28 +00:00
|
|
|
// Kill the process with the given pid.
|
2019-07-10 14:13:08 +00:00
|
|
|
// The victim won't exit until it tries to return
|
2019-07-10 13:24:50 +00:00
|
|
|
// to user space (see usertrap() in trap.c).
|
2006-07-16 01:15:28 +00:00
|
|
|
int
|
2007-08-28 19:14:43 +00:00
|
|
|
kill(int pid)
|
2006-07-11 17:39:45 +00:00
|
|
|
{
|
2006-07-16 01:15:28 +00:00
|
|
|
struct proc *p;
|
|
|
|
|
2019-07-07 19:20:13 +00:00
|
|
|
for(p = proc; p < &proc[NPROC]; p++){
|
2019-07-10 13:24:50 +00:00
|
|
|
acquire(&p->lock);
|
2006-07-16 01:15:28 +00:00
|
|
|
if(p->pid == pid){
|
|
|
|
p->killed = 1;
|
2019-07-10 13:24:50 +00:00
|
|
|
if(p->state == SLEEPING){
|
|
|
|
// Wake process from sleep().
|
2006-07-16 01:15:28 +00:00
|
|
|
p->state = RUNNABLE;
|
2019-07-10 13:24:50 +00:00
|
|
|
}
|
2019-07-02 13:14:47 +00:00
|
|
|
release(&p->lock);
|
2006-07-16 01:15:28 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2019-07-10 13:24:50 +00:00
|
|
|
release(&p->lock);
|
2006-07-16 01:15:28 +00:00
|
|
|
}
|
|
|
|
return -1;
|
2006-07-11 17:39:45 +00:00
|
|
|
}
|
|
|
|
|
2019-06-04 09:57:47 +00:00
|
|
|
// Copy to either a user address, or kernel address,
|
|
|
|
// depending on usr_dst.
|
|
|
|
// Returns 0 on success, -1 on error.
|
|
|
|
int
|
2019-06-05 18:31:13 +00:00
|
|
|
either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
|
2019-06-04 09:57:47 +00:00
|
|
|
{
|
|
|
|
struct proc *p = myproc();
|
|
|
|
if(user_dst){
|
|
|
|
return copyout(p->pagetable, dst, src, len);
|
|
|
|
} else {
|
|
|
|
memmove((char *)dst, src, len);
|
2019-06-05 18:31:13 +00:00
|
|
|
return 0;
|
2019-06-04 09:57:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy from either a user address, or kernel address,
|
|
|
|
// depending on usr_src.
|
|
|
|
// Returns 0 on success, -1 on error.
|
|
|
|
int
|
2019-06-05 18:31:13 +00:00
|
|
|
either_copyin(void *dst, int user_src, uint64 src, uint64 len)
|
2019-06-04 09:57:47 +00:00
|
|
|
{
|
|
|
|
struct proc *p = myproc();
|
|
|
|
if(user_src){
|
|
|
|
return copyin(p->pagetable, dst, src, len);
|
|
|
|
} else {
|
|
|
|
memmove(dst, (char*)src, len);
|
2019-06-05 18:31:13 +00:00
|
|
|
return 0;
|
2019-06-04 09:57:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-06 17:54:03 +00:00
|
|
|
// Print a process listing to console. For debugging.
|
|
|
|
// Runs when user types ^P on console.
|
|
|
|
// No lock to avoid wedging a stuck machine further.
|
|
|
|
void
|
|
|
|
procdump(void)
|
|
|
|
{
|
|
|
|
static char *states[] = {
|
|
|
|
[UNUSED] "unused",
|
|
|
|
[SLEEPING] "sleep ",
|
|
|
|
[RUNNABLE] "runble",
|
|
|
|
[RUNNING] "run ",
|
|
|
|
[ZOMBIE] "zombie"
|
|
|
|
};
|
|
|
|
struct proc *p;
|
|
|
|
char *state;
|
|
|
|
|
2019-07-27 08:15:06 +00:00
|
|
|
printf("\n");
|
2019-07-07 19:20:13 +00:00
|
|
|
for(p = proc; p < &proc[NPROC]; p++){
|
2019-06-06 17:54:03 +00:00
|
|
|
if(p->state == UNUSED)
|
|
|
|
continue;
|
|
|
|
if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
|
|
|
|
state = states[p->state];
|
|
|
|
else
|
|
|
|
state = "???";
|
|
|
|
printf("%d %s %s", p->pid, state, p->name);
|
|
|
|
printf("\n");
|
|
|
|
}
|
|
|
|
}
|