xv6-65oo2/kernel/proc.c

#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "riscv.h"
#include "spinlock.h"
#include "proc.h"
#include "defs.h"

struct cpu cpus[NCPU];

struct proc proc[NPROC];

struct proc *initproc;

int nextpid = 1;
struct spinlock pid_lock;

extern void forkret(void);
static void freeproc(struct proc *p);

extern char trampoline[]; // trampoline.S

// helps ensure that wakeups of wait()ing
// parents are not lost. helps obey the
// memory model when using p->parent.
// must be acquired before any p->lock.
struct spinlock wait_lock;
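
// (Illustrative note on the ordering rule above: code that needs
// both locks, such as exit() and wait() below, always acquires
// them in the same order,
//
//   acquire(&wait_lock);
//   acquire(&p->lock);
//
// which rules out deadlock between the two.)
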
// Allocate a page for each process's kernel stack.
// Map it high in memory, followed by an invalid
// guard page.
void
proc_mapstacks(pagetable_t kpgtbl)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++) {
    char *pa = kalloc();
    if(pa == 0)
      panic("kalloc");
    uint64 va = KSTACK((int) (p - proc));
    kvmmap(kpgtbl, va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
  }
}
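
// (A sketch of the resulting layout, assuming memlayout.h's usual
// definition KSTACK(i) = TRAMPOLINE - (i+1)*2*PGSIZE:
//
//   TRAMPOLINE   (trampoline page)
//   guard page   (unmapped)
//   kstack 0     (mapped, PTE_R|PTE_W)
//   guard page   (unmapped)
//   kstack 1
//   ...
//
// An overflowing kernel stack faults on the unmapped guard page
// below it instead of silently corrupting a neighboring stack.)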

// initialize the proc table.
void
procinit(void)
{
  struct proc *p;

  initlock(&pid_lock, "nextpid");
  initlock(&wait_lock, "wait_lock");
  for(p = proc; p < &proc[NPROC]; p++) {
    initlock(&p->lock, "proc");
    p->state = UNUSED;
    p->kstack = KSTACK((int) (p - proc));
  }
}

// Must be called with interrupts disabled,
// to prevent race with process being moved
// to a different CPU.
int
cpuid()
{
  int id = r_tp();
  return id;
}

// Return this CPU's cpu struct.
// Interrupts must be disabled.
struct cpu*
mycpu(void)
{
  int id = cpuid();
  struct cpu *c = &cpus[id];
  return c;
}

// Return the current struct proc *, or zero if none.
struct proc*
myproc(void)
{
  push_off();
  struct cpu *c = mycpu();
  struct proc *p = c->proc;
  pop_off();
  return p;
}

int
allocpid()
{
  int pid;

  acquire(&pid_lock);
  pid = nextpid;
  nextpid = nextpid + 1;
  release(&pid_lock);

  return pid;
}

// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel,
// and return with p->lock held.
// If there are no free procs, or a memory allocation fails, return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++) {
    acquire(&p->lock);
    if(p->state == UNUSED) {
      goto found;
    } else {
      release(&p->lock);
    }
  }
  return 0;

found:
  p->pid = allocpid();
  p->state = USED;

  // Allocate a trapframe page.
  if((p->trapframe = (struct trapframe *)kalloc()) == 0){
    freeproc(p);
    release(&p->lock);
    return 0;
  }

  // An empty user page table.
  p->pagetable = proc_pagetable(p);
  if(p->pagetable == 0){
    freeproc(p);
    release(&p->lock);
    return 0;
  }

  // Set up new context to start executing at forkret,
  // which returns to user space.
  memset(&p->context, 0, sizeof(p->context));
  p->context.ra = (uint64)forkret;
  p->context.sp = p->kstack + PGSIZE;

  return p;
}

// free a proc structure and the data hanging from it,
// including user pages.
// p->lock must be held.
static void
freeproc(struct proc *p)
{
  if(p->trapframe)
    kfree((void*)p->trapframe);
  p->trapframe = 0;
  if(p->pagetable)
    proc_freepagetable(p->pagetable, p->sz);
  p->pagetable = 0;
  p->sz = 0;
  p->pid = 0;
  p->parent = 0;
  p->name[0] = 0;
  p->chan = 0;
  p->killed = 0;
  p->xstate = 0;
  p->state = UNUSED;
}

// Create a user page table for a given process, with no user memory,
// but with trampoline and trapframe pages.
pagetable_t
proc_pagetable(struct proc *p)
{
  pagetable_t pagetable;

  // An empty page table.
  pagetable = uvmcreate();
  if(pagetable == 0)
    return 0;

  // map the trampoline code (for system call return)
  // at the highest user virtual address.
  // only the supervisor uses it, on the way
  // to/from user space, so not PTE_U.
  if(mappages(pagetable, TRAMPOLINE, PGSIZE,
              (uint64)trampoline, PTE_R | PTE_X) < 0){
    uvmfree(pagetable, 0);
    return 0;
  }

  // map the trapframe page just below the trampoline page, for
  // trampoline.S.
  if(mappages(pagetable, TRAPFRAME, PGSIZE,
              (uint64)(p->trapframe), PTE_R | PTE_W) < 0){
    uvmunmap(pagetable, TRAMPOLINE, 1, 0);
    uvmfree(pagetable, 0);
    return 0;
  }

  return pagetable;
}

// Free a process's page table, and free the
// physical memory it refers to.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  uvmunmap(pagetable, TRAMPOLINE, 1, 0);
  uvmunmap(pagetable, TRAPFRAME, 1, 0);
  uvmfree(pagetable, sz);
}

// a user program that calls exec("/init")
// assembled from ../user/initcode.S
// od -t xC ../user/initcode
uchar initcode[] = {
  0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x45, 0x02,
  0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x35, 0x02,
  0x93, 0x08, 0x70, 0x00, 0x73, 0x00, 0x00, 0x00,
  0x93, 0x08, 0x20, 0x00, 0x73, 0x00, 0x00, 0x00,
  0xef, 0xf0, 0x9f, 0xff, 0x2f, 0x69, 0x6e, 0x69,
  0x74, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
  0x00, 0x00, 0x00, 0x00
};

// Set up first user process.
void
userinit(void)
{
  struct proc *p;

  p = allocproc();
  initproc = p;

  // allocate one user page and copy initcode's instructions
  // and data into it.
  uvmfirst(p->pagetable, initcode, sizeof(initcode));
  p->sz = PGSIZE;

  // prepare for the very first "return" from kernel to user.
  p->trapframe->epc = 0;      // user program counter
  p->trapframe->sp = PGSIZE;  // user stack pointer

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;

  release(&p->lock);
}

// Grow or shrink user memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint64 sz;
  struct proc *p = myproc();

  sz = p->sz;
  if(n > 0){
    if((sz = uvmalloc(p->pagetable, sz, sz + n, PTE_W)) == 0) {
      return -1;
    }
  } else if(n < 0){
    sz = uvmdealloc(p->pagetable, sz, sz + n);
  }
  p->sz = sz;
  return 0;
}
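
// Example caller (a sketch of sysproc.c's sys_sbrk; details may
// differ): the sbrk() system call returns the old size and
// delegates the actual resizing to growproc():
//
//   uint64
//   sys_sbrk(void)
//   {
//     uint64 addr;
//     int n;
//
//     argint(0, &n);
//     addr = myproc()->sz;
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }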

// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *p = myproc();

  // Allocate process.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy user memory from parent to child.
  if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
    freeproc(np);
    release(&np->lock);
    return -1;
  }
  np->sz = p->sz;

  // copy saved user registers.
  *(np->trapframe) = *(p->trapframe);

  // Cause fork to return 0 in the child.
  np->trapframe->a0 = 0;

  // increment reference counts on open file descriptors.
  for(i = 0; i < NOFILE; i++)
    if(p->ofile[i])
      np->ofile[i] = filedup(p->ofile[i]);
  np->cwd = idup(p->cwd);

  safestrcpy(np->name, p->name, sizeof(p->name));

  pid = np->pid;

  release(&np->lock);

  acquire(&wait_lock);
  np->parent = p;
  release(&wait_lock);

  acquire(&np->lock);
  np->state = RUNNABLE;
  release(&np->lock);

  return pid;
}

// Pass p's abandoned children to init.
// Caller must hold wait_lock.
void
reparent(struct proc *p)
{
  struct proc *pp;

  for(pp = proc; pp < &proc[NPROC]; pp++){
    if(pp->parent == p){
      pp->parent = initproc;
      wakeup(initproc);
    }
  }
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
void
exit(int status)
{
  struct proc *p = myproc();

  if(p == initproc)
    panic("init exiting");

  // Close all open files.
  for(int fd = 0; fd < NOFILE; fd++){
    if(p->ofile[fd]){
      struct file *f = p->ofile[fd];
      fileclose(f);
      p->ofile[fd] = 0;
    }
  }

  begin_op();
  iput(p->cwd);
  end_op();
  p->cwd = 0;

  acquire(&wait_lock);

  // Give any children to init.
  reparent(p);

  // Parent might be sleeping in wait().
  wakeup(p->parent);

  acquire(&p->lock);

  p->xstate = status;
  p->state = ZOMBIE;

  release(&wait_lock);

  // Jump into the scheduler, never to return.
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(uint64 addr)
{
  struct proc *pp;
  int havekids, pid;
  struct proc *p = myproc();

  acquire(&wait_lock);

  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(pp = proc; pp < &proc[NPROC]; pp++){
      if(pp->parent == p){
        // make sure the child isn't still in exit() or swtch().
        acquire(&pp->lock);

        havekids = 1;
        if(pp->state == ZOMBIE){
          // Found one.
          pid = pp->pid;
          if(addr != 0 && copyout(p->pagetable, addr, (char *)&pp->xstate,
                                  sizeof(pp->xstate)) < 0) {
            release(&pp->lock);
            release(&wait_lock);
            return -1;
          }
          freeproc(pp);
          release(&pp->lock);
          release(&wait_lock);
          return pid;
        }
        release(&pp->lock);
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || killed(p)){
      release(&wait_lock);
      return -1;
    }

    // Wait for a child to exit.
    sleep(p, &wait_lock);  //DOC: wait-sleep
  }
}
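
// (User code reaches this through the wait() system call; a
// typical parent does, for example,
//
//   int status;
//   int pid = wait(&status);
//
// and sys_wait() passes the user pointer here as addr.)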

// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run.
//  - swtch to start running that process.
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();

  c->proc = 0;
  for(;;){
    // Avoid deadlock by ensuring that devices can interrupt.
    intr_on();

    for(p = proc; p < &proc[NPROC]; p++) {
      acquire(&p->lock);
      if(p->state == RUNNABLE) {
        // Switch to chosen process.  It is the process's job
        // to release its lock and then reacquire it
        // before jumping back to us.
        p->state = RUNNING;
        c->proc = p;
        swtch(&c->context, &p->context);

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        c->proc = 0;
      }
      release(&p->lock);
    }
  }
}
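
// (Illustrative: the control flow for one scheduling decision is
//
//   scheduler():  swtch(&c->context, &p->context)        // to process
//   ...process runs, then calls yield()/sleep()/exit()...
//   sched():      swtch(&p->context, &mycpu()->context)  // back here
//
// and p->lock is held across both swtch() calls, so no other CPU
// can run p while its stack and registers are in flux.)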

// Switch to scheduler.  Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&p->lock))
    panic("sched p->lock");
  if(mycpu()->noff != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(intr_get())
    panic("sched interruptible");

  intena = mycpu()->intena;
  swtch(&p->context, &mycpu()->context);
  mycpu()->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  struct proc *p = myproc();
  acquire(&p->lock);
  p->state = RUNNABLE;
  sched();
  release(&p->lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
  static int first = 1;

  // Still holding p->lock from scheduler.
  release(&myproc()->lock);

  if (first) {
    // File system initialization must be run in the context of a
    // regular process (e.g., because it calls sleep), and thus cannot
    // be run from main().
    first = 0;
    fsinit(ROOTDEV);
  }

  usertrapret();
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  // Must acquire p->lock in order to
  // change p->state and then call sched.
  // Once we hold p->lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup locks p->lock),
  // so it's okay to release lk.
  acquire(&p->lock);  //DOC: sleeplock1
  release(lk);

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  release(&p->lock);
  acquire(lk);
}

// Wake up all processes sleeping on chan.
// Must be called without any p->lock.
void
wakeup(void *chan)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++) {
    if(p != myproc()){
      acquire(&p->lock);
      if(p->state == SLEEPING && p->chan == chan) {
        p->state = RUNNABLE;
      }
      release(&p->lock);
    }
  }
}
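
// Example (illustrative; buf, buf_lock, and has_data() are
// hypothetical): a consumer and producer pair sleep() and wakeup()
// on the same channel, with the condition protected by one lock:
//
//   // consumer
//   acquire(&buf_lock);
//   while(!has_data())
//     sleep(&buf, &buf_lock);   // releases buf_lock while asleep
//   ...consume...
//   release(&buf_lock);
//
//   // producer
//   acquire(&buf_lock);
//   ...produce...
//   wakeup(&buf);               // wakes every sleeper on &buf
//   release(&buf_lock);
//
// The while-loop re-check matters because wakeup() wakes all
// sleepers, and another process may consume the data first.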

// Kill the process with the given pid.
// The victim won't exit until it tries to return
// to user space (see usertrap() in trap.c).
int
kill(int pid)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++){
    acquire(&p->lock);
    if(p->pid == pid){
      p->killed = 1;
      if(p->state == SLEEPING){
        // Wake process from sleep().
        p->state = RUNNABLE;
      }
      release(&p->lock);
      return 0;
    }
    release(&p->lock);
  }
  return -1;
}

void
setkilled(struct proc *p)
{
  acquire(&p->lock);
  p->killed = 1;
  release(&p->lock);
}

int
killed(struct proc *p)
{
  int k;

  acquire(&p->lock);
  k = p->killed;
  release(&p->lock);
  return k;
}
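
// Example (illustrative; done, chan, and lk are hypothetical):
// since kill() only sets a flag, any kernel loop that can block
// for a long time should poll killed() so the victim actually
// dies, in the style of wait() above:
//
//   while(!done){
//     if(killed(myproc()))
//       return -1;
//     sleep(chan, &lk);
//   }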

// Copy to either a user address, or kernel address,
// depending on usr_dst.
// Returns 0 on success, -1 on error.
int
either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
{
  struct proc *p = myproc();
  if(user_dst){
    return copyout(p->pagetable, dst, src, len);
  } else {
    memmove((char *)dst, src, len);
    return 0;
  }
}

// Copy from either a user address, or kernel address,
// depending on usr_src.
// Returns 0 on success, -1 on error.
int
either_copyin(void *dst, int user_src, uint64 src, uint64 len)
{
  struct proc *p = myproc();
  if(user_src){
    return copyin(p->pagetable, dst, src, len);
  } else {
    memmove(dst, (char*)src, len);
    return 0;
  }
}
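
// Example (a sketch in the style of console.c's consoleread):
// device code uses these helpers so a single loop can serve both
// user-space buffers and kernel buffers:
//
//   char c = ...;                               // next byte to deliver
//   if(either_copyout(user_dst, dst, &c, 1) == -1)
//     break;
//   dst++;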

// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [USED]      "used",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  struct proc *p;
  char *state;

  printf("\n");
  for(p = proc; p < &proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    printf("%d %s %s", p->pid, state, p->name);
    printf("\n");
  }
}