root/proc.c


DEFINITIONS

This source file includes the following definitions.
  1. pinit
  2. cpuid
  3. mycpu
  4. myproc
  5. allocproc
  6. userinit
  7. growproc
  8. fork
  9. exit
  10. wait
  11. scheduler
  12. sched
  13. yield
  14. forkret
  15. sleep
  16. wakeup1
  17. wakeup
  18. kill
  19. procdump

#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

// Must be called with interrupts disabled
int
cpuid() {
  return mycpu()-cpus;
}

// Must be called with interrupts disabled to avoid the caller being
// rescheduled between reading lapicid and running through the loop.
struct cpu*
mycpu(void)
{
  int apicid, i;

  if(readeflags()&FL_IF)
    panic("mycpu called with interrupts enabled\n");

  apicid = lapicid();
  // APIC IDs are not guaranteed to be contiguous. Maybe we should have
  // a reverse map, or reserve a register to store &cpus[i].
  for (i = 0; i < ncpu; ++i) {
    if (cpus[i].apicid == apicid)
      return &cpus[i];
  }
  panic("unknown apicid\n");
}

// Disable interrupts so that we are not rescheduled
// while reading proc from the cpu structure
struct proc*
myproc(void) {
  struct cpu *c;
  struct proc *p;
  pushcli();
  c = mycpu();
  p = c->proc;
  popcli();
  return p;
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;

  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;

  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret.
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}
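
// For reference, a sketch of the kernel stack that allocproc builds
// (addresses grow downward from p->kstack + KSTACKSIZE):
//
//   +---------------------+  <- p->kstack + KSTACKSIZE
//   |  struct trapframe   |  <- p->tf (filled in later by userinit/fork)
//   +---------------------+
//   |  address of trapret |  <- fake return address for forkret
//   +---------------------+
//   |  struct context     |  <- p->context, eip = forkret
//   +---------------------+  <- sp
//   |   ... free stack ...|
//   +---------------------+  <- p->kstack
//
// When the scheduler swtch()es to p->context, execution "returns" into
// forkret, which returns to trapret, which pops the trap frame and enters
// user space.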

//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();

  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  // this assignment to p->state lets other cores
  // run this process. the acquire forces the above
  // writes to be visible, and the lock is also needed
  // because the assignment might not be atomic.
  acquire(&ptable.lock);

  p->state = RUNNABLE;

  release(&ptable.lock);
}
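
// The trap frame above makes the first process begin at virtual address 0
// of its single-page address space, which holds the binary image of
// initcode.S.  In stock xv6, initcode.S simply invokes the exec system
// call on "/init", and init in turn starts the shell.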

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;
  struct proc *curproc = myproc();

  sz = curproc->sz;
  if(n > 0){
    if((sz = allocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(curproc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  curproc->sz = sz;
  switchuvm(curproc);
  return 0;
}
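
// For context, growproc is driven by the sbrk system call.  A sketch of
// the wrapper (it lives in sysproc.c; shown here only for illustration):
//
//   int
//   sys_sbrk(void)
//   {
//     int addr, n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = myproc()->sz;     // old break is the return value
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }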

// Create a new process, copying the current process as the parent.
// Sets up the child's kernel stack to return as if from a system call.
// The child is marked RUNNABLE before fork returns.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *curproc = myproc();

  // Allocate process.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy process state from the parent.
  if((np->pgdir = copyuvm(curproc->pgdir, curproc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = curproc->sz;
  np->parent = curproc;
  *np->tf = *curproc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(curproc->ofile[i])
      np->ofile[i] = filedup(curproc->ofile[i]);
  np->cwd = idup(curproc->cwd);

  safestrcpy(np->name, curproc->name, sizeof(curproc->name));

  pid = np->pid;

  acquire(&ptable.lock);

  np->state = RUNNABLE;

  release(&ptable.lock);

  return pid;
}
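
// fork therefore returns twice: in the parent with the child's pid (the
// ordinary return above), and in the child with 0, because the child
// resumes in forkret -> trapret, which restores the copied trap frame
// whose %eax was cleared.  A typical user-level pattern, assuming the
// standard xv6 user library (user.h):
//
//   int pid = fork();
//   if(pid == 0){
//     exec(path, argv);      // child: run some program (placeholders)
//     exit();
//   } else if(pid > 0){
//     wait();                // parent reaps the child
//   }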

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *curproc = myproc();
  struct proc *p;
  int fd;

  if(curproc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(curproc->ofile[fd]){
      fileclose(curproc->ofile[fd]);
      curproc->ofile[fd] = 0;
    }
  }

  begin_op();
  iput(curproc->cwd);
  end_op();
  curproc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(curproc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == curproc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  curproc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}
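
// Note that exit() cannot free its own kernel stack or page directory:
// the exiting thread is still executing on that stack and, until sched()
// switches away, under that page table.  The final teardown is therefore
// deferred to the parent, which does it in wait() once the child is a
// ZOMBIE.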

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;
  struct proc *curproc = myproc();

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != curproc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        p->state = UNUSED;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || curproc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit().)
    sleep(curproc, &ptable.lock);  //DOC: wait-sleep
  }
}
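
// The sleep channel here is the parent's own struct proc pointer
// (curproc), and exit() wakes exactly that channel via
// wakeup1(curproc->parent); that is how the two sides rendezvous.
// Passing ptable.lock to sleep keeps the scan-then-sleep sequence atomic
// with respect to a child exiting, so no wakeup can be missed.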

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();
  c->proc = 0;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      c->proc = p;
      switchuvm(p);
      p->state = RUNNING;

      swtch(&(c->scheduler), p->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      c->proc = 0;
    }
    release(&ptable.lock);

  }
}
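
// A sketch of one round trip for a process that calls yield():
//
//   scheduler:  acquire(&ptable.lock); swtch(&c->scheduler, p->context);
//   process:    swtch returns inside sched(), sched returns to yield(),
//               yield releases ptable.lock, user code runs, a timer
//               interrupt calls yield() again: acquire(&ptable.lock),
//               state = RUNNABLE, sched() -> swtch back to the scheduler;
//   scheduler:  swtch returns here; eventually release(&ptable.lock).
//
// So ptable.lock is held across every swtch and handed back and forth
// between the scheduler thread and the process; forkret releases it the
// first time a brand-new process runs.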

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->ncli, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(mycpu()->ncli != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = mycpu()->intena;
  swtch(&p->context, mycpu()->scheduler);
  mycpu()->intena = intena;
}
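
// Concretely: if thread A did its outermost pushcli with interrupts
// enabled (intena = 1) while thread B did so with them disabled
// (intena = 0), then without this save/restore B's value could be left
// in the per-CPU field when A resumes, and A's eventual popcli would
// restore the wrong interrupt state.  Saving intena across swtch keeps
// the flag with the kernel thread that recorded it.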

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  myproc()->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot
    // be run from main().
    first = 0;
    iinit(ROOTDEV);
    initlog(ROOTDEV);
  }

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  if(p == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }
  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
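
// A sketch of the usual condition-wait pattern built on sleep/wakeup,
// assuming a hypothetical queue protected by a spinlock qlock (pipe.c
// and the console code follow the same shape):
//
//   // consumer                        // producer
//   acquire(&qlock);                   acquire(&qlock);
//   while(queue_is_empty())            enqueue(item);
//     sleep(&queue, &qlock);           wakeup(&queue);
//   item = dequeue();                  release(&qlock);
//   release(&qlock);
//
// The while loop matters: wakeup makes every sleeper on the channel
// RUNNABLE, so each must re-check the condition after reacquiring qlock.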

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
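
// kill is deliberately lazy: it only sets p->killed.  The victim actually
// exits the next time it crosses the kernel/user boundary, where trap()
// notices the flag and calls exit() on the victim's own kernel stack.
// Waking a SLEEPING victim lets a process blocked in, say, sys_sleep
// observe p->killed promptly instead of sleeping forever; such sleep
// loops are expected to re-check the flag when they resume.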

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}