#include "../port/lib.h"
#include "../port/error.h"

int schedgain = 30;	/* units in seconds */

void updatecpu(Proc*);
int reprioritize(Proc*);

ulong delayedscheds;	/* statistics */

static struct Procalloc
{	/* BUG: generate automatically */

static void pidfree(Proc*);
static void rebalance(void);

schedinit(void)		/* never returns */
	if((e = up->edf) != nil && (e->flags & Admitted))

 * Holding locks from pexit:

	up->qnext = procalloc.free;
	/* proc is free now, make sure unlock() won't touch it */
	up = procalloc.Lock.p = nil;

/*
 * If changing this routine, look also at sleep().  It
 * contains a copy of the guts of sched().
 */
		panic("cpu%d: ilockdepth %d, last lock %#p at %#p, sched called from %#p",
			up != nil ? up->lastilock: nil,
			(up != nil && up->lastilock) ? up->lastilock->pc: 0,

	/*
	 * Delay the sched until the process gives up the locks
	 * it is holding.  This avoids dumb lock loops.
	 * Don't delay if the process is Moribund.
	 * It called sched to die.
	 * But do sched eventually.  This avoids a missing unlock
	 * from hanging the entire kernel.
	 * But don't reschedule procs holding palloc or procalloc.
	 * Those are far too important to be holding while asleep.
	 *
	 * This test is not exact.  There can still be a few instructions
	 * in the middle of taslock when a process holds a lock
	 * but Lock.p has not yet been initialized.
	 */
	if(up->state != Moribund)
	if(up->delaysched < 20
	|| palloc.Lock.p == up
	|| procalloc.Lock.p == up){

	if(setlabel(&up->sched)){

	gotolabel(&m->sched);

	p->priority = reprioritize(p);

	m->schedticks = m->ticks + HZ/10;

	up->mach = MACHP(m->machno);

	gotolabel(&up->sched);

	return runvec & ~((1<<(up->priority+1))-1);
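/*
 * For example, with up->priority == 3 the mask above is
 * ~((1<<4)-1) = ~0xF, so only the runvec bits for queues 4
 * and above survive; a nonzero result means some ready process
 * outranks the current one.
 */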
 * here once per clock tick to see if we should resched

	/* once a second, rebalance will reprioritize ready procs */

	/* unless preempted, get to run for at least 100ms */
	|| (!up->fixedpri && m->ticks > m->schedticks && anyready())){
		m->readied = nil;	/* avoid cooperative scheduling */

 * here at the end of non-clock interrupts to see if we should preempt the
 * current process.  Returns 1 if preempted, 0 otherwise.

	if(up != nil && up->state == Running)
	if(up->preempted == 0)
		m->readied = nil;	/* avoid cooperative scheduling */
 * Update the cpu time average for this particular process,
 * which is about to change from up -> not up or vice versa.
 * p->lastupdate is the last time an updatecpu happened.
 *
 * The cpu time average is a decaying average that lasts
 * about D clock ticks.  D is chosen to be approximately
 * the cpu time of a cpu-intensive "quick job".  A job has to run
 * for approximately D clock ticks before we home in on its
 * actual cpu usage.  Thus if you manage to get in and get out
 * quickly, you won't be penalized during your burst.  Once you
 * start using your share of the cpu for more than about D
 * clock ticks though, your p->cpu hits 1000 (1.0) and you end up
 * below all the other quick jobs.  Interactive tasks, because
 * they basically always use less than their fair share of cpu,
 * are rewarded with high priority.
 *
 * If the process has not been running, then we want to
 * efficiently apply the decay filter
 *
 *	cpu = cpu * (D-1)/D
 *
 * n times (once per elapsed tick), yielding
 *
 *	cpu = cpu * ((D-1)/D)^n
 *
 * but D is big enough that this is approximately
 *
 *	cpu = cpu * (D-n)/D
 *
 * so we use that instead.
 *
 * If the process has been running, we apply the filter to
 * 1 - cpu, yielding a similar equation.  Note that cpu is
 * stored in fixed point (* 1000).
 *
 * Updatecpu must be called before changing up, in order
 * to maintain accurate cpu usage statistics.  It can be called
 * at any time to bring the stats for a given proc up-to-date.
 */
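/*
 * A worked example (a sketch; the constants vary by port): with
 * HZ = 100, Scaling = 2, and schedgain = 30, D = 30*100*2 = 6000
 * scaled ticks.  A process that has been off the cpu for n = 3000
 * ticks keeps (D-n)/D = half of its old p->cpu; once n reaches D
 * it decays all the way to zero.
 */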
	int D = schedgain*HZ*Scaling;

	t = MACHP(0)->ticks*Scaling + Scaling/2;
	n = t - p->lastupdate;
	p->lastupdate = t;
	ocpu = p->cpu;
	if(n > D)
		n = D;	/* fully decayed */
	p->cpu = (ocpu*(D-n))/D;
//iprint("pid %d %s for %d cpu %d -> %d\n", p->pid,p==up?"active":"inactive",n, ocpu,p->cpu);
/*
 * On average, p has used p->cpu of a cpu recently.
 * Its fair share is conf.nmach/m->load of a cpu.  If it has been getting
 * too much, penalize it.  If it has not been getting enough, reward it.
 * I don't think you can get much more than your fair share that
 * often, so most of the queues are for using less.  Having a priority
 * of 3 means you're just right.  Having a higher priority (up to p->basepri)
 * means you're not using as much as you could.
 */
reprioritize(Proc *p)
	int fairshare, n, load, ratio;

	load = MACHP(0)->load;

	/*
	 * fairshare = 1.000 * conf.nmach * 1.000/load,
	 * except the decimal point is moved three places
	 * on both load and fairshare.
	 */
	fairshare = (conf.nmach*1000*1000)/load;
	n = p->cpu;
	if(n == 0)
		n = 1;
	ratio = (fairshare+n/2) / n;
	if(ratio > p->basepri)
		ratio = p->basepri;
	if(ratio < 0)
		panic("reprioritize");
//iprint("pid %d cpu %d load %d fair %d pri %d\n", p->pid, p->cpu, load, fairshare, ratio);
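/*
 * A worked example: on a single processor with load 1.000 (stored
 * as 1000), fairshare = (1*1000*1000)/1000 = 1000.  A process with
 * p->cpu == 350 gets ratio = (1000+175)/350 = 3, the "just right"
 * priority; one with p->cpu == 100 computes ratio 10, which is then
 * capped at p->basepri.
 */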
/*
 * add a process to a scheduling queue
 */
queueproc(Schedq *rq, Proc *p)

/*
 * try to remove a process from a scheduling queue (called splhi)
 */
dequeueproc(Schedq *rq, Proc *tp)

	/*
	 * the queue may have changed before we locked runq,
	 * refind the target process.
	 */
	for(p = rq->head; p != nil; p = p->rnext){
		if(p == tp)
			break;
	}

	/*
	 * p->mach==nil only when the process state is saved
	 */
	if(p == nil || p->mach != nil){
		runvec &= ~(1<<(rq-runq));

	if(p->state != Ready)
		print("dequeueproc %s %lud %s\n", p->text, p->pid, statename[p->state]);

/*
 * ready(p) picks a new priority for a process and sticks it in the
 * runq for that priority.
 */
	void (*pt)(Proc*, int, vlong);

	if(p->state == Ready){
		print("double ready %s %lud pc %p\n", p->text, p->pid, getcallerpc(&p));

	if(up != p && (p->wired == nil || p->wired == MACHP(m->machno)))
		m->readied = p;	/* group scheduling */

	pri = reprioritize(p);

/*
 * yield the processor and drop our priority
 */
	/* pretend we just used 1/2 tick */
	up->lastupdate -= Scaling/2;
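	/*
	 * backdating lastupdate makes the updatecpu() done by sched()
	 * see an extra half tick of run time, charging the yielding
	 * process a little and nudging its priority down.
	 */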
/*
 * recalculate priorities once a second.  We need to do this
 * since priorities will otherwise only be recalculated when
 * the running process blocks.
 */

	if(t - balancetime < HZ)

	for(pri=0, rq=runq; pri<Npriq; pri++, rq++){
		if(p->mp != MACHP(m->machno))
			continue;
		if(pri == p->basepri)
			continue;
		npri = reprioritize(p);
		p = dequeueproc(rq, p);
		if(p != nil)
			queueproc(&runq[npri], p);
/*
 * pick a process to run
 */
	void (*pt)(Proc*, int, vlong);

	/* cooperative scheduling until the clock ticks */
	if((p = m->readied) != nil && p->mach == nil && p->state == Ready
	&& (p->wired == nil || p->wired == MACHP(m->machno))
	&& runq[Nrq-1].head == nil && runq[Nrq-2].head == nil){
		rq = &runq[p->priority];

	/*
	 * find a process that last ran on this processor (affinity),
	 * or one that hasn't moved in a while (load balancing).  Every
	 * time around the loop affinity goes down.
	 */

		/*
		 * find the highest priority target process that this
		 * processor can run given affinity constraints.
		 */
		for(rq = &runq[Nrq-1]; rq >= runq; rq--){
			for(p = rq->head; p != nil; p = p->rnext){
				if(p->mp == nil || p->mp == MACHP(m->machno)
				|| (p->wired == nil && i > 0))
		/* waste time or halt the CPU */

		/* account for the time spent idling */
		now = perfticks();
		m->perf.inidle += now-start;
	p = dequeueproc(rq, p);

	p->mp = MACHP(m->machno);

	edfrun(p, rq == &runq[PriEdf]);	/* start deadline timer and do admin */

	/* Only reliable way to see if we are Running */
	while((p = procalloc.free) == nil){
		snprint(msg, sizeof msg, "no procs; %s forking",
			up != nil ? up->text: "kernel");
		resrcwait(msg);
	}
	procalloc.free = p->qnext;
	p->syscalltrace = nil;

	p->errstr = p->errbuf0;
	p->syserrstr = p->errbuf1;
	p->errbuf0[0] = '\0';
	p->errbuf1[0] = '\0';

	kstrdup(&p->user, "*nouser");
	kstrdup(&p->text, "*notext");
	kstrdup(&p->args, "");

	memset(p->seg, 0, sizeof p->seg);

	p->noteid = pidalloc(p);

	p->kstack = smalloc(KSTACK);

	procpriority(p, PriNormal, 0);

	p->lastupdate = MACHP(0)->ticks*Scaling;

/*
 * wire this proc to a machine
 */
procwired(Proc *p, int bm)
	char nwired[MAXMACH];

	/* pick a machine to wire to */
	memset(nwired, 0, sizeof(nwired));
	for(i=0; i<conf.nproc; i++, pp++){
		wm = pp->wired;
		if(wm != nil && pp->pid)
			nwired[wm->machno]++;
	}
	bm = 0;
	for(i=0; i<conf.nmach; i++)
		if(nwired[i] < nwired[bm])
			bm = i;
	/* use the virtual machine requested */
	bm = bm % conf.nmach;

	p->wired = MACHP(bm);

procpriority(Proc *p, int pri, int fixed)

procinit0(void)		/* bad planning - clashes with devproc.c */
	procalloc.free = xalloc(conf.nproc*sizeof(Proc));
	if(procalloc.free == nil){
		panic("cannot allocate %lud procs (%ludMB)", conf.nproc, conf.nproc*sizeof(Proc)/(1024*1024));

	procalloc.arena = procalloc.free;
	for(i=0; i<conf.nproc-1; i++, p++)
		p->qnext = p+1;
/*
 * sleep if a condition is not true.  Another process will
 * awaken us after it sets the condition.  When we awaken
 * the condition may no longer be true.
 *
 * we lock both the process and the rendezvous to keep r->p
 * and p->r synchronized.
 */
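/*
 * In outline: sleep() sets r->p = up and commits to state Wakeme
 * while holding both locks; wakeup() locks r, and only if r->p is
 * non-nil takes p->rlock, clears r->p and p->r, and readies p.
 */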
sleep(Rendez *r, int (*f)(void*), void *arg)
	void (*pt)(Proc*, int, vlong);

		print("process %lud sleeps with %d locks held, last lock %#p locked at pc %#p, sleep called from %#p\n",
			up->pid, up->nlocks, up->lastlock, up->lastlock->pc, getcallerpc(&r));

		print("double sleep called from %#p, %lud %lud\n", getcallerpc(&r), r->p->pid, up->pid);

	/*
	 * Wakeup only knows there may be something to do by testing
	 * r->p in order to get something to lock on.
	 * Flush that information out to memory in case the sleep is
	 * committed.
	 */
	if((*f)(arg) || up->notepending){
		/*
		 * if condition happened or a note is pending
		 */

		/*
		 * now we are committed to
		 * change state and call scheduler
		 */

		if(setlabel(&up->sched)) {
			/*
			 * here when the process is awakened
			 */

			/*
			 * here to go to sleep (i.e. stop Running)
			 */
			gotolabel(&m->sched);

	if(up->notepending) {
	if(up->procctl == Proc_exitme && up->closingfgrp != nil)
		forceclosefgrp();
	return up->trend == nil || up->tfn(arg);

twakeup(Ureg*, Timer *t)

tsleep(Rendez *r, int (*fn)(void*), void *arg, ulong ms)
		print("tsleep: timer active: mode %d, tf %#p\n", up->tmode, up->tf);

	up->tmode = Trelative;
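	/*
	 * Trelative: the timer fires ms from now.  A common idiom is a
	 * plain bounded pause,
	 *	tsleep(&up->sleep, return0, nil, 100);
	 * where return0 supplies an always-false condition and the
	 * timer provides the wakeup after 100ms.
	 */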
/*
 * Expects that only one process can call wakeup for any given Rendez.
 * We hold both locks to ensure that r->p and p->r remain consistent.
 * Richard Miller has a better solution that doesn't require both to
 * be held simultaneously, but I'm a paranoid - presotto.
 */

		if(p->state != Wakeme || p->r != r){
			iprint("%p %p %d\n", p->r, r, p->state);
			panic("wakeup: state");
/*
 * if waking a sleeping process, this routine must hold both
 * p->rlock and r->lock.  However, it can't acquire them in
 * the same order as wakeup does, so a lock-ordering deadlock
 * is possible.  We break the deadlock by giving up p->rlock
 * if we can't get r->lock, and retrying.
 */
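/*
 * Concretely: wakeup's order is r->lock, then p->rlock.  postnote
 * must start from p, so it takes p->rlock first and then only tries
 * r->lock with canlock(); on failure it drops p->rlock, lets the
 * scheduler run, and starts over.
 */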
postnote(Proc *p, int dolock, char *n, int flag)
	if(n != nil && flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;	/* discard pending notes */
	if(p->nnote < NNOTE && n != nil) {
		kstrcpy(p->note[p->nnote].msg, n, ERRMAX);
		p->note[p->nnote++].flag = flag;

	/* this loop is to avoid lock ordering problems. */

		/* waiting for a wakeup? */

		/* try for the second lock */
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);

		/* give other process time to get out of critical section and try again */
	/* Try and pull out of an eqlock */
	if((q = p->eql) != nil){
		if(p->state == Queueing && p->eql == q){
			for(l = nil, d = q->head; d != nil; l = d, d = d->qnext){
				if(d != p)
					continue;
				p->eql = nil;	/* not taken */

	/* Try and pull out of a rendezvous */
	if(p->state == Rendezvous) {
		l = &REND(p->rgrp, p->rendtag);
		for(d = *l; d != nil; d = d->rendhash) {

/*
 * weird thing: keep at most NBROKEN around
 */
	if(broken.n == NBROKEN) {
		broken.n--;
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
	}
	broken.p[broken.n++] = p;
	for(b=0; b < broken.n; b++)
		if(broken.p[b] == p) {
			broken.n--;
			memmove(&broken.p[b], &broken.p[b+1],
				sizeof(Proc*)*(NBROKEN-(b+1)));
	for(i=0; i<n; i++) {

pexit(char *exitstr, int freemem)
	void (*pt)(Proc*, int, vlong);

	/* nil out all the resources under lock (free later) */

	qunlock(&up->debug);
	/*
	 * if not a kernel process and it has a parent,
	 * do some housekeeping.
	 */
	if(up->kp == 0 && up->parentpid != 0) {
		wq = smalloc(sizeof(Waitq));
		wq->w.pid = up->pid;
		utime = up->time[TUser] + up->time[TCUser];
		stime = up->time[TSys] + up->time[TCSys];
		wq->w.time[TUser] = tk2ms(utime);
		wq->w.time[TSys] = tk2ms(stime);
		wq->w.time[TReal] = tk2ms(MACHP(0)->ticks - up->time[TReal]);
		if(exitstr != nil && exitstr[0])
			snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
		else
			wq->w.msg[0] = '\0';
		/*
		 * Check that parent is still alive.
		 */
		if(p->pid == up->parentpid && p->state != Broken) {
			p->time[TCUser] += utime;
			p->time[TCSys] += stime;
			/*
			 * If there would be more than 128 wait records
			 * pending for my parent, then don't leave a wait
			 * record behind.  This helps prevent badly written
			 * daemon processes from accumulating lots of wait
			 * records.
			 */
			if(p->nwait < 128) {
				wq->next = p->waitq;
				p->waitq = wq;
				p->nwait++;
	else if(up->kp == 0 && up->parent == nil){
		if(exitstr == nil)
			exitstr = "unknown";
		panic("boot process died: %s", exitstr);
	qlock(&up->seglock);
	es = &up->seg[NSEG];
	for(s = up->seg; s < es; s++) {
		if(*s != nil){
			putseg(*s);
			*s = nil;
		}
	}
	qunlock(&up->seglock);
	lock(&up->exl);		/* Prevent my children from leaving waits */

	while((wq = up->waitq) != nil){
		up->waitq = wq->next;
		free(wq);
	}
	/* release debuggers */
	qlock(&up->debug);
	if(up->pdbg != nil) {
		wakeup(&up->pdbg->sleep);
		up->pdbg = nil;
	}
	if(up->syscalltrace != nil) {
		free(up->syscalltrace);
		up->syscalltrace = nil;
	}
	qunlock(&up->debug);

	/* Sched must not loop for these locks */

	up->state = Moribund;
	return p->waitq != nil;
	if(!canqlock(&up->qwaitr))
		error(Einuse);
		qunlock(&up->qwaitr);

	while(up->waitq == nil) {
		if(up->nchild == 0) {
			error(Enochild);
		}
		sleep(&up->waitr, haswaitq, up);
	wq = up->waitq;
	up->waitq = wq->next;
	qunlock(&up->qwaitr);

	memmove(w, &wq->w, sizeof(Waitmsg));

	return &procalloc.arena[i];

	if(p->seg[BSEG] != nil)
		bss = p->seg[BSEG]->top;
	s = p->psstate;
	if(s == nil)
		s = statename[p->state];
	print("%3lud:%10s pc %#p dbgpc %#p %8s (%s) ut %ld st %ld bss %lux qpc %#p nl %d nd %lud lpc %#p pri %lud\n",
		p->pid, p->text, p->pc, dbgpc(p), s, statename[p->state],
		p->time[0], p->time[1], bss, p->qpc, p->nlocks, p->delaysched,
		p->lastlock ? p->lastlock->pc : 0, p->priority);
1306 print("up %lud\n", up->pid);
1308 print("no current process\n");
	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;
/*
 * wait till all processes have flushed their mmu
 * state about segment s
 */
procflushseg(Segment *s)
	int i, ns, nm, nwait;

	/*
	 * tell all processes with this
	 * segment to flush their mmu's
	 */
	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;
		for(ns = 0; ns < NSEG; ns++)
			if(p->seg[ns] == s){
				for(nm = 0; nm < conf.nmach; nm++){
					if(MACHP(nm)->proc == p){
						MACHP(nm)->flushmmu = 1;
	/*
	 * wait for all other processors to take a clock interrupt
	 * and flush their mmu's
	 */
	for(nm = 0; nm < conf.nmach; nm++)
		while(m->machno != nm && MACHP(nm)->flushmmu)

	for(rq = &runq[Nrq-1]; rq >= runq; rq--){
		print("rq%ld:", rq-runq);
		for(p = rq->head; p != nil; p = p->rnext)
			print(" %lud(%lud)", p->pid, m->ticks - p->readytime);

	print("nrdy %d\n", nrdy);
kproc(char *name, void (*func)(void *), void *arg)

	p->scallnr = up->scallnr;
	p->slash = up->slash;

	memmove(p->note, up->note, sizeof(p->note));
	p->nnote = up->nnote;
	p->lastnote = up->lastnote;
	p->notify = up->notify;

	procpriority(p, PriKproc, 0);

	kprocchild(p, func, arg);

	kstrdup(&p->user, eve);
	kstrdup(&p->text, name);

	memset(p->time, 0, sizeof(p->time));
	p->time[TReal] = MACHP(0)->ticks;
/*
 * called splhi() by notify().  See comment in notify for the
 * reasoning.
 */
	switch(p->procctl) {
		pprint("Killed: Insufficient physical memory\n");
		pexit("Killed: Insufficient physical memory", 1);

		spllo();	/* pexit has locks in it */

		p->psstate = "Stopped";

		/* free a waiting debugger */
		if(p->pdbg != nil) {
			wakeup(&p->pdbg->sleep);
			p->pdbg = nil;
		}
	assert(up->nerrlab < NERR);
	kstrcpy(up->errstr, err, ERRMAX);
	setlabel(&up->errlab[NERR-1]);

	assert(up->nerrlab > 0);
	gotolabel(&up->errlab[--up->nerrlab]);
exhausted(char *resource)
	snprint(buf, sizeof buf, "no free %s", resource);
	iprint("%s\n", buf);
	error(buf);
	ep = procalloc.arena+conf.nproc;
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp || !canqlock(&p->seglock))
			continue;
		for(i=1; i<NSEG; i++) {
			s = p->seg[i];
			if(s == nil || !canqlock(s))
				continue;
			l += (ulong)mcountseg(s);
			qunlock(s);
		}
		qunlock(&p->seglock);
		if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
			kp = p;
			max = l;
		}
	if(kp == nil || !canqlock(&kp->seglock))
		return;
	print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp)
			continue;
		if(p != kp && p->seg[BSEG] != nil && p->seg[BSEG] == kp->seg[BSEG])
			p->procctl = Proc_exitbig;
	kp->procctl = Proc_exitbig;
	for(i = 0; i < NSEG; i++) {
		s = kp->seg[i];
		if(s != nil && canqlock(s)) {
			mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
			qunlock(s);
		}
	}
	qunlock(&kp->seglock);
/*
 * change ownership to 'new' of all processes owned by 'old'.  Used when
 * renaming a user.
 */
renameuser(char *old, char *new)
	ep = procalloc.arena+conf.nproc;
	for(p = procalloc.arena; p < ep; p++)
		if(p->user!=nil && strcmp(old, p->user)==0)
			kstrdup(&p->user, new);

/*
 * time accounting called by clock() splhi'd
 */
	p->time[p->insyscall]++;
	/* calculate decaying duty cycles */
	n = perfticks();
	per = n - m->perf.last;
	m->perf.last = n;
	per = (m->perf.period*(HZ-1) + per)/HZ;
	if(per != 0)
		m->perf.period = per;
	m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;

	m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;

	/* only one processor gets to compute system load averages */
	/*
	 * calculate decaying load average.
	 * if we decay by (n-1)/n then it takes
	 * n clock ticks to go from load L to .36 L once
	 * things quiet down.  it takes about 5 n clock
	 * ticks to go to zero.  so using HZ means this is
	 * approximately the load over the last second,
	 * with a tail lasting about 5 seconds.
	 */
	m->load = (m->load*(HZ-1)+n)/HZ;
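	/*
	 * e.g. with HZ = 100, a machine that goes idle (n = 0) sees its
	 * load decay to (99/100)^100, about .36 of the old value, after
	 * one second, and to roughly zero after five.
	 */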
	static int gen, wrapped;
Retry:
	pid = ++gen & 0x7FFFFFFF;
	h = pid % nelem(procalloc.ht);
	/* after gen wraps, a pid may still be in use; retry if so */
	for(x = procalloc.ht[h]; x != nil; x = x->pidhash)
		if(x->pid == pid)
			goto Retry;
	p->pidhash = procalloc.ht[h];
	procalloc.ht[h] = p;
	h = p->pid % nelem(procalloc.ht);
	for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
		if(*l == p){
			*l = p->pidhash;
			break;
		}
procindex(ulong pid)
	s = -1;
	h = pid % nelem(procalloc.ht);
	for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
		if(p->pid == pid){
			s = p - procalloc.arena;
			break;
		}
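/*
 * the pointer arithmetic above turns a hash hit into an index into
 * procalloc.arena, which callers such as devproc use as the
 * process-table slot; s stays -1 when the pid is no longer in use.
 */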