3 #include "../port/lib.h"
7 #include "../port/error.h"
12 int shargs(char*, int, char**);
14 extern void checkpages(void);
15 extern void checkpagerefs(void);
29 pexit("fork aborted", 1);
33 sysrfork(va_list list)
44 flag = va_arg(list, ulong);
45 /* Check flags before we commit */
46 if((flag & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
48 if((flag & (RFNAMEG|RFCNAMEG)) == (RFNAMEG|RFCNAMEG))
50 if((flag & (RFENVG|RFCENVG)) == (RFENVG|RFCENVG))
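/*
 * Each share/clean pair is mutually exclusive: RFFDG copies the open
 * file table while RFCFDG starts with an empty one, and the same split
 * applies to RFNAMEG/RFCNAMEG (name space) and RFENVG/RFCENVG
 * (environment).  A typical user-level call, shown here only as an
 * illustration, looks like:
 *
 *	switch(rfork(RFPROC|RFFDG|RFNAMEG)){
 *	case -1:
 *		sysfatal("rfork: %r");
 *	case 0:
 *		break;		child: private copies of fds and name space
 *	default:
 *		break;		parent: continues with the child's pid
 *	}
 */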
53 if((flag&RFPROC) == 0) {
54 if(flag & (RFMEM|RFNOWAIT))
56 if(flag & (RFFDG|RFCFDG)) {
59 up->fgrp = dupfgrp(ofg);
61 up->fgrp = dupfgrp(nil);
64 if(flag & (RFNAMEG|RFCNAMEG)) {
68 pgrpcpy(up->pgrp, opg);
69 /* inherit noattach */
70 up->pgrp->noattach = opg->noattach;
74 up->pgrp->noattach = 1;
80 if(flag & (RFENVG|RFCENVG)) {
82 up->egrp = smalloc(sizeof(Egrp));
85 envcpy(up->egrp, oeg);
89 up->noteid = pidalloc(0);
95 p->scallnr = up->scallnr;
102 memmove(p->note, up->note, sizeof(p->note));
103 p->privatemem = up->privatemem;
104 p->noswap = up->noswap;
105 p->nnote = up->nnote;
107 p->lastnote = up->lastnote;
108 p->notify = up->notify;
112 /* Abort the child process on error */
115 kprocchild(p, abortion, 0);
120 /* Make a new set of memory segments */
124 qunlock(&p->seglock);
127 for(i = 0; i < NSEG; i++)
128 if(up->seg[i] != nil)
129 p->seg[i] = dupseg(up->seg, i, n);
130 qunlock(&p->seglock);
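/*
 * dupseg decides per segment whether to share or copy: the text
 * segment is always shared, the stack is always copied, and (per
 * rfork(2)) RFMEM makes the data and bss segments shared between
 * parent and child instead of copied.
 */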
133 /* File descriptors */
134 if(flag & (RFFDG|RFCFDG)) {
136 p->fgrp = dupfgrp(up->fgrp);
138 p->fgrp = dupfgrp(nil);
146 if(flag & (RFNAMEG|RFCNAMEG)) {
149 pgrpcpy(p->pgrp, up->pgrp);
150 /* inherit noattach */
151 p->pgrp->noattach = up->pgrp->noattach;
158 p->pgrp->noattach = 1;
167 /* Environment group */
168 if(flag & (RFENVG|RFCENVG)) {
169 p->egrp = smalloc(sizeof(Egrp));
172 envcpy(p->egrp, up->egrp);
179 p->procmode = up->procmode;
180 if(up->procctl == Proc_tracesyscall)
181 p->procctl = Proc_tracesyscall;
183 poperror(); /* abortion */
185 /* Craft a return frame which will cause the child to pop out of
186 * the scheduler in user mode with the return register zero
188 forkchild(p, up->dbgreg);
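/*
 * That forged frame is why rfork() appears to return twice at user
 * level: once in the parent with the child's pid, and once in the
 * child with 0.
 */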
191 if((flag&RFNOWAIT) == 0){
192 p->parentpid = up->pid;
197 if((flag&RFNOTEG) == 0)
198 p->noteid = up->noteid;
201 memset(p->time, 0, sizeof(p->time));
202 p->time[TReal] = MACHP(0)->ticks;
204 kstrdup(&p->text, up->text);
205 kstrdup(&p->user, up->user);
210 * since the bss/data segments are now shareable,
211 * any mmu info about this process is now stale
212 * (i.e. has bad properties) and has to be discarded.
215 p->basepri = up->basepri;
216 p->priority = up->basepri;
217 p->fixedpri = up->fixedpri;
221 procwired(p, wm->machno);
233 return (cp[0]<<24) | (cp[1]<<16) | (cp[2]<<8) | cp[3];
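/*
 * l2be reads four bytes in big-endian order, so the header bytes
 * 00 00 01 02 yield 0x102 regardless of host byte order; the a.out
 * header fields are stored big-endian on disk.
 */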
237 sysexec(va_list list)
242 char **argv, **argp, **argp0;
243 char *a, *charp, *args, *file, *file0;
244 char *progarg[sizeof(Exec)/2+1], *elem, progelem[64];
245 ulong magic, ssize, nargs, nbytes, n;
246 uintptr t, d, b, entry, bssend, text, data, bss, tstk, align;
249 char line[sizeof(Exec)];
255 file0 = va_arg(list, char*);
256 validaddr((uintptr)file0, 1, 0);
257 argp0 = va_arg(list, char**);
258 file0 = validnamedup(file0, 1);
263 /* Disaster after commit */
265 pexit(up->errstr, 1);
272 tc = namec(file, Aopen, OEXEC, 0);
278 kstrdup(&elem, up->genbuf);
280 n = devtab[tc->type]->read(tc, &exec, sizeof(Exec), 0);
283 if(n == sizeof(Exec) && (magic = l2be(exec.magic)) == AOUT_MAGIC){
284 text = l2be(exec.text);
285 entry = l2be(exec.entry);
289 align = 0x200000; /* 2MB segment alignment for amd64 */
292 align = 0x4000; /* MIPS has 16K page alignment */
295 if(text >= (USTKTOP-USTKSIZE)-(UTZERO+sizeof(Exec))
296 || entry < UTZERO+sizeof(Exec)
297 || entry >= UTZERO+sizeof(Exec)+text)
299 break; /* for binary */
303 * Process #! /bin/sh args ...
305 memmove(line, &exec, n);
306 if(indir || line[0]!='#' || line[1]!='!')
308 n = shargs(line, n, progarg);
313 * First arg becomes complete file name
319 if(strlen(elem) >= sizeof progelem)
321 strcpy(progelem, elem);
322 progarg[0] = progelem;
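/*
 * This is the script case: a first line such as "#!/bin/rc" makes the
 * kernel loop around and exec the named interpreter instead, passing
 * the words of the #! line followed by the script's own name ahead of
 * the original arguments; exec(2) gives the precise argument order.
 */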
327 data = l2be(exec.data);
328 bss = l2be(exec.bss);
330 t = (UTZERO+sizeof(Exec)+text+(align-1)) & ~(align-1);
332 d = (t + data + (align-1)) & ~(align-1);
333 bssend = t + data + bss;
334 b = (bssend + (align-1)) & ~(align-1);
335 if(t >= (USTKTOP-USTKSIZE) || d >= (USTKTOP-USTKSIZE) || b >= (USTKTOP-USTKSIZE))
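/*
 * With align = 0x200000 (amd64), t is UTZERO+sizeof(Exec)+text rounded
 * up to the next 2MB boundary, d is t+data rounded likewise, and b is
 * the end of bss rounded likewise; the check just above rejects any
 * image whose segments would reach the stack region below USTKTOP.
 */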
339 * Args: pass 1: count
341 nbytes = sizeof(Tos); /* hole for profiling clock at top of stack (and more) */
347 nbytes += strlen(a) + 1;
352 evenaddr((uintptr)argp);
353 validaddr((uintptr)argp, BY2WD, 0);
356 if(((uintptr)argp&(BY2PG-1)) < BY2WD)
357 validaddr((uintptr)argp, BY2WD, 0);
358 validaddr((uintptr)a, 1, 0);
359 nbytes += ((char*)vmemchr(a, 0, 0x7FFFFFFF) - a) + 1;
362 ssize = BY2WD*(nargs+1) + ((nbytes+(BY2WD-1)) & ~(BY2WD-1));
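/*
 * ssize is the initial stack image: one word per argv entry plus the
 * terminating nil pointer, then nbytes (the argument strings plus the
 * Tos structure counted above) rounded up to a word boundary.
 */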
365 * 8-byte align SP for those (e.g. sparc) that need it.
366 * execregs() will subtract another 4 bytes for argc.
368 if(BY2WD == 4 && (ssize+4) & 7)
371 if(PGROUND(ssize) >= USTKSIZE)
375 * Build the stack segment, putting it in kernel virtual for the moment
379 qunlock(&up->seglock);
388 } while((s = isoverlap(up, tstk-USTKSIZE, USTKSIZE)) != nil);
389 up->seg[ESEG] = newseg(SG_STACK, tstk-USTKSIZE, USTKSIZE/BY2PG);
392 * Args: pass 2: assemble; the pages will be faulted in
394 tos = (Tos*)(tstk - sizeof(Tos));
395 tos->cyclefreq = m->cyclefreq;
400 argv = (char**)(tstk - ssize);
401 charp = (char*)(tstk - nbytes);
408 for(i=0; i<nargs; i++){
409 if(indir && *argp==nil) {
413 *argv++ = charp + (USTKTOP-tstk);
414 n = strlen(*argp) + 1;
415 memmove(charp, *argp++, n);
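/*
 * The stack is assembled at the temporary address tstk but will end up
 * just below USTKTOP (see the relocateseg call further down), so each
 * argv pointer is stored with the offset USTKTOP-tstk already applied,
 * i.e. as the address the string will have in the new process.
 */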
419 /* copy args; easiest from new process's stack */
421 if(n > 128) /* don't waste too much space on huge arg lists */
425 if(n>0 && args[n-1]!='\0'){
426 /* make sure last arg is NUL-terminated */
427 /* put NUL at UTF-8 character boundary */
429 if(fullrune(args+i, n-i))
438 * Special segments are maintained across exec
440 for(i = SSEG; i <= BSEG; i++) {
442 /* prevent a second free if we have an error */
445 for(i = ESEG+1; i < NSEG; i++) {
447 if(s != nil && (s->type&SG_CEXEC) != 0) {
456 if((f = up->fgrp) != nil) {
457 for(i=0; i<=f->maxfd; i++)
461 /* Text. Shared. Attaches to cache image if possible */
462 /* attachimage returns a locked cache image */
463 img = attachimage(SG_TEXT|SG_RONLY, tc, UTZERO, (t-UTZERO)>>PGSHIFT);
468 ts->flen = sizeof(Exec)+text;
472 s = newseg(SG_DATA, t, (d-t)>>PGSHIFT);
475 /* Attached by hand */
478 s->fstart = ts->fstart+ts->flen;
481 /* BSS. Zero fill on demand */
482 up->seg[BSEG] = newseg(SG_BSS, d, (b-d)>>PGSHIFT);
489 s->base = USTKTOP-USTKSIZE;
491 relocateseg(s, USTKTOP-tstk);
493 qunlock(&up->seglock);
494 poperror(); /* seglock */
497 * '/' processes are higher priority (hack to make /ip more responsive).
499 if(devtab[tc->type]->dc == L'/')
500 up->basepri = PriRoot;
501 up->priority = up->basepri;
504 poperror(); /* file0 */
524 * At this point, the mmu contains info about the old address
525 * space and needs to be flushed
530 up->procctl = Proc_stopme;
531 return execregs(entry, ssize, nargs);
535 shargs(char *s, int n, char **ap)
540 n -= 2; /* skip #! */
551 while(*s==' ' || *s=='\t')
556 while(*s && *s!=' ' && *s!='\t')
573 syssleep(va_list list)
577 ms = va_arg(list, long);
579 if (up->edf != nil && (up->edf->flags & Admitted))
586 tsleep(&up->sleep, return0, 0, ms);
592 sysalarm(va_list list)
594 return procalarm(va_arg(list, ulong));
599 sysexits(va_list list)
602 char *inval = "invalid exit string";
605 status = va_arg(list, char*);
610 validaddr((uintptr)status, 1, 0);
611 if(vmemchr(status, 0, ERRMAX) == 0){
612 memmove(buf, status, ERRMAX);
621 return 0; /* not reached */
625 sys_wait(va_list list)
631 ow = va_arg(list, OWaitmsg*);
635 validaddr((uintptr)ow, sizeof(OWaitmsg), 1);
636 evenaddr((uintptr)ow);
640 readnum(0, ow->pid, NUMSIZE, w.pid, NUMSIZE);
641 readnum(0, ow->time+TUser*NUMSIZE, NUMSIZE, w.time[TUser], NUMSIZE);
642 readnum(0, ow->time+TSys*NUMSIZE, NUMSIZE, w.time[TSys], NUMSIZE);
643 readnum(0, ow->time+TReal*NUMSIZE, NUMSIZE, w.time[TReal], NUMSIZE);
644 strncpy(ow->msg, w.msg, sizeof(ow->msg)-1);
645 ow->msg[sizeof(ow->msg)-1] = '\0';
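/*
 * Both interfaces report the same data: the old wait(2) message above
 * is filled with fixed-width decimal fields via readnum, while await(2)
 * below returns one text line such as
 *
 *	742 120 32 5150 'lookup failed'
 *
 * i.e. pid, then user, system and real time in milliseconds, then the
 * quoted exit string (empty quotes for a normal exit).
 */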
651 sysawait(va_list list)
657 p = va_arg(list, char*);
658 n = va_arg(list, uint);
659 validaddr((uintptr)p, n, 1);
661 return (uintptr)snprint(p, n, "%d %lud %lud %lud %q",
663 w.time[TUser], w.time[TSys], w.time[TReal],
668 werrstr(char *fmt, ...)
676 vseprint(up->syserrstr, up->syserrstr+ERRMAX, fmt, va);
681 generrstr(char *buf, uint nbuf)
687 validaddr((uintptr)buf, nbuf, 1);
688 if(nbuf > sizeof tmp)
690 memmove(tmp, buf, nbuf);
692 /* make sure it's NUL-terminated */
694 memmove(buf, up->syserrstr, nbuf);
696 memmove(up->syserrstr, tmp, nbuf);
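/*
 * errstr(2) is an exchange: the caller's buffer comes back holding the
 * old error string and its previous contents become the new one, so a
 * handler can save and later restore the string with two calls:
 *
 *	char e[ERRMAX];
 *
 *	e[0] = '\0';
 *	errstr(e, sizeof e);	save (and clear) the current string
 *	...
 *	errstr(e, sizeof e);	put it back
 *
 * (user-level illustration only, not part of this file)
 */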
701 syserrstr(va_list list)
706 buf = va_arg(list, char*);
707 len = va_arg(list, uint);
708 return (uintptr)generrstr(buf, len);
711 /* compatibility for old binaries */
713 sys_errstr(va_list list)
715 return (uintptr)generrstr(va_arg(list, char*), 64);
719 sysnotify(va_list list)
721 int (*f)(void*, char*);
722 f = va_arg(list, void*);
724 validaddr((uintptr)f, sizeof(void*), 0);
730 sysnoted(va_list list)
732 if(va_arg(list, int) != NRSTR && !up->notified)
738 syssegbrk(va_list list)
744 addr = va_arg(list, uintptr);
745 for(i = 0; i < NSEG; i++) {
747 if(s == nil || addr < s->base || addr >= s->top)
749 switch(s->type&SG_TYPE) {
757 return (uintptr)ibrk(va_arg(list, uintptr), i);
761 return 0; /* not reached */
765 syssegattach(va_list list)
772 attr = va_arg(list, ulong);
773 name = va_arg(list, char*);
774 va = va_arg(list, uintptr);
775 len = va_arg(list, ulong);
776 return segattach(up, attr, name, va, len);
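/*
 * User level reaches this through segattach(2); an illustrative call
 * (not from this file) is
 *
 *	ulong *p = segattach(0, "shared", 0, 1024*1024);
 *
 * which lets the kernel pick the address (va == 0) for a one-megabyte
 * segment of class "shared".
 */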
780 syssegdetach(va_list list)
786 addr = va_arg(list, uintptr);
790 qunlock(&up->seglock);
795 for(i = 0; i < NSEG; i++)
796 if((s = up->seg[i]) != nil) {
798 if((addr >= s->base && addr < s->top) ||
799 (s->top == s->base && addr == s->base))
808 * Check we are not detaching the initial stack segment.
810 if(s == up->seg[SSEG]){
817 qunlock(&up->seglock);
820 /* Ensure we flush any entries from the lost segment */
826 syssegfree(va_list list)
831 from = va_arg(list, uintptr);
832 to = va_arg(list, ulong);
836 s = seg(up, from, 1);
840 from = PGROUND(from);
849 mfreeseg(s, from, (to - from) / BY2PG);
855 /* For binary compatibility */
857 sysbrk_(va_list list)
859 return (uintptr)ibrk(va_arg(list, uintptr), BSEG);
863 sysrendezvous(va_list list)
865 uintptr tag, val, new;
868 tag = va_arg(list, uintptr);
869 new = va_arg(list, uintptr);
870 l = &REND(up->rgrp, tag);
873 for(p = *l; p != nil; p = p->rendhash) {
874 if(p->rendtag == tag) {
887 /* Going to sleep here */
892 up->state = Rendezvous;
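/*
 * rendezvous(2) pairs processes by tag: the first caller with a given
 * tag sleeps here, the second finds it on the hash chain above and the
 * two exchange their value arguments.  So if process A calls
 * rendezvous((void*)1, (void*)7) and process B then calls
 * rendezvous((void*)1, (void*)9), A returns 9 and B returns 7.
 */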
901 * The implementation of semaphores is complicated by needing
902 * to avoid rescheduling in syssemrelease, so that it is safe
903 * to call from real-time processes. This means syssemrelease
904 * cannot acquire any qlocks, only spin locks.
906 * Semacquire and semrelease must both manipulate the semaphore
907 * wait list. Lock-free linked lists only exist in theory, not
908 * in practice, so the wait list is protected by a spin lock.
910 * The semaphore value *addr is stored in user memory, so it
911 * cannot be read or written while holding spin locks.
913 * Thus, we can access the list only when holding the lock, and
914 * we can access the semaphore only when not holding the lock.
915 * This makes things interesting. Note that sleep's condition function
916 * is called while holding two locks - r and up->rlock - so it cannot
917 * access the semaphore value either.
919 * An acquirer announces its intention to try for the semaphore
920 * by putting a Sema structure onto the wait list and then
921 * setting Sema.waiting. After one last check of the semaphore,
922 * the acquirer sleeps until Sema.waiting==0. A releaser of n
923 * must wake up n acquirers who have Sema.waiting set. It does
924 * this by clearing Sema.waiting and then calling wakeup.
926 * There are three interesting races here.
928 * The first is that in this particular sleep/wakeup usage, a single
929 * wakeup can rouse a process from two consecutive sleeps!
932 * (a) set Sema.waiting = 1
934 * (b) set Sema.waiting = 0
935 * (a) check Sema.waiting inside sleep, return w/o sleeping
936 * (a) try for semaphore, fail
937 * (a) set Sema.waiting = 1
942 * This is okay - semacquire will just go around the loop
943 * again. It does mean that at the top of the for(;;) loop in
944 * semacquire, phore.waiting might already be set to 1.
946 * The second is that a releaser might wake an acquirer who is
947 * interrupted before he can acquire the lock. Since
948 * release(n) issues only n wakeup calls -- only n can be used
949 * anyway -- if the interrupted process is not going to use his
950 * wakeup call he must pass it on to another acquirer.
952 * The third race is similar to the second but more subtle. An
953 * acquirer sets waiting=1 and then does a final canacquire()
954 * before going to sleep. The opposite order would result in
955 * missing wakeups that happen between canacquire and
956 * waiting=1. (In fact, the whole point of Sema.waiting is to
957 * avoid missing wakeups between canacquire() and sleep().) But
958 * there can be spurious wakeups between a successful
959 * canacquire() and the following semdequeue(). This wakeup is
960 * not useful to the acquirer, since he has already acquired
961 * the semaphore. Like in the previous case, though, the
962 * acquirer must pass the wakeup call along.
964 * This is all rather subtle. The code below has been verified
965 * with the spin model /sys/src/9/port/semaphore.p. The
966 * original code anticipated the second race but not the first
967 * or third, which were caught only with spin. The first race
968 * is mentioned in /sys/doc/sleep.ps, but I'd forgotten about it.
969 * It was lucky that my abstract model of sleep/wakeup still managed
970 * to preserve that behavior.
972 * I remain slightly concerned about memory coherence
973 * outside of locks. The spin model does not take
974 * queued processor writes into account so we have to
975 * think hard. The only variables accessed outside locks
976 * are the semaphore value itself and the boolean flag
977 * Sema.waiting. The value is only accessed with cmpswap,
978 * whose job description includes doing the right thing as
979 * far as memory coherence across processors. That leaves
980 * Sema.waiting. To handle it, we call coherence() before each
981 * read and after each write. - rsc
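/*
 * At user level this machinery is reached through semacquire(2) and
 * semrelease(2); a minimal sketch (illustrative only) of a mutex in
 * memory shared with rfork(RFMEM):
 *
 *	long sem = 1;
 *
 *	if(semacquire(&sem, 1) < 0)
 *		sysfatal("semacquire: %r");
 *	... critical section ...
 *	semrelease(&sem, 1);
 */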
984 /* Add semaphore p with addr a to list in seg. */
986 semqueue(Segment *s, long *a, Sema *p)
988 memset(p, 0, sizeof *p);
990 lock(&s->sema); /* uses s->sema.Rendez.Lock, but no one else is */
992 p->prev = s->sema.prev;
998 /* Remove semaphore p from list in seg. */
1000 semdequeue(Segment *s, Sema *p)
1003 p->next->prev = p->prev;
1004 p->prev->next = p->next;
1008 /* Wake up n waiters with addr a on list in seg. */
1010 semwakeup(Segment *s, long *a, long n)
1015 for(p=s->sema.next; p!=&s->sema && n>0; p=p->next){
1016 if(p->addr == a && p->waiting){
1026 /* Add delta to semaphore and wake up waiters as appropriate. */
1028 semrelease(Segment *s, long *addr, long delta)
1034 while(!cmpswap(addr, value, value+delta));
1035 semwakeup(s, addr, delta);
1039 /* Try to acquire semaphore using compare-and-swap */
1041 canacquire(long *addr)
1045 while((value=*addr) > 0)
1046 if(cmpswap(addr, value, value-1))
1051 /* Should we wake up? */
1056 return !((Sema*)p)->waiting;
1059 /* Acquire semaphore (subtract 1). */
1061 semacquire(Segment *s, long *addr, int block)
1066 if(canacquire(addr))
1072 semqueue(s, addr, &phore);
1076 if(canacquire(addr)){
1082 sleep(&phore, semawoke, &phore);
1085 semdequeue(s, &phore);
1086 coherence(); /* not strictly necessary due to lock in semdequeue */
1088 semwakeup(s, addr, 1);
1094 /* Acquire semaphore or time-out */
1096 tsemacquire(Segment *s, long *addr, ulong ms)
1098 int acquired, timedout;
1102 if(canacquire(addr))
1106 acquired = timedout = 0;
1107 semqueue(s, addr, &phore);
1111 if(canacquire(addr)){
1118 tsleep(&phore, semawoke, &phore, ms);
1119 elms = TK2MS(m->ticks - t);
1127 semdequeue(s, &phore);
1128 coherence(); /* not strictly necessary due to lock in semdequeue */
1130 semwakeup(s, addr, 1);
1139 syssemacquire(va_list list)
1145 addr = va_arg(list, long*);
1146 block = va_arg(list, int);
1147 evenaddr((uintptr)addr);
1148 s = seg(up, (uintptr)addr, 0);
1149 if(s == nil || (s->type&SG_RONLY) != 0 || (uintptr)addr+sizeof(long) > s->top){
1150 validaddr((uintptr)addr, sizeof(long), 1);
1155 return (uintptr)semacquire(s, addr, block);
1159 systsemacquire(va_list list)
1165 addr = va_arg(list, long*);
1166 ms = va_arg(list, ulong);
1167 evenaddr((uintptr)addr);
1168 s = seg(up, (uintptr)addr, 0);
1169 if(s == nil || (s->type&SG_RONLY) != 0 || (uintptr)addr+sizeof(long) > s->top){
1170 validaddr((uintptr)addr, sizeof(long), 1);
1175 return (uintptr)tsemacquire(s, addr, ms);
1179 syssemrelease(va_list list)
1184 addr = va_arg(list, long*);
1185 delta = va_arg(list, long);
1186 evenaddr((uintptr)addr);
1187 s = seg(up, (uintptr)addr, 0);
1188 if(s == nil || (s->type&SG_RONLY) != 0 || (uintptr)addr+sizeof(long) > s->top){
1189 validaddr((uintptr)addr, sizeof(long), 1);
1192 /* delta == 0 is a no-op, not a release */
1193 if(delta < 0 || *addr < 0)
1195 return (uintptr)semrelease(s, addr, delta);
1198 /* For binary compatibility */
1200 sys_nsec(va_list list)
1204 /* return in register on 64bit machine */
1205 if(sizeof(uintptr) == sizeof(vlong)){
1207 return (uintptr)todget(nil);
1210 v = va_arg(list, vlong*);
1211 evenaddr((uintptr)v);
1212 validaddr((uintptr)v, sizeof(vlong), 1);