/* plan9front.git: sys/src/9/port/fault.c */
#include        "u.h"
#include        "../port/lib.h"
#include        "mem.h"
#include        "dat.h"
#include        "fns.h"
#include        "../port/error.h"

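/*
 * Called from trap on a page fault at addr.  Find the segment
 * containing the address and let fixfault resolve it, retrying
 * until it succeeds (checking for a pending exit between attempts).
 * Returns 0 on success, -1 if the address is not mapped or the
 * fault is a write to a read-only segment.
 */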
int
fault(ulong addr, int read)
{
        Segment *s;
        char *sps;
        int pnd;

        if(up == nil)
                panic("fault: nil up");
        if(up->nlocks.ref)
                print("fault: nlocks %ld\n", up->nlocks.ref);

        /* hold pending notes until the fault has been handled */
        pnd = up->notepending;
        up->notepending = 0;
        sps = up->psstate;
        up->psstate = "Fault";

        m->pfault++;
        for(;;) {
                spllo();

                s = seg(up, addr, 1);           /* leaves s->lk qlocked if seg != nil */
                if(s == 0) {
                        up->psstate = sps;
                        return -1;
                }

                if(!read && (s->type&SG_RONLY)) {
                        qunlock(&s->lk);
                        up->psstate = sps;
                        return -1;
                }

                if(fixfault(s, addr, read, 1) == 0)
                        break;

                splhi();
                switch(up->procctl){
                case Proc_exitme:
                case Proc_exitbig:
                        procctl(up);
                }
        }

        up->psstate = sps;
        up->notepending |= pnd;

        return 0;
}

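/*
 * Report an i/o error encountered while faulting a page in.
 * If the process has an error label, post a note and raise an
 * error; otherwise kill the process.
 */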
static void
faulterror(char *s, Chan *c, int freemem)
{
        char buf[ERRMAX];

        if(c && c->path){
                snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
                s = buf;
        }
        if(up->nerrlab) {
                postnote(up, 1, s, NDebug);
                error(s);
        }
        pexit(s, freemem);
}

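/*
 * Debugging hook: when set, checkaddr is called from fixfault
 * whenever a physical-segment page at addr2check is mapped.
 */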
void    (*checkaddr)(ulong, Segment *, Page *);
ulong   addr2check;

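/*
 * Resolve a fault at addr within segment s, which is qlocked by
 * the caller and unlocked here.  Text pages are demand loaded;
 * bss, stack and shared pages are zero filled; data pages are
 * demand loaded and copied on write; physical segments map the
 * underlying memory directly.  If doputmmu is set the translation
 * is installed with putmmu.  Returns 0 on success, -1 if the
 * segment went away while allocating a new page.
 */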
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
        int type;
        int ref;
        Pte **p, *etp;
        ulong mmuphys=0, soff;
        Page **pg, *lkp, *new;
        Page *(*fn)(Segment*, ulong);

        addr &= ~(BY2PG-1);
        soff = addr-s->base;
        p = &s->map[soff/PTEMAPMEM];
        if(*p == 0)
                *p = ptealloc();

        etp = *p;
        pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
        type = s->type&SG_TYPE;

        if(pg < etp->first)
                etp->first = pg;
        if(pg > etp->last)
                etp->last = pg;

        switch(type) {
        default:
                panic("fault");
                break;

        case SG_TEXT:                   /* Demand load */
                if(pagedout(*pg))
                        pio(s, addr, soff, pg);

                mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
                (*pg)->modref = PG_REF;
                break;

        case SG_BSS:
        case SG_SHARED:                 /* Zero fill on demand */
        case SG_STACK:
                if(*pg == 0) {
                        new = newpage(1, &s, addr);
                        if(s == 0)
                                return -1;

                        *pg = new;
                }
                goto common;

        case SG_DATA:
        common:                 /* Demand load/pagein/copy on write */
                if(pagedout(*pg))
                        pio(s, addr, soff, pg);

                /*
                 *  It's only possible to copy on write if
                 *  we're the only user of the segment.
                 */
                if(read && conf.copymode == 0 && s->ref == 1) {
                        mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
                        (*pg)->modref |= PG_REF;
                        break;
                }

                lkp = *pg;
                lock(lkp);
                ref = lkp->ref;
                if(ref == 0)
                        panic("fault %#p ref == 0", lkp);
                if(lkp->image == &swapimage)
                        ref += swapcount(lkp->daddr);
                if(ref == 1 && lkp->image) {
                        /*
                         * save a copy of the original for the image cache
                         * and uncache the page. page might temporarily be
                         * unlocked while trying to acquire palloc lock so
                         * recheck ref in case it got grabbed.
                         */
                        duppage(lkp);

                        ref = lkp->ref;
                }
                unlock(lkp);
                if(ref > 1){
                        new = newpage(0, &s, addr);
                        if(s == 0)
                                return -1;
                        *pg = new;
                        copypage(lkp, *pg);
                        putpage(lkp);
                }
                mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
                (*pg)->modref = PG_MOD|PG_REF;
                break;

        case SG_PHYSICAL:
                if(*pg == 0) {
                        fn = s->pseg->pgalloc;
                        if(fn)
                                *pg = (*fn)(s, addr);
                        else {
                                new = smalloc(sizeof(Page));
                                new->va = addr;
                                new->pa = s->pseg->pa+(addr-s->base);
                                new->ref = 1;
                                *pg = new;
                        }
                }

                if(checkaddr && addr == addr2check)
                        (*checkaddr)(addr, s, *pg);
                mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
                (*pg)->modref = PG_MOD|PG_REF;
                break;
        }
        qunlock(&s->lk);

        if(doputmmu)
                putmmu(addr, mmuphys, *pg);

        return 0;
}

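/*
 * Read the faulting page in: either demand load it from the text
 * or data image, or fetch it back from the swap image.  s->lk is
 * released around the device read and reacquired afterwards, so
 * the page table entry is rechecked for races with other processes
 * and the pager.
 */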
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
        Page *new;
        KMap *k;
        Chan *c;
        int n, ask;
        char *kaddr;
        ulong daddr;
        Page *loadrec;

retry:
        loadrec = *p;
        if(loadrec == 0) {      /* from a text/data image */
                daddr = s->fstart+soff;
                new = lookpage(s->image, daddr);
                if(new != nil) {
                        *p = new;
                        return;
                }

                c = s->image->c;
                ask = s->flen-soff;
                if(ask > BY2PG)
                        ask = BY2PG;
        }
        else {                  /* from a swap image */
                daddr = swapaddr(loadrec);
                new = lookpage(&swapimage, daddr);
                if(new != nil) {
                        putswap(loadrec);
                        *p = new;
                        return;
                }

                c = swapimage.c;
                ask = BY2PG;
        }
        qunlock(&s->lk);

        new = newpage(0, 0, addr);
        k = kmap(new);
        kaddr = (char*)VA(k);

        while(waserror()) {
                if(strcmp(up->errstr, Eintr) == 0)
                        continue;
                kunmap(k);
                putpage(new);
                faulterror(Eioload, c, 0);
        }
        n = devtab[c->type]->read(c, kaddr, ask, daddr);
        if(n != ask)
                error(Eioload);
        if(ask < BY2PG)
                memset(kaddr+ask, 0, BY2PG-ask);

        poperror();
        kunmap(k);
        qlock(&s->lk);
        if(loadrec == 0) {      /* This is demand load */
                /*
                 *  race, another proc may have gotten here first while
                 *  s->lk was unlocked
                 */
                if(*p == 0) {
                        new->daddr = daddr;
                        cachepage(new, s->image);
                        *p = new;
                }
                else
                        putpage(new);
        }
        else {                  /* This is paged out */
                /*
                 *  race, another proc may have gotten here first
                 *  (and the pager may have run on that page) while
                 *  s->lk was unlocked
                 */
                if(*p != loadrec){
                        if(!pagedout(*p)){
                                /* another process did it for me */
                                putpage(new);
                                goto done;
                        } else if(*p) {
                                /* another process and the pager got in */
                                putpage(new);
                                goto retry;
                        } else {
                                /* another process segfreed the page */
                                k = kmap(new);
                                memset((void*)VA(k), 0, ask);
                                kunmap(k);
                                *p = new;
                                goto done;
                        }
                }

                new->daddr = daddr;
                cachepage(new, &swapimage);
                *p = new;
                putswap(loadrec);
        }

done:
        if(s->flushme)
                memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call
 */
int
okaddr(ulong addr, ulong len, int write)
{
        Segment *s;

        if((long)len >= 0) {
                for(;;) {
                        s = seg(up, addr, 0);
                        if(s == 0 || (write && (s->type&SG_RONLY)))
                                break;

                        if(addr+len > s->top) {
                                len -= s->top - addr;
                                addr = s->top;
                                continue;
                        }
                        return 1;
                }
        }
        pprint("suicide: invalid address %#lux/%lud in sys call pc=%#lux\n", addr, len, userpc());
        return 0;
}

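/*
 * Like okaddr, but post a note and raise an error
 * if the range is not valid user memory.
 */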
void
validaddr(ulong addr, ulong len, int write)
{
        if(!okaddr(addr, len, write)){
                postnote(up, 1, "sys: bad address in syscall", NDebug);
                error(Ebadarg);
        }
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
        int m;
        ulong a;
        void *t;

        a = (ulong)s;
        while(PGROUND(a) != PGROUND(a+n-1)){
                /* spans pages; handle this page */
                m = BY2PG - (a & (BY2PG-1));
                t = memchr((void*)a, c, m);
                if(t)
                        return t;
                a += m;
                n -= m;
                if(a < KZERO)
                        validaddr(a, 1, 0);
        }

        /* fits in one page */
        return memchr((void*)a, c, n);
}

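/*
 * Find the segment of p containing addr, or nil.
 * If dolock is set, the segment is returned with its lk qlocked
 * and its bounds rechecked under the lock.
 */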
Segment*
seg(Proc *p, ulong addr, int dolock)
{
        Segment **s, **et, *n;

        et = &p->seg[NSEG];
        for(s = p->seg; s < et; s++) {
                n = *s;
                if(n == 0)
                        continue;
                if(addr >= n->base && addr < n->top) {
                        if(dolock == 0)
                                return n;

                        qlock(&n->lk);
                        if(addr >= n->base && addr < n->top)
                                return n;
                        qunlock(&n->lk);
                }
        }

        return 0;
}

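/*
 * Debugging: walk the current process's page tables and check
 * every resident page against the mmu with checkmmu.
 */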
extern void checkmmu(ulong, ulong);
void
checkpages(void)
{
        int checked;
        ulong addr, off;
        Pte *p;
        Page *pg;
        Segment **sp, **ep, *s;

        if(up == nil)
                return;

        checked = 0;
        for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
                s = *sp;
                if(s == nil)
                        continue;
                qlock(&s->lk);
                for(addr=s->base; addr<s->top; addr+=BY2PG){
                        off = addr - s->base;
                        p = s->map[off/PTEMAPMEM];
                        if(p == 0)
                                continue;
                        pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
                        if(pg == 0 || pagedout(pg))
                                continue;
                        checkmmu(addr, pg->pa);
                        checked++;
                }
                qunlock(&s->lk);
        }
        print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
}