2 #include "../port/lib.h"
/*
 * NOTE(review): fragmentary excerpt (interior lines missing; enclosing
 * function header not visible — presumably mmu0init, the boot page-table
 * builder). The stray leading number on each line looks like an extraction
 * artifact (the original file's line number fused into the text).
 *
 * Three mappings are built into the boot table l1:
 *   1. identity map of all DRAM (PHYSDRAM..PHYSDRAM+soc.dramsize)
 *   2. kernel map of the same DRAM at KZERO
 *   3. device map of the I/O window at VIRTIO -> soc.physio
 */
14 pe = PHYSDRAM + soc.dramsize;
/* level-1 block entries: identity-map DRAM, writable, inner-shareable */
15 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(1))
16 l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
17 | PTEKERNEL | PTESH(SHARE_INNER);
/* link upper translation levels (2, then 3) down to the level-1 tables;
 * presumably only reached when PTLEVELS > 2 — guard lines not visible here */
19 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(2))
20 l1[PTL1X(pa, 2)] = (uintptr)&l1[L1TABLEX(pa, 1)] | PTEVALID | PTETABLE;
22 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(3))
23 l1[PTL1X(pa, 3)] = (uintptr)&l1[L1TABLEX(pa, 2)] | PTEVALID | PTETABLE;
/* same DRAM again, mapped at the kernel virtual base KZERO */
26 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
27 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
28 | PTEKERNEL | PTESH(SHARE_INNER);
30 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
31 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
33 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
34 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/* device window: -VIRTIO (unsigned negate) is the span from VIRTIO to the
 * top of the address space, so pe bounds the physical I/O range.
 * PTEDEVICE selects device memory attributes; outer-shareable. */
37 pe = -VIRTIO + soc.physio;
38 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
39 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
40 | PTEKERNEL | PTESH(SHARE_OUTER) | PTEDEVICE;
42 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
43 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
45 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
46 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/*
 * mmu0clear: fragmentary excerpt (return type, braces and loop bodies not
 * visible here). Appears to tear down the temporary identity map left by
 * mmu0init, walking the DRAM range at each translation level and —
 * presumably, in the missing loop bodies — clearing any identity entry
 * whose table slot is NOT shared with the KZERO mapping (the PTL1X
 * inequality guards). TODO(review): confirm against the full source.
 */
50 mmu0clear(uintptr *l1)
54 pe = PHYSDRAM + soc.dramsize;
/* highest level first, so table links go away before the blocks they cover */
57 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3)){
58 if(PTL1X(pa, 3) != PTL1X(va, 3))
62 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2)){
63 if(PTL1X(pa, 2) != PTL1X(va, 2))
66 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
67 if(PTL1X(pa, 1) != PTL1X(va, 1))
/*
 * Fragment of mmu1init (name grounded by the panic string): allocate and
 * zero this cpu's kernel page table, L1SIZE-aligned as the hardware
 * requires; the nil check between these lines is not visible here.
 */
75 m->mmul1 = mallocalign(L1SIZE+L1TOPSIZE, BY2PG, L1SIZE, 0);
77 panic("mmu1init: no memory for mmul1");
78 memset(m->mmul1, 0, L1SIZE+L1TOPSIZE);
/*
 * Fragment of paddr: kernel virtual -> physical is a constant KZERO offset;
 * anything below KZERO is not a kernel address and is a fatal caller bug.
 */
85 if((uintptr)va >= KZERO)
86 return (uintptr)va-KZERO;
87 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
/*
 * Fragments of two physical->virtual helpers (original lines 94 and
 * 102-104 — an unseen function, presumably cankaddr, plus kaddr).
 * A physical address is representable in the KZERO window only while
 * pa + KZERO does not wrap, i.e. pa < -KZERO (unsigned negate).
 */
94 if(pa < (uintptr)-KZERO)
102 if(pa < (uintptr)-KZERO)
103 return (void*)(pa + KZERO);
104 panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
/*
 * mmukmap: fragmentary excerpt (return type, braces and most of the body
 * not visible). Maps [pa, pa+size) at va with level-1 block entries.
 * The memory-attribute index is smuggled in the low attribute bits of the
 * caller's va (PTEMA(7) mask); pe rounds the end up to a PGLSZ(1) boundary.
 * NOTE(review): only one table store is visible — presumably it sits inside
 * a loop over the range in the missing lines; confirm against full source.
 */
125 mmukmap(uintptr va, uintptr pa, usize size)
127 uintptr a, pe, off, attr;
132 attr = va & PTEMA(7);
136 pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
138 ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
139 | PTEKERNEL | PTESH(SHARE_OUTER) | attr;
/*
 * mmuwalk: fragmentary excerpt. Walks the translation tables from the top
 * level (PTLEVELS-1) down to the requested level for va, allocating
 * intermediate user page-table pages on the way (from up->mmufree, linked
 * onto up->mmul1 / up->mmul2 per level) and returning — in the unseen tail
 * — a pointer to the entry at the target level.
 */
148 mmuwalk(uintptr va, int level)
154 x = PTLX(va, PTLEVELS-1);
155 table = &m->mmul1[L1TABLEX(va, PTLEVELS-1)];
156 for(i = PTLEVELS-2; i >= level; i--){
/* a table descriptor must not have attribute bits in [63:48]; diagnose,
 * then mask them and the page offset off to get the next table's pa */
159 if(pte & (0xFFFFULL<<48))
160 iprint("strange pte %#p va %#p\n", pte, va);
161 pte &= ~(0xFFFFULL<<48 | BY2PG-1);
/* take a preallocated page off the per-process free list */
168 up->mmufree = pg->next;
/* record which aligned region this table page covers, and chain it onto
 * the level-appropriate list (mmul1 for level-1 tables, mmul2 below) */
171 pg->va = va & -PGLSZ(1);
172 if((pg->next = up->mmul1) == nil)
177 pg->va = va & -PGLSZ(2);
178 if((pg->next = up->mmul2) == nil)
/* fresh table page: zero it, install the table descriptor, descend */
183 memset(KADDR(pg->pa), 0, BY2PG);
185 table[x] = pg->pa | PTEVALID | PTETABLE;
186 table = KADDR(pg->pa);
/* presumably the PTLEVELS>2 path: link through the kernel's own level-2
 * region of mmul1 — TODO(review) confirm against full source */
188 table[x] = PADDR(&m->mmul1[L1TABLEX(va, 2)]) | PTEVALID | PTETABLE;
189 table = &m->mmul1[L1TABLEX(va, 2)];
192 x = PTLX(va, (uintptr)i);
/* owner of each hardware ASID (8-bit, hence 256 slots); nil = free */
197 static Proc *asidlist[256];
/*
 * Fragments of the ASID allocator / releaser (function headers not
 * visible). Allocation appears to probe asidlist round-robin, skipping a
 * reserved slot and stealing entries whose owner is gone (asid < 0) and
 * not running anywhere (mach == nil); release only clears a slot the
 * process still owns. TODO(review): confirm against full source.
 */
213 a %= nelem(asidlist);
215 continue; // reserved
217 if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
235 if(a > 0 && asidlist[a] == p)
244 * Prevent the following scenario:
245 * pX sleeps on cpuA, leaving its page tables in mmul1
246 * pX wakes up on cpuB, and exits, freeing its page tables
247 * pY on cpuB allocates a freed page table page and overwrites with data
248 * cpuA takes an interrupt, and is now running with bad page tables
249 * In theory this shouldn't hurt because only user address space tables
250 * are affected, and mmuswitch will clear mmul1 before a user process is
251 * dispatched. But empirically it correlates with weird problems, eg
252 * resetting of the core clock at 0x4000001C which confuses local timers.
/*
 * putmmu: fragmentary excerpt. Installs the user translation va -> pa for
 * the current process, allocating missing intermediate tables via mmuwalk
 * (feeding it newpage() pages through up->mmufree), invalidating any stale
 * TLB entry by ASID+page, and doing i-cache maintenance on text pages.
 */
262 putmmu(uintptr va, uintptr pa, Page *pg)
267 // iprint("cpu%d: putmmu va %#p asid %d proc %lud %s\n", m->machno, va, up->asid, up->pid, up->text);
/* retry until the walk succeeds; each failure means a table page was
 * needed — hand it one fresh page and walk again */
269 while((pte = mmuwalk(va, 0)) == nil){
271 assert(up->mmufree == nil);
272 up->mmufree = newpage(0, nil, 0);
/* replacing a valid entry: flush the old translation for this asid+page.
 * NOTE(review): two flush variants appear (flushasidvall/flushasidva);
 * the distinguishing condition between them is in unseen lines. */
277 if((old & PTEVALID) != 0)
278 flushasidvall((uvlong)up->asid<<48 | va>>12);
280 flushasidva((uvlong)up->asid<<48 | va>>12);
/* user page entry: non-global (per-ASID), access-flag set, inner-shareable */
281 *pte = pa | PTEPAGE | PTEUSER | PTENG | PTEAF | PTESH(SHARE_INNER);
282 if(pg->txtflush & (1UL<<m->machno)){
283 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
284 cachedwbinvse((void*)KADDR(pg->pa), BY2PG);
285 cacheiinvse((void*)va, BY2PG);
286 pg->txtflush &= ~(1UL<<m->machno);
/*
 * Fragments of the page-table recycling path (original lines 297 and
 * 300-307; possibly two different functions — the assert at 297 is not
 * contiguous with the list splicing below). The splices move the whole
 * mmul1 and mmul2 table-page lists onto p->mmufree for reuse, using the
 * tail pointers for O(1) concatenation, then empty both lists.
 */
297 assert(p->mmul2 == nil);
300 p->mmul1tail->next = p->mmufree;
301 p->mmufree = p->mmul1;
302 p->mmul1 = p->mmul1tail = nil;
305 p->mmul2tail->next = p->mmufree;
306 p->mmufree = p->mmul2;
307 p->mmul2 = p->mmul2tail = nil;
/*
 * Fragments of the context-switch path (presumably mmuswitch): clear the
 * previous process's user entries from this cpu's top-level table,
 * reinstall the incoming process's table pages, flush its ASID, and point
 * TTBR at the top-level table tagged with that ASID.
 */
317 for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
318 m->mmul1[PTL1X(va, PTLEVELS-1)] = 0;
321 setttbr(PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
/* relink each saved table page at the level it was allocated for
 * (pg->va recorded by mmuwalk); va assignments are in unseen lines */
331 for(t = p->mmul1; t != nil; t = t->next){
333 m->mmul1[PTL1X(va, 1)] = t->pa | PTEVALID | PTETABLE;
336 for(t = p->mmul2; t != nil; t = t->next){
338 m->mmul1[PTL1X(va, 2)] = t->pa | PTEVALID | PTETABLE;
340 m->mmul1[PTL1X(va, 3)] = PADDR(&m->mmul1[L1TABLEX(va, 2)]) |
/* drop any stale TLB entries for this process's asid before switching */
346 flushasid((uvlong)p->asid<<48);
348 // iprint("cpu%d: mmuswitch asid %d proc %lud %s\n", m->machno, p->asid, p->pid, p->text);
349 setttbr((uvlong)p->asid<<48 | PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
/*
 * Fragment of mmurelease: drain p->mmufree, returning each table page;
 * the ref-count check (unseen lines) panics on a bad reference count.
 */
360 if((t = p->mmufree) != nil){
362 p->mmufree = t->next;
364 panic("mmurelease: bad page ref");
366 } while((t = p->mmufree) != nil);
/* checkmmu: signature only visible here (unnamed va/pa parameters —
 * presumably a no-op stub on this port; body not shown) */
383 checkmmu(uintptr, uintptr)