2 #include "../port/lib.h"
/*
 * NOTE(review): this is a non-contiguous excerpt — the leading decimal on
 * each line is the original file's line number and gaps mark missing lines.
 * This fragment appears to be the interior of mmu0init(): building the
 * bootstrap page tables (identity map, KZERO kernel map, VIRTIO device map).
 */
/* identity-map all of DRAM with level-1 block entries: cached, writable kernel memory */
14 pe = PHYSDRAM + soc.dramsize;
15 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(1))
16 l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
17 | PTEKERNEL | PTESH(SHARE_INNER);
/* link the higher translation levels down to the level below, all within l1[]
 * (presumably only effective when PTLEVELS makes these levels real — TODO confirm) */
19 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(2))
20 l1[PTL1X(pa, 2)] = (uintptr)&l1[L1TABLEX(pa, 1)] | PTEVALID | PTETABLE;
22 for(pa = PHYSDRAM; pa < pe; pa += PGLSZ(3))
23 l1[PTL1X(pa, 3)] = (uintptr)&l1[L1TABLEX(pa, 2)] | PTEVALID | PTETABLE;
/* map the same DRAM again at KZERO, the kernel's virtual base */
26 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
27 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
28 | PTEKERNEL | PTESH(SHARE_INNER);
30 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
31 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
33 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
34 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/* map the I/O region at VIRTIO as device memory (PTEDEVICE, outer-shareable).
 * pe = -VIRTIO + soc.physio: -VIRTIO is the span from VIRTIO to the top of
 * the address space, so this bounds pa to keep va from wrapping — TODO confirm */
37 pe = -VIRTIO + soc.physio;
38 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
39 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
40 | PTEKERNEL | PTESH(SHARE_OUTER) | PTEDEVICE;
42 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
43 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
45 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
46 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/*
 * mmu0clear: remove the temporary physical identity map installed by the
 * boot-time table builder, keeping only entries that the KZERO map also
 * needs (PTL1X(pa) == PTL1X(va) aliases are left alone).
 * NOTE(review): excerpt is non-contiguous — the bodies of the if()s
 * (presumably zeroing l1[PTL1X(pa, n)]) are among the missing lines.
 */
50 mmu0clear(uintptr *l1)
54 pe = PHYSDRAM + soc.dramsize;
/* clear from the highest level down so upper levels are unlinked first — TODO confirm */
57 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3)){
58 if(PTL1X(pa, 3) != PTL1X(va, 3))
62 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2)){
63 if(PTL1X(pa, 2) != PTL1X(va, 2))
66 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
67 if(PTL1X(pa, 1) != PTL1X(va, 1))
/*
 * mmu1init fragment: allocate and zero this cpu's kernel page-table block,
 * L1SIZE-aligned as the hardware table walker requires — TODO confirm.
 * NOTE(review): the nil check between the allocation and the panic
 * (original line 76) is missing from this excerpt.
 */
75 m->mmul1 = mallocalign(L1SIZE+L1TOPSIZE, BY2PG, L1SIZE, 0);
77 panic("mmu1init: no memory for mmul1");
78 memset(m->mmul1, 0, L1SIZE+L1TOPSIZE);
/* paddr fragment: kernel virtual -> physical is a simple KZERO offset;
 * anything below KZERO is not a kernel address and is a fatal caller bug */
85 if((uintptr)va >= KZERO)
86 return (uintptr)va-KZERO;
87 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
/* NOTE(review): original line 94 is likely from a sibling predicate
 * (e.g. cankaddr) — its continuation is missing from this excerpt */
94 if(pa < (uintptr)-KZERO)
/* kaddr fragment: physical -> kernel virtual, valid only while pa+KZERO
 * does not wrap; out-of-range physical addresses are a fatal caller bug */
102 if(pa < (uintptr)-KZERO)
103 return (void*)(pa + KZERO);
104 panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
/*
 * mmukmap fragment: map [pa, pa+size) at va with level-1 block entries as
 * device memory (PTEDEVICE, outer-shareable). va must be level-1 aligned;
 * the end address is rounded up to a whole block.
 * NOTE(review): the loop header between lines 135 and 137 is missing from
 * this excerpt — presumably it steps pa/va by PGLSZ(1) up to pe.
 */
125 mmukmap(uintptr va, uintptr pa, usize size)
132 assert((va % PGLSZ(1)) == 0);
135 pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
137 ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
138 | PTEKERNEL | PTESH(SHARE_OUTER) | PTEDEVICE;
/*
 * mmuwalk fragment: descend the page-table tree for va from the top level
 * down to `level`, allocating intermediate table pages on demand, and
 * (presumably) return a pointer to the PTE at the requested level.
 * NOTE(review): excerpt is non-contiguous; the branch structure between
 * the visible lines is missing.
 */
147 mmuwalk(uintptr va, int level)
/* start at the top-level index within this cpu's kernel table block */
153 x = PTLX(va, PTLEVELS-1);
154 table = &m->mmul1[L1TABLEX(va, PTLEVELS-1)];
155 for(i = PTLEVELS-2; i >= level; i--){
/* bits 63:48 of a table descriptor are attributes, not address bits;
 * strip them (and the page-offset bits) to recover the table's pa */
158 if(pte & (0xFFFFULL<<48))
159 iprint("strange pte %#p va %#p\n", pte, va);
160 pte &= ~(0xFFFFULL<<48 | BY2PG-1);
/* take a fresh table page from the process's free list */
167 up->mmufree = pg->next;
/* record which user region the table covers and keep it on the
 * per-process list for the matching level, so mmuswitch/mmufree
 * can find it later — TODO confirm list semantics */
170 pg->va = va & -PGLSZ(1);
171 if((pg->next = up->mmul1) == nil)
176 pg->va = va & -PGLSZ(2);
177 if((pg->next = up->mmul2) == nil)
/* new table pages must start out empty */
182 memset(KADDR(pg->pa), 0, BY2PG);
/* link the new table into its parent and descend into it */
184 table[x] = pg->pa | PTEVALID | PTETABLE;
185 table = KADDR(pg->pa);
/* alternate path: link to the level-2 slice embedded in m->mmul1 */
187 table[x] = PADDR(&m->mmul1[L1TABLEX(va, 2)]) | PTEVALID | PTETABLE;
188 table = &m->mmul1[L1TABLEX(va, 2)];
191 x = PTLX(va, (uintptr)i);
/* which Proc currently owns each hardware ASID; slot 0 is reserved */
196 static Proc *asidlist[256];
/*
 * NOTE(review): the following lines are fragments of the ASID
 * allocate/release routines; most of their control flow is missing
 * from this excerpt.
 */
212 a %= nelem(asidlist);
214 continue; // reserved
/* an ASID slot is stealable if it is ours already, free, or its owner
 * has released it (asid < 0) and is not running anywhere (mach == nil) */
216 if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
234 if(a > 0 && asidlist[a] == p)
243 * Prevent the following scenario:
244 * pX sleeps on cpuA, leaving its page tables in mmul1
245 * pX wakes up on cpuB, and exits, freeing its page tables
246 * pY on cpuB allocates a freed page table page and overwrites it with data
247 * cpuA takes an interrupt, and is now running with bad page tables
248 * In theory this shouldn't hurt because only user address space tables
249 * are affected, and mmuswitch will clear mmul1 before a user process is
250 * dispatched. But empirically it correlates with weird problems, eg
251 * resetting of the core clock at 0x4000001C which confuses local timers.
/*
 * putmmu fragment: install the user mapping va -> pa for the current
 * process. mmuwalk may need fresh table pages, so keep feeding its free
 * list until the walk succeeds.
 * NOTE(review): excerpt is non-contiguous; the code reading `old` and the
 * surrounding control flow are among the missing lines.
 */
261 putmmu(uintptr va, uintptr pa, Page *pg)
266 // iprint("cpu%d: putmmu va %#p asid %d proc %lud %s\n", m->machno, va, up->asid, up->pid, up->text);
268 while((pte = mmuwalk(va, 0)) == nil){
270 assert(up->mmufree == nil);
271 up->mmufree = newpage(0, nil, 0);
/* if we are replacing a live translation, invalidate the stale TLB entry
 * for this ASID+page (tag layout: asid in bits 63:48, page number below) */
276 if((old & PTEVALID) != 0)
277 flushasidvall((uvlong)up->asid<<48 | va>>12);
279 flushasidva((uvlong)up->asid<<48 | va>>12);
/* user page: not-global, access-flag set, inner-shareable */
280 *pte = pa | PTEPAGE | PTEUSER | PTENG | PTEAF | PTESH(SHARE_INNER);
/* text pages written by pio() need d-cache writeback + i-cache invalidate
 * on this cpu before the user can execute from them */
281 if(pg->txtflush & (1UL<<m->machno)){
282 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
283 cachedwbinvse((void*)KADDR(pg->pa), BY2PG);
284 cacheiinvse((void*)va, BY2PG);
285 pg->txtflush &= ~(1UL<<m->machno);
/*
 * Fragment of the page-table release path (presumably mmufree): move the
 * process's level-1 and level-2 table-page lists onto its free list in
 * O(1) by splicing via the tail pointers.
 * NOTE(review): line 296's assert and the later use of p->mmul2 belong to
 * different branches/functions — the connecting lines are missing here.
 */
296 assert(p->mmul2 == nil);
299 p->mmul1tail->next = p->mmufree;
300 p->mmufree = p->mmul1;
301 p->mmul1 = p->mmul1tail = nil;
304 p->mmul2tail->next = p->mmufree;
305 p->mmufree = p->mmul2;
306 p->mmul2 = p->mmul2tail = nil;
/*
 * mmuswitch fragments: switch this cpu's translation tables to process p.
 * First wipe all top-level user-range entries, then (for a real process)
 * re-install p's table pages and switch TTBR with p's ASID.
 * NOTE(review): the gap between lines 320 and 330 suggests 316-320 are the
 * kernel-only/no-process path and 330-348 the per-process path — confirm.
 */
/* drop every user mapping from the top-level table */
316 for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
317 m->mmul1[PTL1X(va, PTLEVELS-1)] = 0;
/* point the hardware at this cpu's kernel tables, ASID 0 */
320 setttbr(PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
/* hook the process's level-1 table pages back into the cpu table */
330 for(t = p->mmul1; t != nil; t = t->next){
332 m->mmul1[PTL1X(va, 1)] = t->pa | PTEVALID | PTETABLE;
/* likewise the level-2 pages, plus the level-3 links to the embedded
 * level-2 slice of m->mmul1 — TODO confirm level numbering */
335 for(t = p->mmul2; t != nil; t = t->next){
337 m->mmul1[PTL1X(va, 2)] = t->pa | PTEVALID | PTETABLE;
339 m->mmul1[PTL1X(va, 3)] = PADDR(&m->mmul1[L1TABLEX(va, 2)]) |
/* discard any stale TLB entries tagged with this ASID */
345 flushasid((uvlong)p->asid<<48);
347 // iprint("cpu%d: mmuswitch asid %d proc %lud %s\n", m->machno, p->asid, p->pid, p->text);
/* switch TTBR to this cpu's tables, tagged with the process's ASID */
348 setttbr((uvlong)p->asid<<48 | PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
/*
 * mmurelease fragment: return the process's freed page-table pages to the
 * system. Each page on mmufree is expected to have exactly one reference;
 * anything else indicates list corruption and is fatal.
 * NOTE(review): the loop interior (the ref check and the page free call,
 * lines 360/362/364) is missing from this excerpt.
 */
359 if((t = p->mmufree) != nil){
361 p->mmufree = t->next;
363 panic("mmurelease: bad page ref");
365 } while((t = p->mmufree) != nil);
382 checkmmu(uintptr, uintptr)