2 #include "../port/lib.h"
/*
 * Boot page-table construction fragment (presumably mmu0init — the
 * function header and several lines, including the clamp body after
 * line 16 and the else/continue structure of the loops, are elided
 * from this excerpt; confirm against the full file).
 *
 * DRAM is entered twice at each level: once at its KZERO kernel
 * virtual address and once identity-mapped (pa-indexed slot), so the
 * MMU can be turned on while still executing from physical addresses.
 */
11 uintptr va, pa, pe, attr;
/* normal kernel RAM: writable, access-flag set, inner-shareable */
14 attr = PTEWRITE | PTEAF | PTEKERNEL | PTESH(SHARE_INNER);
15 pe = PHYSDRAM + soc.dramsize;
/* presumably clamps pe to the KZERO window size — the clamped
 * assignment (original line 17) is elided; TODO confirm */
16 if(pe > (uintptr)-KZERO)
/* map DRAM with level-1 blocks, both KZERO and identity */
18 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
/* tail smaller than a level-1 block: fall back to level-0 pages */
19 if(pe - pa < PGLSZ(1)){
/* both L1 slots point at l1 itself, reused as the level-0 table */
20 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
21 l1[PTL1X(pa, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
22 for(; pa < pe; pa += PGLSZ(0), va += PGLSZ(0))
23 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
/* whole-block case (else branch partly elided): level-1 blocks */
26 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
27 l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | attr;
29 pe = (uintptr)-KZERO; /* populate top levels for mmukmap() */
/* upper levels (2, then 3): table descriptors pointing at the
 * corresponding sub-tables inside the l1 array, again for both the
 * KZERO-indexed and identity-indexed slots */
31 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2)){
32 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
33 l1[PTL1X(pa, 2)] = (uintptr)&l1[L1TABLEX(pa, 1)] | PTEVALID | PTETABLE;
36 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3)){
37 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
38 l1[PTL1X(pa, 3)] = (uintptr)&l1[L1TABLEX(pa, 2)] | PTEVALID | PTETABLE;
/* device registers: outer-shareable with device memory attributes */
42 attr = PTEWRITE | PTEAF | PTEKERNEL | PTESH(SHARE_OUTER) | PTEDEVICE;
43 pe = soc.physio + IOSIZE;
/* map the I/O window at VIRTIO; same block/page tail split as DRAM,
 * but note only the virtual (VIRTIO) side is written here — no
 * identity alias for the device window */
44 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
45 if(pe - pa < PGLSZ(1)){
46 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
47 for(; pa < pe; pa += PGLSZ(0), va += PGLSZ(0))
48 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
51 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
54 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
55 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
57 for(pa = soc.physio, va = VIRTIO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
58 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/* ARM-local peripheral window: one MB at ARMLOCAL, reusing the
 * device attr set above */
61 pe = soc.armlocal + MB;
62 for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
63 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
65 for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
66 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
68 for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
69 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/*
 * mmu0clear fragment (excerpt — the statements executed under the
 * visible if tests are elided). Drops the temporary identity aliases
 * installed by the boot mapping: at each level, only slots whose
 * pa-index differs from the va-index are touched, so the kernel
 * (KZERO) mapping itself is left intact.
 */
73 mmu0clear(uintptr *l1)
/* same DRAM extent (and, presumably, the same clamp — the clamp body
 * after line 78 is elided) as the boot mapping code */
77 pe = PHYSDRAM + soc.dramsize;
78 if(pe > (uintptr)-KZERO)
80 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
81 if(PTL1X(pa, 1) != PTL1X(va, 1))
84 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
85 if(PTL1X(pa, 2) != PTL1X(va, 2))
88 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
89 if(PTL1X(pa, 3) != PTL1X(va, 3))
/*
 * Fragment that appears to belong to mmu1init (per-CPU MMU bring-up;
 * function header elided — TODO confirm). Re-creates the identity
 * aliases by copying each KZERO-indexed entry into the corresponding
 * physical-address-indexed slot at every level, then points TTBR at
 * the top-level table and sets up this CPU's user top-level table.
 */
101 pe = PHYSDRAM + soc.dramsize;
/* clamp to the KZERO direct-map window */
102 if(pe > (uintptr)-KZERO)
103 pe = (uintptr)-KZERO;
104 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
105 l1[PTL1X(pa, 1)] = l1[PTL1X(va, 1)];
107 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
108 l1[PTL1X(pa, 2)] = l1[PTL1X(va, 2)];
110 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
111 l1[PTL1X(pa, 3)] = l1[PTL1X(va, 3)];
/* switch translation to the top-level table embedded in l1 */
112 setttbr(PADDR(&l1[L1TABLEX(0, PTLEVELS-1)]));
/* per-CPU top-level user page table: page-aligned and zeroed;
 * the nil check between lines 118 and 120 is elided */
118 m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
120 panic("mmu1init: no memory for mmutop");
121 memset(m->mmutop, 0, L1TOPSIZE);
/*
 * paddr fragment: kernel virtual → physical. Addresses at or above
 * KZERO are in the direct map and convert by subtracting KZERO;
 * anything else is a fatal error (caller pc included for diagnosis).
 */
128 if((uintptr)va >= KZERO)
129 return (uintptr)va-KZERO;
130 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
/* presumably cankaddr(): a physical address is direct-mappable iff it
 * fits below the KZERO window size ((uintptr)-KZERO) — body and
 * function header elided from this excerpt; TODO confirm */
137 if(pa < (uintptr)-KZERO)
/*
 * kaddr fragment: physical → kernel virtual, the inverse of paddr.
 * Only physical addresses that fit in the KZERO direct-map window
 * are legal; anything else panics with the caller's pc.
 */
145 if(pa < (uintptr)-KZERO)
146 return (void*)(pa + KZERO);
147 panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
/*
 * mmukmap fragment (excerpt): map a physical range for kernel use at
 * va. The caller encodes the memory-attribute index in the low bits
 * of va (PTEMA(7)); several lines (use of `off`/`a`, any TLB/cache
 * maintenance, the return value) are elided from this excerpt.
 */
168 mmukmap(uintptr va, uintptr pa, usize size)
170 uintptr a, pe, off, attr;
/* extract caller-supplied memory attributes from va */
175 attr = va & PTEMA(7);
/* round the end of the range up to a whole level-1 block */
179 pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
/* writable, accessed, outer-shareable level-1 block in the boot
 * table L1, with the caller's attribute bits OR'd in */
181 ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
182 | PTEKERNEL | PTESH(SHARE_OUTER) | attr;
/*
 * mmuwalk fragment (excerpt): descend the page-table tree from the
 * top level down to the requested level, allocating missing
 * intermediate tables from the current process's mmufree list, and
 * (in the elided tail) return a pointer to the PTE. The starting
 * table base and the branch structure between the visible lines are
 * elided from this excerpt.
 */
191 mmuwalk(uintptr va, int level)
/* index of va in the top-level table */
197 x = PTLX(va, PTLEVELS-1);
199 for(i = PTLEVELS-2; i >= level; i--){
/* a table descriptor should never carry the high 16 bits; warn,
 * then strip them (and the low page-offset bits) to recover the
 * physical address of the next-level table */
202 if(pte & (0xFFFFULL<<48))
203 iprint("strange pte %#p va %#p\n", pte, va);
204 pte &= ~(0xFFFFULL<<48 | BY2PG-1);
/* missing level: take a page off the per-process free list and
 * thread it onto the mmuhead/mmutail chain for level i+1 so it can
 * be reclaimed later */
210 up->mmufree = pg->next;
211 pg->va = va & -PGLSZ(i+1);
212 if((pg->next = up->mmuhead[i+1]) == nil)
213 up->mmutail[i+1] = pg;
214 up->mmuhead[i+1] = pg;
/* a fresh table must start out with all entries invalid */
215 memset(KADDR(pg->pa), 0, BY2PG);
/* link the new table in, then descend into it */
217 table[x] = pg->pa | PTEVALID | PTETABLE;
218 table = KADDR(pg->pa);
220 x = PTLX(va, (uintptr)i);
/*
 * ASID management fragments (excerpt; the enclosing allocate/release
 * functions are elided). asidlist maps each of the 256 hardware ASIDs
 * to the Proc currently owning it.
 */
225 static Proc *asidlist[256];
/* wrap the candidate ASID into table range */
241 a %= nelem(asidlist);
/* presumably skips a reserved ASID value (condition elided) — TODO confirm */
243 continue; // reserved
/* a slot is usable when it is already ours, free, or its owner has
 * no valid asid and is not currently on any cpu */
245 if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
/* release path (context elided): only clear slot a if p still owns it */
263 if(a > 0 && asidlist[a] == p)
272 * Prevent the following scenario:
273 * pX sleeps on cpuA, leaving its page tables in mmutop
274 * pX wakes up on cpuB, and exits, freeing its page tables
275 * pY on cpuB allocates a freed page table page and overwrites with data
276 * cpuA takes an interrupt, and is now running with bad page tables
277 * In theory this shouldn't hurt because only user address space tables
278 * are affected, and mmuswitch will clear mmutop before a user process is
279 * dispatched. But empirically it correlates with weird problems, eg
280 * resetting of the core clock at 0x4000001C which confuses local timers.
/*
 * putmmu fragment (excerpt): install a user mapping va → pa for page
 * pg. Walks to the leaf PTE — refilling the process's free list with
 * new pages when the walk fails — flushes any prior translation from
 * the TLB by ASID+va, writes the new user PTE, and lazily syncs the
 * I-cache for freshly written text pages.
 */
290 putmmu(uintptr va, uintptr pa, Page *pg)
/* retry the walk until it succeeds; allocate a free page on failure */
296 while((pte = mmuwalk(va, 0)) == nil){
298 up->mmufree = newpage(0, nil, 0);
/* TLB maintenance keyed on the old PTE's validity; the control flow
 * between lines 304 and 306 (which of the two flushes runs when) is
 * elided — confirm against the full file */
303 if((old & PTEVALID) != 0)
304 flushasidvall((uvlong)up->asid<<48 | va>>12);
306 flushasidva((uvlong)up->asid<<48 | va>>12);
/* user leaf PTE: not-global (ASID-tagged), accessed, inner-shareable;
 * permission/attribute bits presumably arrive OR'd into pa from the
 * caller — TODO confirm */
307 *pte = pa | PTEPAGE | PTEUSER | PTENG | PTEAF | PTESH(SHARE_INNER);
308 if(pg->txtflush & (1UL<<m->machno)){
309 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
310 cachedwbinvse((void*)KADDR(pg->pa), BY2PG);
311 cacheiinvse((void*)va, BY2PG);
312 pg->txtflush &= ~(1UL<<m->machno);
/*
 * Fragment (enclosing function header elided — presumably a
 * page-table release helper such as mmufree(Proc*)): splice every
 * level's chain of page-table pages onto the process's mmufree list
 * in one O(1) operation per level, emptying the head/tail pointers.
 * The statement under the nil check at line 325 is elided.
 */
324 for(i=1; i<PTLEVELS; i++){
325 if(p->mmuhead[i] == nil)
/* splice the whole level-i chain onto the free list */
327 p->mmutail[i]->next = p->mmufree;
328 p->mmufree = p->mmuhead[i];
329 p->mmuhead[i] = p->mmutail[i] = nil;
/*
 * mmuswitch-style fragments (excerpt; enclosing function header and
 * connecting lines elided — TODO confirm). Clears all user entries
 * from this CPU's top-level table, re-installs the incoming process's
 * top-level page tables, and switches TTBR/ASID.
 */
/* wipe the user half of the per-CPU top-level table */
339 for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
340 m->mmutop[PTLX(va, PTLEVELS-1)] = 0;
/* no user mappings case: plain mmutop, no ASID tag in TTBR */
343 setttbr(PADDR(m->mmutop));
/* hook each of the process's top-level table pages into mmutop;
 * the derivation of va (presumably from t->va) is elided */
352 for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
354 m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
/* flush this ASID's stale TLB entries (guard condition elided) */
358 flushasid((uvlong)p->asid<<48);
/* TTBR carries the ASID in its upper 16 bits */
360 setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
/*
 * mmurelease fragment (excerpt): drain the process's mmufree list,
 * returning each page-table page to the system; the reference-count
 * check guarding the panic, and the actual free call, are elided.
 */
371 if((t = p->mmufree) != nil){
373 p->mmufree = t->next;
/* page-table pages are never shared; a ref other than 1 is fatal */
375 panic("mmurelease: bad page ref");
377 } while((t = p->mmufree) != nil);
/* checkmmu: debugging hook; body not visible in this excerpt —
 * presumably a no-op on this port (unnamed parameters suggest they
 * are intentionally unused) */
394 checkmmu(uintptr, uintptr)