2 #include "../port/lib.h"
/*
 * NOTE(review): sampled fragment of the boot-time page-table builder
 * (mmu0init-style); intervening original lines (function header, braces,
 * else/break statements) are missing from this view.
 */
11 uintptr va, pa, pe, attr;
/* dram at KZERO: writable kernel memory, user-no-execute, inner-shareable */
14 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
15 pe = PHYSDRAM + soc.dramsize;
/* clamp so the KZERO window cannot extend past the top of the address space */
16 if(pe > (uintptr)-KZERO)
/* map with level-1 blocks, creating both the KZERO alias (va) and a
 * physical identity alias (pa) needed while the mmu is being enabled */
18 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
19 if(pe - pa < PGLSZ(1)){
/* tail shorter than a level-1 block: point both level-1 slots at this
 * table and finish the remainder with level-0 page entries */
20 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
21 l1[PTL1X(pa, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
22 for(; pa < pe; pa += PGLSZ(0), va += PGLSZ(0))
23 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
/* full-size block: block entries for both the KZERO and identity slots */
26 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
27 l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* presumably only when PTLEVELS has these levels - link the upper
 * translation levels down to the level-1 table entries built above */
30 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
31 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
33 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
34 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/* dram again at KMAP: same as the KZERO mapping but additionally PTEPXN
 * (kernel-no-execute), giving a data-only view of all of ram */
37 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_INNER);
38 pe = PHYSDRAM + soc.dramsize;
39 for(pa = PHYSDRAM, va = KMAP; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
40 if(pe - pa < PGLSZ(1)){
/* partial trailing block: fall back to level-0 page mappings */
41 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
42 for(; pa < pe; pa += PGLSZ(0), va += PGLSZ(0))
43 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
46 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* link the upper translation levels (if present) to the level-1 entries */
49 for(pa = PHYSDRAM, va = KMAP; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
50 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
52 for(pa = PHYSDRAM, va = KMAP; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
53 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/* peripheral i/o window at soc.virtio: device memory attributes,
 * outer-shareable, never executable by kernel or user */
56 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
57 pe = soc.physio + soc.iosize;
58 for(pa = soc.physio, va = soc.virtio; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
/* start (or current position) not block-aligned: use level-0 pages
 * until a level-1 block boundary is reached */
59 if(((pa|va) & PGLSZ(1)-1) != 0){
60 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
61 for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
/* the slot must still be empty: i/o ranges must not overlap */
62 assert(l1[PTLX(va, 0)] == 0);
63 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
/* aligned remainder mapped with level-1 block entries */
67 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* 1MB of per-soc "arm local" registers mapped at ARMLOCAL, same device
 * attributes and same align-then-block strategy as the i/o window above */
71 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
72 pe = soc.armlocal + MB;
73 for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
74 if(((pa|va) & PGLSZ(1)-1) != 0){
75 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
76 for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
/* must not collide with an existing mapping */
77 assert(l1[PTLX(va, 0)] == 0);
78 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
82 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* 512MB pci memory window mapped as device memory at VIRTPCI;
 * assumed block-aligned (no level-0 fallback here) */
87 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
88 pe = soc.pciwin + 512*MB;
89 for(pa = soc.pciwin, va = VIRTPCI; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
90 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* finally link the upper-level tables for the whole kernel half,
 * from KSEG0 until the address wraps to 0 */
94 for(va = KSEG0; va != 0; va += PGLSZ(2))
95 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
97 for(va = KSEG0; va != 0; va += PGLSZ(3))
98 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/*
 * remove the temporary physical identity aliases created by the early
 * mapping code, keeping the KZERO mappings; a slot is only cleared when
 * the identity index differs from the KZERO index, so shared slots
 * (where pa and va land on the same entry) are preserved.
 * NOTE(review): return type, braces and declarations are outside this view.
 */
102 mmu0clear(uintptr *l1)
106 pe = PHYSDRAM + soc.dramsize;
107 if(pe > (uintptr)-KZERO)
108 pe = (uintptr)-KZERO;
/* level 1 */
109 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
110 if(PTL1X(pa, 1) != PTL1X(va, 1))
111 l1[PTL1X(pa, 1)] = 0;
/* upper levels, presumably conditional on PTLEVELS */
113 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
114 if(PTL1X(pa, 2) != PTL1X(va, 2))
115 l1[PTL1X(pa, 2)] = 0;
117 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
118 if(PTL1X(pa, 3) != PTL1X(va, 3))
119 l1[PTL1X(pa, 3)] = 0;
/*
 * recreate the physical identity map by copying the KZERO entries into
 * the identity slots at every level, then load the translation table
 * base register; inverse of mmu0clear (used e.g. around reboot or
 * secondary-core startup - TODO confirm against callers).
 */
123 mmuidmap(uintptr *l1)
127 pe = PHYSDRAM + soc.dramsize;
128 if(pe > (uintptr)-KZERO)
129 pe = (uintptr)-KZERO;
130 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
131 l1[PTL1X(pa, 1)] = l1[PTL1X(va, 1)];
133 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
134 l1[PTL1X(pa, 2)] = l1[PTL1X(va, 2)];
136 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
137 l1[PTL1X(pa, 3)] = l1[PTL1X(va, 3)];
/* point the hardware at the top-level table (asid 0) */
138 setttbr(PADDR(&l1[L1TABLEX(0, PTLEVELS-1)]));
/* per-cpu top-level user page table: page-aligned and initially empty */
145 m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
147 panic("mmu1init: no memory for mmutop");
148 memset(m->mmutop, 0, L1TOPSIZE);
152 /* KZERO maps the first 1GB of ram */
/* paddr: kernel (KZERO) virtual address -> physical; panics otherwise */
156 if((uintptr)va >= KZERO)
157 return (uintptr)va-KZERO;
158 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
/* NOTE(review): the check below belongs to a separate helper
 * (cankaddr-style predicate) whose surrounding lines are missing */
165 if(pa < (uintptr)-KZERO)
/* kaddr: physical -> KZERO virtual, valid only within the KZERO window */
173 if(pa < (uintptr)-KZERO)
174 return (void*)(pa + KZERO);
175 panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
179 /* KMAP maps all of ram (up to 4GB) */
/* kmapaddr: prefer the KZERO alias when the physical address is low
 * enough, otherwise use the KMAP alias which covers all of ram */
183 if(pa < (uintptr)-KZERO)
184 return (void*)(pa + KZERO);
185 return (void*)(pa + KMAP);
/* mapping a Page is pure address arithmetic via kmapaddr on this port */
191 return kmapaddr(p->pa);
/*
 * map a physical range into the kernel with level-1 blocks; the memory
 * attribute index is carried in the low bits of va (PTEMA mask).
 * NOTE(review): loop header and return are outside this view.
 */
205 mmukmap(uintptr va, uintptr pa, usize size)
207 uintptr a, pe, off, attr;
/* extract memory-attribute bits smuggled in va */
212 attr = va & PTEMA(7);
/* round the end of the range up to a whole level-1 block */
216 pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
/* write block entries directly into the boot level-1 table */
219 ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
220 | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | attr;
/*
 * translate a device physical address to its pre-mapped virtual window
 * (pci window, arm-local, or peripheral i/o); the size argument is
 * unnamed and unused because these windows were mapped at boot.
 * checks run highest window first, so ordering of the base addresses matters.
 */
229 vmap(uintptr pa, int)
231 if(soc.pciwin && pa >= soc.pciwin)
232 return (void*)(VIRTPCI + (pa - soc.pciwin));
233 if(soc.armlocal && pa >= soc.armlocal)
234 return (void*)(ARMLOCAL + (pa - soc.armlocal));
235 if(soc.physio && pa >= soc.physio)
236 return (void*)(soc.virtio + (pa - soc.physio));
/*
 * walk the page-table tree for va from the top level down to the
 * requested level, allocating intermediate table pages from the
 * process's free list as needed; returns a pointer to the pte
 * (missing lines presumably handle the start of the walk and failure).
 */
246 mmuwalk(uintptr va, int level)
252 x = PTLX(va, PTLEVELS-1);
254 for(i = PTLEVELS-2; i >= level; i--){
/* a table descriptor should not have any of the top 16 bits set */
257 if(pte & (0xFFFFULL<<48))
258 iprint("strange pte %#p va %#p\n", pte, va);
/* keep only the next-level table's physical page address */
259 pte &= ~(0xFFFFULL<<48 | BY2PG-1);
/* take a page off the free list and chain it on the per-level
 * head/tail lists so it can be reclaimed later */
264 up->mmufree = pg->next;
265 pg->va = va & -PGLSZ(i+1);
266 if((pg->next = up->mmuhead[i+1]) == nil)
267 up->mmutail[i+1] = pg;
268 up->mmuhead[i+1] = pg;
/* fresh table pages start out zeroed (all entries invalid) */
270 memset(kmapaddr(pte), 0, BY2PG);
272 table[x] = pte | PTEVALID | PTETABLE;
/* descend into the next-level table */
274 table = kmapaddr(pte);
275 x = PTLX(va, (uintptr)i);
/* last process observed using each hardware asid (256-entry asid space) */
280 static Proc *asidlist[256];
/* hash into the asid space; slot 0 is skipped below */
296 a %= nelem(asidlist);
298 continue; // reserved
/* claim the slot when it is ours, free, or its owner has lost its
 * asid and is not currently running on any cpu */
300 if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
327 * Prevent the following scenario:
328 * pX sleeps on cpuA, leaving its page tables in mmutop
329 * pX wakes up on cpuB, and exits, freeing its page tables
330 * pY on cpuB allocates a freed page table page and overwrites with data
331 * cpuA takes an interrupt, and is now running with bad page tables
332 * In theory this shouldn't hurt because only user address space tables
333 * are affected, and mmuswitch will clear mmutop before a user process is
334 * dispatched. But empirically it correlates with weird problems, eg
335 * resetting of the core clock at 0x4000001C which confuses local timers.
/*
 * install a user-space pte for va; pa carries the permission bits in
 * addition to the physical page address. retries the table walk after
 * topping up the process's page free list when it comes back empty.
 */
345 putmmu(uintptr va, uintptr pa, Page *pg)
351 while((pte = mmuwalk(va, 0)) == nil){
/* walk failed for lack of table pages: add one and retry */
353 up->mmufree = newpage(0, nil, 0);
/* an existing valid translation for this va/asid must be flushed
 * from the tlb before being replaced */
358 if((old & PTEVALID) != 0)
359 flushasidvall((uvlong)up->asid<<48 | va>>12);
361 flushasidva((uvlong)up->asid<<48 | va>>12);
/* non-global, user-accessible, kernel-no-execute, inner-shareable page */
362 *pte = pa | PTEPAGE | PTEUSER | PTEPXN | PTENG | PTEAF | PTESH(SHARE_INNER);
/* synchronize the i-cache with the d-cache for modified text pages */
363 if(pg->txtflush & (1UL<<m->machno)){
364 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
365 cachedwbinvse(kmap(pg), BY2PG);
366 cacheiinvse((void*)va, BY2PG);
367 pg->txtflush &= ~(1UL<<m->machno);
/* splice every per-level table-page list onto the process free list
 * and empty the head/tail pointers */
379 for(i=1; i<PTLEVELS; i++){
380 if(p->mmuhead[i] == nil)
382 p->mmutail[i]->next = p->mmufree;
383 p->mmufree = p->mmuhead[i];
384 p->mmuhead[i] = p->mmutail[i] = nil;
/* clear the user portion of the per-cpu top-level table before
 * installing the incoming process's tables */
394 for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
395 m->mmutop[PTLX(va, PTLEVELS-1)] = 0;
398 setttbr(PADDR(m->mmutop));
/* plug the process's top-level table pages into mmutop slots */
407 for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
409 m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
/* presumably on asid (re)allocation: flush stale translations tagged
 * with it, then switch ttbr with the asid in the top 16 bits */
413 flushasid((uvlong)p->asid<<48);
415 setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
/* return every page on the process free list; a page with an
 * unexpected reference count indicates list corruption */
426 if((t = p->mmufree) != nil){
428 p->mmufree = t->next;
430 panic("mmurelease: bad page ref");
432 } while((t = p->mmufree) != nil);
449 checkmmu(uintptr, uintptr)