2 #include "../port/lib.h"
/*
 * NOTE(review): fragmentary listing of the boot-time page-table setup
 * (mmu0init). The embedded source line numbers are non-contiguous, so
 * statements and closing braces are missing between the lines kept
 * here; comments only were added, the code itself is untouched.
 */
11 uintptr va, pa, pe, attr;
/* normal DRAM attributes: writable, access flag set, kernel-only, user no-execute, inner-shareable */
14 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
/* map DRAM with level-1 blocks twice: at KZERO (kernel view) and at its own physical address (identity map) */
16 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
17 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
18 l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* device attributes for the MMIO window: additionally privileged no-execute, outer-shareable, device memory type */
22 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
23 pe = soc.physio + soc.iosize;
24 for(pa = soc.physio, va = soc.virtio; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
/* block-unaligned edge: point the level-1 slot at a table and fill level-0 page entries until block alignment is reached */
25 if(((pa|va) & PGLSZ(1)-1) != 0){
26 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
27 for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
28 assert(l1[PTLX(va, 0)] == 0);
29 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
/* aligned remainder: whole level-1 blocks */
33 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* same scheme for the per-core ("armlocal") peripheral region, one MB mapped at ARMLOCAL */
37 attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
38 pe = soc.armlocal + MB;
39 for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
40 if(((pa|va) & PGLSZ(1)-1) != 0){
41 l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
42 for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
43 assert(l1[PTLX(va, 0)] == 0);
44 l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
48 l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
/* link upper translation levels: each level-2/level-3 slot for KSEG0 points at the corresponding next-lower table */
52 for(va = KSEG0; va != 0; va += PGLSZ(2))
53 l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
55 for(va = KSEG0; va != 0; va += PGLSZ(3))
56 l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
/*
 * mmu0clear: undo the temporary identity mapping set up for boot,
 * at every translation level, wherever the identity slot differs
 * from the KZERO slot (shared slots must survive).
 * NOTE(review): fragmentary listing — the statements executed when
 * each condition holds (presumably clearing l1[PTL1X(pa, n)]) are
 * missing from this extract; confirm against the full source.
 */
60 mmu0clear(uintptr *l1)
65 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
66 if(PTL1X(pa, 1) != PTL1X(va, 1))
69 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
70 if(PTL1X(pa, 2) != PTL1X(va, 2))
73 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
74 if(PTL1X(pa, 3) != PTL1X(va, 3))
/*
 * NOTE(review): fragment of a function (signature not in this extract;
 * presumably mmuidmap) that re-creates the identity map by copying the
 * KZERO entries to the identity slots at every level, then loads the
 * top-level table into the translation table base register.
 */
83 pe = PHYSDRAM + soc.dramsize;
/* clamp to what KZERO can address */
84 if(pe > (uintptr)-KZERO)
86 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
87 l1[PTL1X(pa, 1)] = l1[PTL1X(va, 1)];
89 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
90 l1[PTL1X(pa, 2)] = l1[PTL1X(va, 2)];
92 for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
93 l1[PTL1X(pa, 3)] = l1[PTL1X(va, 3)];
94 setttbr(PADDR(&l1[L1TABLEX(0, PTLEVELS-1)]));
/*
 * NOTE(review): fragment of mmu1init — allocate and zero this cpu's
 * top-level page table. The nil-check line between the allocation and
 * the panic is missing from this extract.
 */
101 m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
103 panic("mmu1init: no memory for mmutop");
104 memset(m->mmutop, 0, L1TOPSIZE);
108 /* KZERO maps the first 1GB of ram */
/*
 * NOTE(review): body fragment of paddr (signature missing from this
 * extract): virtual-to-physical for KZERO-mapped addresses only;
 * anything below KZERO is a caller bug and panics.
 */
112 if((uintptr)va >= KZERO)
113 return (uintptr)va-KZERO;
114 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&va));
/*
 * NOTE(review): fragments of two functions (signatures missing from
 * this extract): first a check that pa fits under the KZERO window
 * (cankaddr-style), then kaddr, which maps a physical address back to
 * its KZERO virtual address or panics.
 */
121 if(pa < (uintptr)-KZERO)
129 if(pa < (uintptr)-KZERO)
130 return (void*)(pa + KZERO);
131 panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
/*
 * NOTE(review): fragments of kmapaddr and (at the end) a one-line
 * kmap-style wrapper; signatures missing from this extract.
 * kmapaddr prefers the KZERO window and falls back to the KMAP
 * window for physical addresses beyond KZERO's reach.
 */
138 if(pa < (uintptr)-KZERO)
139 return (void*)(pa + KZERO);
/* out of range even for the KMAP window */
140 if(pa >= KMAPEND-KMAP)
141 panic("kmapaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
142 return (void*)(pa + KMAP);
/* wrapper mapping a Page by its physical address — presumably kmap */
148 return kmapaddr(p->pa);
/* top of the initial boot mapping: kernel end rounded up to a level-1 block, as a physical address */
161 #define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
/*
 * NOTE(review): fragment of rampage — early page allocator. The branch
 * structure is missing from this extract; visibly it either allocates
 * from the kernel allocator or carves a page off the bottom of
 * conf.mem[0], which must still lie inside the initial mapping.
 */
169 return mallocalign(BY2PG, BY2PG, 0, 0);
171 pa = conf.mem[0].base;
172 assert((pa % BY2PG) == 0);
173 assert(pa < INITMAP);
174 conf.mem[0].base += BY2PG;
/*
 * l1map: map [pa, pe) at va with the given attributes, using level-1
 * blocks where the range is block-sized and block-aligned, otherwise
 * level-0 page entries in a (possibly freshly allocated and zeroed)
 * level-0 table.
 * NOTE(review): fragmentary listing — loop/else structure and several
 * statements are missing between the lines kept below.
 */
179 l1map(uintptr va, uintptr pa, uintptr pe, uintptr attr)
/* all kernel mappings are accessed-flag set and kernel-only */
189 attr |= PTEKERNEL | PTEAF;
/* fast path: free slot, full block remaining, both addresses block-aligned */
194 if(l1[PTL1X(va, 1)] == 0 && (pe-pa) >= PGLSZ(1) && ((va|pa) & PGLSZ(1)-1) == 0){
195 l1[PTL1X(va, 1)] = PTEVALID | PTEBLOCK | pa | attr;
/* slow path: reuse the existing level-0 table, which must really be a table, not a block */
200 if(l1[PTL1X(va, 1)] & PTEVALID) {
201 assert((l1[PTL1X(va, 1)] & PTETABLE) == PTETABLE);
202 l0 = KADDR(l1[PTL1X(va, 1)] & -PGLSZ(0));
/* otherwise install a new zeroed level-0 table (allocation line missing from this extract) */
205 memset(l0, 0, BY2PG);
206 l1[PTL1X(va, 1)] = PTEVALID | PTETABLE | PADDR(l0);
208 assert(l0[PTLX(va, 0)] == 0);
209 l0[PTLX(va, 0)] = PTEVALID | PTEPAGE | pa | attr;
/*
 * kmapram: make the ram bank [base, limit) addressable by the kernel.
 * A bank straddling the KZERO-window boundary is split and each half
 * handled recursively. Ranges already covered by the initial boot
 * mapping are skipped.
 * NOTE(review): fragmentary listing — some lines (braces, returns)
 * are missing between the lines kept below.
 */
216 kmapram(uintptr base, uintptr limit)
218 if(base < (uintptr)-KZERO && limit > (uintptr)-KZERO){
219 kmapram(base, (uintptr)-KZERO);
220 kmapram((uintptr)-KZERO, limit);
225 if(base >= limit || limit <= INITMAP)
/* map as normal, inner-shareable, non-executable kernel memory */
228 l1map((uintptr)kmapaddr(base), base, limit,
229 PTEWRITE | PTEPXN | PTEUXN | PTESH(SHARE_INNER));
/*
 * NOTE(review): fragment of the memory-configuration routine
 * (presumably meminit; signature missing from this extract). The
 * embedded source line numbers are non-contiguous — parsing-cursor
 * advances, braces and a few statements are missing; comments only
 * were added, the code is untouched.
 */
/* *maxmem config: first number is total size; further "base limit" pairs fill additional banks — assumed format, confirm against full source */
240 if(p = getconf("*maxmem")){
241 memsize = strtoull(p, &e, 0) - PHYSDRAM;
242 for(i = 1; i < nelem(conf.mem); i++){
243 if(e <= p || *e != ' ')
246 conf.mem[i].base = strtoull(p, &e, 0);
247 if(e <= p || *e != ' ')
250 conf.mem[i].limit = strtoull(p, &e, 0);
254 if (memsize < INITMAP) /* sanity */
/* no *maxmem (or firmware knows better): ask the hardware */
257 getramsize(&conf.mem[0]);
258 if(conf.mem[0].limit == 0){
259 conf.mem[0].base = PHYSDRAM;
260 conf.mem[0].limit = PHYSDRAM + memsize;
262 conf.mem[0].limit = conf.mem[0].base + memsize;
265 * now we know the real memory regions, unmap
266 * everything above INITMAP and map again with
/* tear down the provisional level-1 mappings (both identity and KZERO views) above INITMAP */
270 for(va = INITMAP+KZERO; va != 0; va += PGLSZ(1)){
272 ((uintptr*)L1)[PTL1X(pa, 1)] = 0;
273 ((uintptr*)L1)[PTL1X(va, 1)] = 0;
/* first free page after the kernel image */
277 pa = PGROUND((uintptr)end)-KZERO;
278 for(i=0; i<nelem(conf.mem); i++){
/* clamp each bank to what the KMAP window can address */
279 if(conf.mem[i].limit >= KMAPEND-KMAP)
280 conf.mem[i].limit = KMAPEND-KMAP;
282 if(conf.mem[i].limit <= conf.mem[i].base){
283 conf.mem[i].limit = conf.mem[i].base = 0;
/* do not let a bank spill past the actual DRAM size reported by the soc */
287 if(conf.mem[i].base < PHYSDRAM + soc.dramsize
288 && conf.mem[i].limit > PHYSDRAM + soc.dramsize)
289 conf.mem[i].limit = PHYSDRAM + soc.dramsize;
291 /* take kernel out of allocatable space */
292 if(pa > conf.mem[i].base && pa < conf.mem[i].limit)
293 conf.mem[i].base = pa;
295 kmapram(conf.mem[i].base, conf.mem[i].limit);
299 /* rampage() is now done, count up the pages for each bank */
300 for(i=0; i<nelem(conf.mem); i++)
301 conf.mem[i].npage = (conf.mem[i].limit - conf.mem[i].base)/BY2PG;
/*
 * mmukmap: establish a kernel mapping of size bytes at va for pa.
 * The memory-attribute index is smuggled in the low attribute bits of
 * va (PTEMA mask); the rest of the attributes are fixed kernel ones.
 * NOTE(review): fragmentary listing — computation of `off` (presumably
 * the sub-page offset of pa) and the return are missing from this
 * extract.
 */
305 mmukmap(uintptr va, uintptr pa, usize size)
314 attr = va & PTEMA(7);
315 attr |= PTEWRITE | PTEUXN | PTEPXN | PTESH(SHARE_OUTER);
320 l1map(va, pa, pa + off + size, attr);
/*
 * vmap: map a device register range into the VMAP region using a
 * simple bump allocator over a static base. The PTEDEVICE attribute
 * is passed to mmukmap encoded in the low bits of the chosen virtual
 * address.
 * NOTE(review): fragmentary listing — the assignment of `va` from
 * `base` is missing from this extract.
 */
327 vmap(uvlong pa, int size)
329 static uintptr base = VMAP;
330 uvlong pe = pa + size;
/* advance past this allocation, keeping pa's sub-page offset */
334 base += PGROUND(pe) - (pa & -BY2PG);
336 return (void*)mmukmap(va | PTEDEVICE, pa, size);
/*
 * mmuwalk: walk the current process's page tables from the top level
 * down to `level` for va, allocating and linking intermediate table
 * pages from up->mmufree as needed, and return a pointer into the
 * table at the requested level.
 * NOTE(review): fragmentary listing — table-entry loads, allocation
 * branches and the return are missing between the lines kept below.
 */
345 mmuwalk(uintptr va, int level)
351 x = PTLX(va, PTLEVELS-1);
353 for(i = PTLEVELS-2; i >= level; i--){
/* software/high bits should never be set in a table descriptor */
356 if(pte & (0xFFFFULL<<48))
357 iprint("strange pte %#p va %#p\n", pte, va);
358 pte &= ~(0xFFFFULL<<48 | BY2PG-1);
/* take a free page-table page and record it on the per-level list so it can be reclaimed */
363 up->mmufree = pg->next;
364 pg->va = va & -PGLSZ(i+1);
365 if((pg->next = up->mmuhead[i+1]) == nil)
366 up->mmutail[i+1] = pg;
367 up->mmuhead[i+1] = pg;
369 memset(kmapaddr(pte), 0, BY2PG);
371 table[x] = pte | PTEVALID | PTETABLE;
/* descend into the next-lower table */
373 table = kmapaddr(pte);
374 x = PTLX(va, (uintptr)i);
/* which process currently owns each hardware ASID (index = asid) */
379 static Proc *asidlist[256];
/*
 * NOTE(review): fragments of the ASID allocation/free logic
 * (signatures and most of the bodies missing from this extract):
 * a candidate slot is chosen modulo the table size, a reserved value
 * is skipped, and a slot may be stolen when it is free, already ours,
 * or its owner has exited and is not running anywhere.
 */
395 a %= nelem(asidlist);
397 continue; // reserved
399 if(x == p || x == nil || (x->asid < 0 && x->mach == nil))
417 if(a > 0 && asidlist[a] == p)
426 * Prevent the following scenario:
427 * pX sleeps on cpuA, leaving its page tables in mmutop
428 * pX wakes up on cpuB, and exits, freeing its page tables
429 * pY on cpuB allocates a freed page table page and overwrites with data
430 * cpuA takes an interrupt, and is now running with bad page tables
431 * In theory this shouldn't hurt because only user address space tables
432 * are affected, and mmuswitch will clear mmutop before a user process is
433 * dispatched. But empirically it correlates with weird problems, eg
434 * resetting of the core clock at 0x4000001C which confuses local timers.
/*
 * putmmu: install a user mapping va -> pa for the current process.
 * Retries the walk, feeding it freshly allocated page-table pages via
 * up->mmufree, until the level-0 entry can be reached; flushes the
 * stale TLB entry (by ASID and va) before writing the new pte; and
 * handles deferred icache maintenance for text pages.
 * NOTE(review): fragmentary listing — the load of `old`, braces and
 * the tail of the function are missing between the lines kept below.
 */
444 putmmu(uintptr va, uintptr pa, Page *pg)
450 while((pte = mmuwalk(va, 0)) == nil){
452 up->mmufree = newpage(0, nil, 0);
/* two flush variants depending on whether the old entry was valid — last-level vs full flush, presumably; confirm against full source */
457 if((old & PTEVALID) != 0)
458 flushasidvall((uvlong)up->asid<<48 | va>>12);
460 flushasidva((uvlong)up->asid<<48 | va>>12);
/* user pte: not-global, access flag set, privileged no-execute; share domain follows the memory attribute */
461 *pte = pa | PTEPAGE | PTEUSER | PTEPXN | PTENG | PTEAF |
462 (((pa & PTEMA(7)) == PTECACHED)? PTESH(SHARE_INNER): PTESH(SHARE_OUTER));
463 if(pg->txtflush & (1UL<<m->machno)){
464 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
465 cachedwbinvse(kmap(pg), BY2PG);
466 cacheiinvse((void*)va, BY2PG);
467 pg->txtflush &= ~(1UL<<m->machno);
/*
 * NOTE(review): fragment (enclosing function's signature missing from
 * this extract — presumably the page-table reclaim path): splice each
 * non-empty per-level page-table list onto p->mmufree and reset the
 * head/tail pointers.
 */
479 for(i=1; i<PTLEVELS; i++){
480 if(p->mmuhead[i] == nil)
482 p->mmutail[i]->next = p->mmufree;
483 p->mmufree = p->mmuhead[i];
484 p->mmuhead[i] = p->mmutail[i] = nil;
/*
 * NOTE(review): fragments of the context-switch path (presumably
 * mmuswitch; signature and intervening lines missing from this
 * extract): clear the user portion of this cpu's top-level table,
 * flush the process's ASID, load ttbr with ASID and top table, then
 * re-install the process's top-level entries from its page list.
 */
494 for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
495 m->mmutop[PTLX(va, PTLEVELS-1)] = 0;
498 setttbr(PADDR(m->mmutop));
508 flushasid((uvlong)p->asid<<48);
/* ASID lives in the high bits of the translation table base register */
510 setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
512 for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
514 m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
/*
 * NOTE(review): fragment of mmurelease — return the process's freed
 * page-table pages to the system, panicking on a bad reference count.
 * The loop structure and the page-freeing call between these lines
 * are missing from this extract.
 */
526 if((t = p->mmufree) != nil){
528 p->mmufree = t->next;
530 panic("mmurelease: bad page ref");
532 } while((t = p->mmufree) != nil);
/* checkmmu: debugging hook — body not visible in this extract; presumably an empty stub on this port */
549 checkmmu(uintptr, uintptr)