2 #include "../port/lib.h"
/* FEXT(d, o, w): extract the w-bit field at bit offset o from d */
9 #define FEXT(d, o, w) (((d)>>(o)) & ((1<<(w))-1))
/* page-table indices for a virtual address: va bits 20..31 select the
 * level-1 (section) entry, bits 12..19 select the level-2 (small page) entry */
10 #define L1X(va) FEXT((va), 20, 12)
11 #define L2X(va) FEXT((va), 12, 8)
12 #define L2AP(ap) l2ap(ap)
/* cache/buffer attribute bits for DRAM ptes come from the per-soc table */
13 #define L1ptedramattrs soc.l1ptedramattrs
14 #define L2ptedramattrs soc.l2ptedramattrs
/* PTEDRAM: l1 section descriptor for the first MB of DRAM, kernel r/w, cached */
15 #define PTEDRAM (PHYSDRAM|Dom0|L1AP(Krw)|Section|L1ptedramattrs)
/* enum fragment: l1 index bounds of the user address space, and l2 table size
 * NOTE(review): enclosing enum braces are elided from this view */
18 L1lo = UZERO/MiB, /* L1X(UZERO)? */
19 L1hi = (USTKTOP+MiB-1)/MiB, /* L1X(USTKTOP+MiB-1)? */
/* one l2 table covers 1MB: 256 entries of sizeof(PTE) each */
20 L2size = 256*sizeof(PTE),
24 * Set up initial PTEs for cpu0 (called with mmu off)
/* map all of physical DRAM at KZERO as 1MB sections, kernel r/w, cached
 * NOTE(review): fragment - intervening lines (loop body, va stepping) elided */
36 * map all of ram at KZERO
39 for(pa = PHYSDRAM; pa < PHYSDRAM+soc.dramsize; pa += MiB){
40 l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|L1ptedramattrs;
/* identity map is only needed until the mmu has been switched on */
45 * identity map first MB of ram so mmu can be enabled
47 l1[L1X(PHYSDRAM)] = PTEDRAM;
/* map the soc's i/o registers as sections without dram cache attributes */
53 for(pa = soc.physio; pa < soc.physio+IOSIZE; pa += MiB){
54 l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
59 l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
62 * double map exception vectors near top of virtual memory
/* the vector page is mapped through a coarse l2 table: one small page of dram */
65 l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
66 l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small|L2ptedramattrs;
70 * enable/disable identity map of first MB of ram
/* install (on) or remove (off) the identity section, then write the entry
 * back to memory and drop any stale tlb entry for that address */
78 l1[L1X(PHYSDRAM)] = on? PTEDRAM: Fault;
79 cachedwbtlb(&l1[L1X(PHYSDRAM)], sizeof(PTE));
80 mmuinvalidateaddr(PHYSDRAM);
/*
 * Detach proc's l2 page tables from the l1 table and move them onto the
 * proc's l2 reuse cache; the memset presumably runs only when clear is
 * set (guard elided). NOTE(review): fragment - intervening lines elided.
 */
85 mmul2empty(Proc* proc, int clear)
92 for(page = *l2; page != nil; page = page->next){
94 memset((void*)page->va, 0, L2size);
/* page->daddr records the l1 slot this l2 table was plugged into */
95 l1[page->daddr] = Fault;
/* splice the whole mmul2 list onto the front of the reuse cache */
99 *l2 = proc->mmul2cache;
100 proc->mmul2cache = proc->mmul2;
109 /* clean out any user mappings still in l1 */
/* NOTE(review): fragment - the conditionals selecting between the single-entry
 * and bulk-clear paths are elided; mmul1lo/mmul1hi bound how much to clear */
112 m->mmul1[L1lo] = Fault;
114 memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
/* the mmul1hi entries just below L1hi are cleared symmetrically */
118 l1 = &m->mmul1[L1hi - m->mmul1hi];
122 memset(l1, 0, m->mmul1hi*sizeof(PTE));
/*
 * Install proc's user page tables in this cpu's l1 table on a context
 * switch. NOTE(review): fragment - intervening lines elided.
 */
128 mmuswitch(Proc* proc)
134 if(proc != nil && proc->newtlb){
141 /* move in new map */
144 for(page = proc->mmul2; page != nil; page = page->next){
146 l1[x] = PPN(page->pa)|Dom0|Coarse;
/* widen the mmul1lo/mmul1hi bookkeeping so the l1 cleanup code knows
 * how far from each end of [L1lo, L1hi) user entries have been written */
147 if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
148 if(x+1 - L1lo < L1hi - x)
149 m->mmul1lo = x+1 - L1lo;
151 m->mmul1hi = L1hi - x;
155 /* make sure map is in memory */
156 /* could be smarter about how much? */
157 cachedwbtlb(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
159 /* lose any possible stale tlb entries */
/*
 * Release proc's mmu state: free the cached l2 pages and flush the map.
 * NOTE(review): fragment - intervening lines elided.
 */
175 mmurelease(Proc* proc)
180 for(page = proc->mmul2cache; page != nil; page = next){
/* an l2 page handed back with an unexpected refcount is a bookkeeping bug */
183 panic("mmurelease: page->ref %lud", page->ref);
186 if(proc->mmul2cache != nil)
188 proc->mmul2cache = nil;
192 /* make sure map is in memory */
193 /* could be smarter about how much? */
194 cachedwbtlb(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
196 /* lose any possible stale tlb entries */
/*
 * Establish a user mapping va -> pa (pa may carry PTE* attribute bits)
 * in the current proc's page tables, allocating or reusing an l2 table
 * if this MB of address space has none yet.
 * NOTE(review): fragment - intervening lines elided.
 */
201 putmmu(uintptr va, uintptr pa, Page* page)
208 * disable interrupts to prevent flushmmu (called from hzclock)
209 * from clearing page tables while we are setting them
215 /* l2 pages only have 256 entries - wastes 3K per 1M of address space */
216 if(up->mmul2cache == nil){
/* no cached l2 table available: allocate a fresh page for one */
218 pg = newpage(1, 0, 0);
220 /* if newpage slept, we might be on a different cpu */
222 pg->va = VA(kmap(pg));
/* otherwise pop the head of the l2 reuse cache */
225 up->mmul2cache = pg->next;
/* link the table onto the proc's active mmul2 list */
228 pg->next = up->mmul2;
231 /* force l2 page to memory (armv6) */
232 cachedwbtlb((void *)pg->va, L2size);
/* plug the l2 table into the l1 slot for this va and write the entry back */
234 *l1 = PPN(pg->pa)|Dom0|Coarse;
235 cachedwbtlb(l1, sizeof *l1);
/* maintain mmul1lo/mmul1hi bounds for the l1 cleanup code (as in mmuswitch) */
237 if(x >= L1lo + m->mmul1lo && x < L1hi - m->mmul1hi){
238 if(x+1 - L1lo < L1hi - x)
239 m->mmul1lo = x+1 - L1lo;
241 m->mmul1hi = L1hi - x;
244 pte = KADDR(PPN(*l1));
246 /* protection bits are
249 * PTEWRITE|PTEUNCACHED|PTEVALID;
/* add dram cache attributes unless the caller asked for an uncached page */
252 if(!(pa & PTEUNCACHED))
258 pte[L2X(va)] = PPN(pa)|x;
259 cachedwbtlb(&pte[L2X(va)], sizeof(PTE));
261 /* clear out the current entry */
262 mmuinvalidateaddr(PPN(va));
/* text pages written by pio() need d-cache writeback plus i-cache invalidate */
264 if((page->txtflush & (1<<m->machno)) != 0){
265 /* pio() sets PG_TXTFLUSH whenever a text pg has been written */
266 cachedwbse((void*)(page->pa|KZERO), BY2PG);
267 cacheiinvse((void*)page->va, BY2PG);
268 page->txtflush &= ~(1<<m->machno);
270 //checkmmu(va, PPN(pa));
275 mmuuncache(void* v, usize size)
282 * Simple helper for ucalloc().
283 * Uncache a Section, must already be
/* only a whole, MB-aligned section may be uncached */
287 assert(!(va & (1*MiB-1)) && size == 1*MiB);
/* refuse any l1 descriptor that is not a Section
 * NOTE(review): the failure return between these lines is elided */
291 if((*pte & (Fine|Section|Coarse)) != Section)
/* strip the dram cache/buffer attribute bits, then flush the stale mapping
 * and write the modified descriptor out of the data cache */
293 *pte &= ~L1ptedramattrs;
294 mmuinvalidateaddr(va);
295 cachedwbinvse(pte, 4);
301 * Return the number of bytes that can be accessed via KADDR(pa).
302 * If pa is not a valid argument to KADDR, return 0.
/* NOTE(review): fragment - the function header and the return-0 path are elided */
307 if(pa < PHYSDRAM + memsize) /* assumes PHYSDRAM is 0 */
308 return PHYSDRAM + memsize - pa;
/*
 * Map [pa, pa+size) at va using kernel r/w l1 sections without dram
 * cache attributes. NOTE(review): fragment - intervening lines elided.
 */
313 mmukmap(uintptr va, uintptr pa, usize size)
319 assert((va & (MiB-1)) == 0);
323 pte = pte0 = &m->mmul1[L1X(va)];
/* first pass presumably verifies the target l1 slots are free - body elided */
324 for(n = 0; n < size; n += MiB)
/* second pass installs the section descriptors and drops stale tlb entries */
328 for(n = 0; n < size; n += MiB){
329 *pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
330 mmuinvalidateaddr(va+n);
/* write all the new descriptors back to memory in one sweep */
332 cachedwbtlb(pte0, (uintptr)pte - (uintptr)pte0);
/*
 * Debugging aid: walk the page tables for va and print any entry that
 * does not match the expected pa. NOTE(review): fragment - the function
 * continues past the end of the visible source.
 */
337 checkmmu(uintptr va, uintptr pa)
345 iprint("checkmmu cpu%d va=%lux l1 %p=%ux\n", m->machno, va, l1, *l1);
/* follow the l1 coarse descriptor to the l2 table and check the pte there */
348 pte = KADDR(PPN(*l1));
350 if(pa == ~0 || (pa != 0 && PPN(*pte) != pa))
351 iprint("checkmmu va=%lux pa=%lux l1 %p=%ux pte %p=%ux\n", va, pa, l1, *l1, pte, *pte);