#include "../port/lib.h"
#include "../port/error.h"
 * to avoid mmu and cache flushing, we use the pid register in the MMU
 * to map all user addresses. Although there are 64 possible pids, we
 * can only use 31 because there are only 32 protection domains and we
 * need one for the kernel. Pid i is thus associated with domain i.
 * Domain 0 is used for the kernel.
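 *
 * Sketch of the relocation (an assumption based on the StrongARM
 * manuals, not something this file spells out): the 6-bit pid lives
 * in the high bits of the PID register, and a user virtual address
 * below 32MB is relocated to roughly va | (pid<<25) before
 * translation, so each pid gets a private 32MB window without any
 * tlb or cache flushing on context switch.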
/* real protection bits */
enum
{
	/* level 1 descriptor bits */
	L1Domain0=	(0<<L1DomShift),
	L1KernelRO=	(0x0<<10),
	L1KernelRW=	(0x1<<10),
	L1SectBaseMask=	(0xFFF<<20),
	L1PTBaseMask=	(0x3FFFFF<<10),

	/* level 2 descriptor bits */
	L2KernelRW=	(0x55<<4),
	L2PageBaseMask=	(0xFFFFF<<12),
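
	/*
	 * For reference, the ARMv4 descriptor layouts these masks assume
	 * (summarized from the architecture manual):
	 *	L1 section:	base[31:20] AP[11:10] domain[8:5] C[3] B[2] type[1:0]=10
	 *	L2 small page:	base[31:12] ap3..ap0[11:4] C[3] B[2] type[1:0]=10
	 * Each 2-bit L2 "ap" field guards one 1KB quarter of the page:
	 * 01 = kernel rw, 10 = kernel rw/user ro, 11 = rw for everyone.
	 * Hence L2KernelRW = 0x55<<4: pattern 01 in all four subpage fields.
	 */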
/*
 * We map all of memory, flash, and the zeros area with sections.
 * Special use space is mapped on the fly with mapspecial().
 */
	/* get a prototype level 1 page */
	l1table = xspanalloc(16*1024, 16*1024, 0);
	memset(l1table, 0, 16*1024);

	/* map low mem (I really don't know why I have to do this -- presotto) */
	for(o = 0; o < 1*OneMeg; o += OneMeg)
		l1table[(0+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((0+o)&L1SectBaseMask);

	/* map DRAM, cached */
	for(o = 0; o < DRAMTOP-DRAMZERO; o += OneMeg)
		l1table[(DRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* second, uncached window onto the same DRAM */
	for(o = 0; o < UCDRAMTOP-UCDRAMZERO; o += OneMeg)
		l1table[(UCDRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* map the zeros area */
	for(o = 0; o < NULLTOP-NULLZERO; o += OneMeg)
		l1table[(NULLZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSNULL0+o)&L1SectBaseMask);

	/* map flash, uncached */
	for(o = 0; o < FLASHTOP-FLASHZERO; o += OneMeg)
		l1table[(FLASHZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSFLASH0+o)&L1SectBaseMask);
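	/*
	 * Worked example, assuming the usual ARM encodings (L1Section=2,
	 * L1Cached=1<<3, L1Buffered=1<<2) and a hypothetical PHYSDRAM0 of
	 * 0xC0000000: the first DRAM entry above is
	 *	2 | 0x1<<10 | 0 | 1<<3 | 1<<2 | 0xC0000000 = 0xC000040E
	 * i.e. one cached, buffered, kernel read/write 1MB section.
	 */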
	/* map peripheral control module regs */
	mapspecial(0x80000000, OneMeg);

	/* map system control module regs */
	mapspecial(0x90000000, OneMeg);
	/*
	 * double map start of ram to exception vectors
	 */
	a = EVECTORS;
	t = xspanalloc(BY2PG, 1024, 0);
	memset(t, 0, BY2PG);
	l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
	t[(a&0xfffff)>>PGSHIFT] = L2SmallPage | L2KernelRW | (PHYSDRAM0 & L2PageBaseMask);
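	/*
	 * Index arithmetic, assuming high vectors (EVECTORS = 0xFFFF0000):
	 * a>>20 = 0xFFF picks the last L1 slot, and (a&0xfffff)>>PGSHIFT =
	 * 0xF0000>>12 = 0xF0 picks the slot in the new L2 table, which is
	 * pointed at the first page of physical DRAM.
	 */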
	/* set up the domain register to cause all domains to obey pte access bits */

	putttb((ulong)l1table);
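	/*
	 * Background, summarized from the ARM architecture manual: the
	 * domain access control register holds a 2-bit field per domain;
	 * 00 = no access, 01 = client (checked against the AP bits in the
	 * descriptor), 11 = manager (unchecked).  "Obey pte access bits"
	 * means client.  putttb() then points the translation table base
	 * at our level 1 table, which must be 16KB aligned - hence the
	 * xspanalloc(16*1024, 16*1024, 0) above.
	 */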
static void*
_map(ulong pa, int len, ulong zero, ulong top, ulong l1prop, ulong l2prop)
{
	ulong *t, *rv;
	ulong va, i, base, end, off, entry;
	int large;

	/* requests of 128KB or more get 1MB sections, smaller ones 4KB pages */
	rv = nil;
	large = len >= 128*1024;
	if(large){
		base = pa & ~(OneMeg-1);
		end = (pa+len-1) & ~(OneMeg-1);
	} else {
		base = pa & ~(BY2PG-1);
		end = (pa+len-1) & ~(BY2PG-1);
	}
	off = pa - base;
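	/*
	 * e.g. (hypothetical values) pa = 0x48000345, len = 4096: not
	 * large, so base = 0x48000000, end = 0x48001000, and off = 0x345,
	 * the byte offset added back into the va returned to the caller.
	 */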
	for(va = zero; va < top && base <= end; va += OneMeg){
		switch(l1table[va>>20] & L1TypeMask){
		default:
			/* found unused entry on level 1 table */
			if(large){
				if(rv == nil)
					rv = (ulong*)(va+off);
				l1table[va>>20] = L1Section | l1prop | L1Domain0
					| (base & L1SectBaseMask);
				base += OneMeg;
				continue;
			}
			/* create an L2 page table and keep going */
			t = xspanalloc(BY2PG, 1024, 0);
			memset(t, 0, BY2PG);
			l1table[va>>20] = L1PageTable | L1Domain0
				| (((ulong)t) & L1PTBaseMask);
			break;
		case L1Section:
			/* if it's already mapped in a one meg area, don't remap */
			entry = l1table[va>>20];
			i = entry & L1SectBaseMask;
			if(pa >= i && (pa+len) <= i + OneMeg)
			if((entry & ~L1SectBaseMask) == (L1Section | l1prop | L1Domain0))
				return (void*)(va + (pa & (OneMeg-1)));
			continue;
		/* here if we're using page maps instead of sections */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg && base <= end; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage){
				if(rv == nil)
					rv = (ulong*)(va+i+off);
				t[i>>PGSHIFT] = L2SmallPage | l2prop
					| (base & L2PageBaseMask);
				base += BY2PG;
/* map in i/o registers */
void*
mapspecial(ulong pa, int len)
{
	return _map(pa, len, REGZERO, REGTOP, L1KernelRW, L2KernelRW);
}
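/*
 * Typical use from a driver (the address is hypothetical):
 *	regs = mapspecial(0x80050000, 64);
 * returns an uncached kernel virtual address in REGZERO..REGTOP;
 * small lengths like this end up in 4KB L2 pages, requests of
 * 128KB or more in 1MB sections.
 */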
/* map add on memory */
void*
mapmem(ulong pa, int len, int cached)
{
	ulong l1, l2;

	l1 = L1KernelRW;
	l2 = L2KernelRW;
	if(cached){
		l1 = L1KernelRW|L1Cached|L1Buffered;
		l2 = L2KernelRW|L2Cached|L2Buffered;
	}
	return _map(pa, len, EMEMZERO, EMEMTOP, l1, l2);
}
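/*
 * e.g. an add-on memory card at a hypothetical physical address:
 *	mapmem(0x30000000, 4*1024*1024, 0);
 * passing cached=0 keeps device-backed memory out of the write-back
 * data cache at the cost of slower access.
 */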
/* map a virtual address to a physical one */
	entry = l1table[va>>20];
	switch(entry & L1TypeMask){
	case L1Section:
		return (entry & L1SectBaseMask) | (va & (OneMeg-1));
	case L1PageTable:
		t = (ulong*)(entry & L1PTBaseMask);
		va &= OneMeg-1;
		entry = t[va>>PGSHIFT];
		switch(entry & L2TypeMask){
		case L2SmallPage:
			return (entry & L2PageBaseMask) | (va & (BY2PG-1));
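		/*
		 * e.g. a va covered by a section whose base is 0x90000000
		 * comes back as 0x90000000 | (va & (OneMeg-1)); a page
		 * table hit does the same in two steps, with the low 12
		 * bits of the va passed straight through.
		 */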
/* map a physical address to a virtual one */
static ulong
findva(ulong pa, ulong zero, ulong top)
{
	ulong va, entry, start, end, i;
	ulong *t;

	for(va = zero; va < top; va += OneMeg){
		/* search the L1 entry */
		entry = l1table[va>>20];
		switch(entry & L1TypeMask){
		default:
			return 0;	/* no holes */
		case L1Section:
			start = entry & L1SectBaseMask;
			end = start + OneMeg;
			if(pa >= start && pa < end)
				return va | (pa & (OneMeg-1));
		/* search the L2 entries for a small page covering pa */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* skip entries that aren't small page mappings */
			if((entry & L2TypeMask) != L2SmallPage)
				continue;

			start = entry & L2PageBaseMask;
			end = start + BY2PG;
			if(pa >= start && pa < end)
				return va | i | (pa & (BY2PG-1));	/* i is already a byte offset */
	/* try the easy stuff first (the first case is true most of the time) */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return DRAMZERO+(pa-PHYSDRAM0);
	if(pa >= PHYSFLASH0 && pa <= PHYSFLASH0+(FLASHTOP-FLASHZERO))
		return FLASHZERO+(pa-PHYSFLASH0);
	if(pa >= PHYSNULL0 && pa <= PHYSNULL0+(NULLTOP-NULLZERO))
		return NULLZERO+(pa-PHYSNULL0);

	if(!mmuinited)		/* early boot: no other maps exist yet */
		return 0;	/* this shouldn't happen */
	/* walk the map for the special regs and extended memory */
	va = findva(pa, EMEMZERO, EMEMTOP);
	if(va != 0)
		return va;
	return findva(pa, REGZERO, REGTOP);
/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 * We'll find out if anyone still has one.
 */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return PHYSDRAM0+(DRAMTOP-DRAMZERO) - pa;
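/*
 * e.g. cankaddr(PHYSDRAM0) is the whole window, DRAMTOP-DRAMZERO
 * bytes, while a flash address yields 0 even though the kaddr walk
 * above would happily map it.
 */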
/*
 * table to map fault.c bits to physical bits
 */
static ulong mmubits[16] =
{
	[PTEVALID]					L2SmallPage|L2Cached|L2Buffered|L2UserRO,
	[PTEVALID|PTEWRITE]				L2SmallPage|L2Cached|L2Buffered|L2UserRW,
	[PTEVALID|PTEUNCACHED]				L2SmallPage|L2UserRO,
	[PTEVALID|PTEUNCACHED|PTEWRITE]			L2SmallPage|L2UserRW,

	[PTEKERNEL|PTEVALID]				L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEWRITE]			L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED]		L2SmallPage|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]	L2SmallPage|L2KernelRW,
};
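/*
 * e.g. a fault.c pte of PTEVALID|PTEWRITE (an ordinary writable user
 * page) becomes L2SmallPage|L2Cached|L2Buffered|L2UserRW: a cached,
 * buffered 4KB page that user code may read and write.  Note that the
 * PTEKERNEL rows ignore PTEWRITE: the kernel always maps its own
 * pages read/write.
 */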
/*
 * add an entry to the current map
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
	Page *l2pg;
	ulong *t, *l1p, *l2p;

	/* clear out the current entry */
	mmuinvalidateaddr(va);

	l2pg = up->l1page[va>>20];
	if(l2pg == nil){
		l2pg = up->mmufree;
		if(l2pg != nil)
			up->mmufree = l2pg->next;
		else if((l2pg = auxpage()) == nil)
			pexit("out of memory", 1);
		l2pg->va = VA(kmap(l2pg));
		up->l1page[va>>20] = l2pg;
		memset((uchar*)(l2pg->va), 0, BY2PG);
	}
	/* always point L1 entry to L2 page, can't hurt */
	l1p = &l1table[va>>20];
	*l1p = L1PageTable | L1Domain0 | (l2pg->pa & L1PTBaseMask);
	up->l1table[va>>20] = *l1p;
	t = (ulong*)l2pg->va;

	/* translate the fault.c bits and write the L2 entry */
	l2p = &t[(va & (OneMeg-1))>>PGSHIFT];
	*l2p = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
		| (pa & ~(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE));
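	/*
	 * e.g. (hypothetical) pa = 0x01234000|PTEVALID|PTEWRITE stores
	 * mmubits[PTEVALID|PTEWRITE] | 0x01234000: the low flag bits pick
	 * a descriptor template and the remaining bits are the physical
	 * page address itself.
	 */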
	/*
	 * write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachewb();
	if(pg->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text page has been written */
		pg->cachectl[0] = PG_NOFLUSH;
	}
/*
 * free up all page tables for this proc
 */
	for(i = 0; i < Nmeg; i++){
		pg = p->l1page[i];
		if(pg == nil)
			continue;
		p->l1page[i] = nil;
		pg->next = p->mmufree;
		p->mmufree = pg;
	}
	memset(p->l1table, 0, sizeof(p->l1table));
/*
 * this is called with palloc locked so the pagechainhead is kosher
 */
	/* write back dirty cache entries before changing map */
	cachewb();

	mmuptefree(p);

	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
		if(--pg->ref)
			panic("mmurelease: pg->ref %d\n", pg->ref);
		pagechainhead(pg);
	}
	if(p->mmufree && palloc.r.p)
		wakeup(&palloc.r);
	p->mmufree = nil;

	memset(l1table, 0, sizeof(p->l1table));
	cachewbregion((ulong)l1table, sizeof(p->l1table));
	if(m->mmupid == p->pid && p->newtlb == 0)
		return;
	m->mmupid = p->pid;

	/* write back dirty cache entries and invalidate all cache entries */
	cacheflush();

	/* move in new map */
	memmove(l1table, p->l1table, sizeof(p->l1table));

	/* make sure map is in memory */
	cachewbregion((ulong)l1table, sizeof(p->l1table));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
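	/*
	 * The ordering matters: flush the virtually-indexed cache while
	 * the old map is live, install the new map, write the table back
	 * to memory (the hardware walker reads memory, not the cache),
	 * and only then invalidate the tlb.
	 */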
	e = l1table[va>>20];
	switch(e & L1TypeMask){
	default:
		iprint("l1: %#p[%#lux] = %#lux invalid\n", l1table, va>>20, e);
		break;
	case L1PageTable:
		iprint("l1: %#p[%#lux] = %#lux pt\n", l1table, va>>20, e);
		d = ((ulong*)(e & L1PTBaseMask))[(va & (OneMeg-1))>>PGSHIFT];
		iprint("l2: %#lux[%#lux] = %#lux\n", e & L1PTBaseMask, (va & (OneMeg-1))>>PGSHIFT, d);
		break;
	case L1Section:
		iprint("l1: %#p[%#lux] = %#lux section\n", l1table, va>>20, e);
		break;
	}
void
checkmmu(ulong, ulong)
{
}

void
countpagerefs(ulong*, int)
{
}