2 #include "../port/lib.h"
/* index of va's level-1 descriptor: bits 31:20 (one entry per MiB section, 4096 entries) */
9 #define L1X(va) FEXT((va), 20, 12)
/* index of va within a coarse level-2 table: bits 19:12 (256 entries per table) */
10 #define L2X(va) FEXT((va), 12, 8)
/* L1-table index bounds of the user address range, one index per MiB section */
13 L1lo = UZERO/MiB, /* L1X(UZERO)? */
14 L1hi = (USTKTOP+MiB-1)/MiB, /* L1X(USTKTOP+MiB-1)? */
/* a zero L1 descriptor is an unmapped hole (translation fault) */
17 #define ISHOLE(pte) ((pte) == 0)
19 /* dump level 1 page table at virtual addr l1 */
24 uintptr pa, startva, startpa;
28 // pa -= MACHSIZE+1024; /* put level 2 entries below level 1 */
/* endva == 0 means "no run of contiguous mappings currently open" */
32 endva = startva = startpa = 0;
34 /* dump first level of ptes */
/*
 * walk all 4096 L1 entries, coalescing adjacent entries of the same
 * descriptor type into [startva, endva) ranges so the dump stays short.
 */
35 for (va = i = 0; i < 4096; i++) {
38 type = pte & (Fine|Section|Coarse);
40 if (endva != 0) { /* open range? close it */
/* NOTE(review): format mixes %#lux and %#llux for startva/endva,
 * which appear to be the same (uintptr) type — verify the verbs
 * match the actual argument widths. */
41 print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
42 startva, endva-1, startpa, rngtype);
46 if (endva == 0) { /* no open range? start one */
51 endva = va + MB; /* continue the open range */
52 // if (type == Coarse) {
53 // // could dump the l2 table for this l1 entry
/* after the loop, flush any range still open at the top of the table */
58 if (endva != 0) /* close an open range */
59 print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
60 startva, endva-1, startpa, rngtype);
63 /* identity map the megabyte containing va, uncached */
65 idmap(PTE *l1, ulong va)
/*
 * install a 1MiB section descriptor with pa == va, kernel RW;
 * no Cached|Buffered bits, so the mapping is uncached (device memory).
 */
68 l1[L1X(va)] = va | Dom0 | L1AP(Krw) | Section;
71 /* map `mbs' megabytes from virt to phys */
73 mmumap(uintptr virt, uintptr phys, int mbs)
/* one kernel-RW section descriptor per MiB; virt and phys advance together */
81 for (off = 0; mbs-- > 0; off += MB)
82 l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) | Section;
88 /* identity map `mbs' megabytes from phys */
90 mmuidmap(uintptr phys, int mbs)
/* identity map == virt equals phys */
92 mmumap(phys, phys, mbs);
/*
 * establish the fixed kernel mappings: I/O registers, ethernet,
 * and the high exception vectors (HVECTORS) via a coarse L2 table.
 */
104 /* redundant with l.s; only covers first MB of 17MB */
105 l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;
107 idmap(l1, PHYSETHER); /* igep 9221 ethernet regs */
108 idmap(l1, PHYSL4PROT);
114 /* map high vectors to start of dram, but only 4K, not 1MB */
/* carve the L2 table for the vectors out of memory just below Mach */
115 pa -= MACHSIZE+2*1024;
118 /* vectors step on u-boot, but so do page tables */
/* L2: one 4KiB small page mapping HVECTORS -> start of dram, kernel RW */
119 l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
/* L1: point the vectors' section at that coarse L2 table */
120 l1[L1X(HVECTORS)] = pa|Dom0|Coarse; /* vectors -> ttb-machsize-2k */
128 // mmudump(l1); /* DEBUG */
/*
 * detach proc's level-2 page-table pages from the L1 table and move
 * them onto the proc's mmul2cache free list; if clear is set, zero
 * each L2 page so it can be reused directly.
 */
132 mmul2empty(Proc* proc, int clear)
139 for(page = *l2; page != nil; page = page->next){
141 memset(UINT2PTR(page->va), 0, BY2PG);
/* page->daddr is the L1 index this L2 table was installed at */
142 l1[page->daddr] = Fault;
/* splice the whole list onto the front of the cache list */
145 *l2 = proc->mmul2cache;
146 proc->mmul2cache = proc->mmul2;
/*
 * remove all user-space mappings from the current L1 table, using the
 * mmul1lo/mmul1hi watermarks to limit how much has to be cleared.
 */
154 /* there's a bug in here */
157 /* clean out any user mappings still in l1 */
158 if(m->mmul1lo > L1lo){
/* single-entry case: a lone store beats a memset */
160 m->mmul1[L1lo] = Fault;
/* NOTE(review): the count looks wrong — it should presumably be
 * (m->mmul1lo - L1lo)*sizeof(PTE); it is only harmless if L1lo == 0
 * (UZERO == 0).  Likely the bug the comment above refers to — confirm. */
162 memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
165 if(m->mmul1hi < L1hi){
166 l1 = &m->mmul1[m->mmul1hi];
167 if((L1hi - m->mmul1hi) == 1)
170 memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
/* fallback path: clear the whole user range of the L1 table */
174 memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
/*
 * install proc's address space: load its L2 tables into the current
 * L1 table, write the table back to memory and flush stale TLB entries.
 */
179 mmuswitch(Proc* proc)
185 /* do kprocs get here and if so, do they need to? */
/* fast path: same process and no forced tlb reload — nothing to do */
186 if(m->mmupid == proc->pid && !proc->newtlb)
188 m->mmupid = proc->pid;
190 /* write back dirty and invalidate l1 caches */
200 /* move in new map */
202 for(page = proc->mmul2; page != nil; page = page->next){
/* point the L1 slot for this range at the proc's coarse L2 table */
204 l1[x] = PPN(page->pa)|Dom0|Coarse;
205 /* know here that L1lo < x < L1hi */
/* widen whichever watermark (lo or hi) is nearer to x */
206 if(x+1 - m->mmul1lo < m->mmul1hi - x)
212 /* make sure map is in memory */
213 /* could be smarter about how much? */
214 cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
216 /* lose any possible stale tlb entries */
219 //print("mmuswitch l1lo %d l1hi %d %d\n",
220 // m->mmul1lo, m->mmul1hi, proc->kp);
/*
 * free all of proc's page-table pages (it is exiting); then write the
 * now-empty L1 user range back to memory and flush stale TLB entries.
 */
235 mmurelease(Proc* proc)
239 /* write back dirty and invalidate l1 caches */
243 for(page = proc->mmul2cache; page != nil; page = next){
/* an L2 page still shared at this point indicates a refcount bug */
246 panic("mmurelease: page->ref %ld", page->ref);
249 if(proc->mmul2cache != nil)
251 proc->mmul2cache = nil;
255 /* make sure map is in memory */
256 /* could be smarter about how much? */
257 cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
259 /* lose any possible stale tlb entries */
/*
 * establish the user mapping va -> pa for page, allocating (or reusing
 * from the per-proc cache) a coarse L2 table if this MiB has none yet.
 * Protection bits are carried in the low bits of pa (PTEWRITE etc.).
 */
264 putmmu(uintptr va, uintptr pa, Page* page)
272 //print("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
273 //print("mmul1 %#p l1 %#p *l1 %#ux x %d pid %d\n",
274 // m->mmul1, l1, *l1, x, up->pid);
276 /* wasteful - l2 pages only have 256 entries - fix */
/* no cached L2 page available: allocate and map a fresh one */
277 if(up->mmul2cache == nil){
278 /* auxpg since we don't need much? memset if so */
279 pg = newpage(1, 0, 0);
280 pg->va = VA(kmap(pg));
/* otherwise pop a page off the cache and zero it for reuse */
284 up->mmul2cache = pg->next;
285 memset(UINT2PTR(pg->va), 0, BY2PG);
/* link the L2 page onto the proc's active list */
288 pg->next = up->mmul2;
291 /* force l2 page to memory */
292 cachedwbse((void *)pg->va, BY2PG);
/* hook the new coarse table into the L1 slot and write it back */
294 *l1 = PPN(pg->pa)|Dom0|Coarse;
295 cachedwbse(l1, sizeof *l1);
296 //print("l1 %#p *l1 %#ux x %d pid %d\n", l1, *l1, x, up->pid);
/* keep the lo/hi watermarks tight around the touched L1 range */
298 if(x >= m->mmul1lo && x < m->mmul1hi){
299 if(x+1 - m->mmul1lo < m->mmul1hi - x)
/* locate the L2 table this L1 entry points at */
305 pte = UINT2PTR(KADDR(PPN(*l1)));
306 //print("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));
308 /* protection bits are
311 * PTEWRITE|PTEUNCACHED|PTEVALID;
/* cacheable unless the caller asked for an uncached mapping */
314 if(!(pa & PTEUNCACHED))
315 x |= Cached|Buffered;
/* install the small-page descriptor and push it to memory */
320 pte[L2X(va)] = PPN(pa)|x;
321 cachedwbse(&pte[L2X(va)], sizeof pte[0]);
323 /* clear out the current entry */
324 mmuinvalidateaddr(PPN(va));
326 /* write back dirty entries - we need this because the pio() in
327 * fault.c is writing via a different virt addr and won't clean
328 * its changes out of the dcache. Page coloring doesn't work
329 * on this mmu because the virtual cache is set associative
330 * rather than direct mapped.
337 //print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
341 mmuuncache(void* v, usize size)
348 * Simple helper for ucalloc().
349 * Uncache a Section, must already be
/* only whole, MiB-aligned sections can be uncached this way */
353 assert(!(va & (1*MiB-1)) && size == 1*MiB);
/* refuse anything that isn't currently a Section descriptor */
357 if((*pte & (Fine|Section|Coarse)) != Section)
/* strip the cacheable/bufferable bits, then purge TLB and the PTE's cache line */
359 *pte &= ~(Cached|Buffered);
360 mmuinvalidateaddr(va);
361 cachedwbinvse(pte, 4);
/* map one MiB-aligned megabyte va -> pa in the kernel L1 table */
367 mmukmap(uintptr va, uintptr pa, usize size)
/* only whole, MiB-aligned sections are supported */
375 assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB)
381 *pte = pa|Dom0|L1AP(Krw)|Section;
/* purge any stale translation and push the new PTE to memory */
382 mmuinvalidateaddr(va);
383 cachedwbinvse(pte, 4);
/* undo a matching mmukmap() of the megabyte at va */
389 mmukunmap(uintptr va, uintptr pa, usize size)
397 assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
/* refuse unless the entry is exactly the section mmukmap() installed */
401 if(*pte != (pa|Dom0|L1AP(Krw)|Section))
404 mmuinvalidateaddr(va);
405 cachedwbinvse(pte, 4);
411 * Return the number of bytes that can be accessed via KADDR(pa).
412 * If pa is not a valid argument to KADDR, return 0.
/* only addresses inside physical dram are KADDR-able */
417 if(pa >= PHYSDRAM && pa < PHYSDRAM+memsize)
418 return PHYSDRAM+memsize - pa;
/*
 * map physical range [pa, pa+size) into kernel virtual space and
 * return the kernel pointer; built on mmukmap(), so mappings are in
 * MiB sections.
 */
424 vmap(uintptr pa, usize size)
430 * XXX - replace with new vm stuff.
431 * Crock after crock - the first 4MB is mapped with 2MB pages
432 * so catch that and return good values because the current mmukmap
/* low memory is already mapped at boot; just offset into kseg0 */
436 return UINT2PTR(kseg0|pa);
442 size = ROUNDUP(size, BY2PG);
445 pae = mmukmap(va, pa, size);
/* mmukmap returns the end pa on success; anything else is fatal */
446 if(pae == 0 || pae-size != pa)
447 panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
448 pa+o, osize, getcallerpc(&pa), pae);
/* o is pa's sub-page offset, preserved in the returned pointer */
450 return UINT2PTR(va+o);
/* release a vmap()ed range; currently only returns the space to the allocator */
455 vunmap(void* v, usize size)
458 * XXX - replace with new vm stuff.
459 * Can't do this until do real vmap for all space that
460 * might be used, e.g. stuff below 1MB which is currently
461 * mapped automagically at boot but that isn't used (or
462 * at least shouldn't be used) by the kernel.
463 upafree(PADDR(v), size);
470 * Everything is in domain 0;
471 * domain 0 access bits in the DAC register are set
472 * to Client, which means access is controlled by the
473 * permission values set in the PTE.
475 * L1 access control for the kernel is set to 1 (RW,
476 * no user mode access);
477 * L2 access control for the kernel is set to 1 (ditto)
479 * L1 user mode access is never set;
480 * L2 access control for user mode is set to either
481 * 2 (RO) or 3 (RW) depending on whether text or data,
483 * (To get kernel RO set AP to 0 and S bit in control
485 * Coarse L2 page-tables are used (pointed to by Coarse
486 * L1 descriptors). They have 256 entries and so consume
487 * 1024 bytes per table; each entry is a Small (4KiB)
488 * page descriptor, so one coarse table maps 1MiB.
490 * 4KiB. That's the size of 1) a page, 2) the
491 * size allocated for an L2 page-table page (note only 1KiB
492 * is needed per L2 page - to be dealt with later) and
493 * 3) the size of the area in L1 needed to hold the PTEs
494 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).