#include "../port/lib.h"
#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)
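/*
 * A worked example, assuming the usual definition of FEXT
 * (bit-field extract: FEXT(d, o, w) == ((d)>>(o)) & ((1<<(w))-1)):
 * for va = 0x80123456,
 *	L1X(va) == 0x801	bits 31-20: index into the 4096-entry L1 table
 *	L2X(va) == 0x023	bits 19-12: index into a 256-entry coarse L2 table
 */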
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
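/*
 * The answer to both questions above appears to be yes: on a 32-bit
 * ARM, L1X(va) == (va>>20) & 0xfff == va/MiB, so these bounds are the
 * L1 indices of the first and one-past-the-last megabyte of user space.
 */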
#define ISHOLE(pte)	((pte) == 0)
/* dump level 1 page table at virtual addr l1 */
	uintptr pa, startva, startpa;
//	pa -= MACHSIZE+1024;		/* put level 2 entries below level 1 */
	endva = startva = startpa = 0;
	/* dump first level of ptes */
	for (va = i = 0; i < 4096; i++) {
		type = pte & (Fine|Section|Coarse);
			if (endva != 0) {	/* open range? close it */
				print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
					startva, endva-1, startpa, rngtype);
			if (endva == 0) {	/* no open range? start one */
			endva = va + MB;	/* continue the open range */
//			if (type == Coarse) {
//				// could dump the l2 table for this l1 entry
	if (endva != 0)			/* close an open range */
		print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
			startva, endva-1, startpa, rngtype);
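/*
 * Purely illustrative output (address and size invented; a Section
 * descriptor's type bits are 0x2 architecturally), showing how
 * adjacent 1MB entries of the same type coalesce into one line:
 *	l1 maps va (0x48000000-0x48ffffff) -> pa 0x48000000 type 0x2
 */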
/* identity map the megabyte containing va, uncached */
idmap(PTE *l1, ulong va)
	/* mask off the offset so the Section base is MiB-aligned */
	l1[L1X(va)] = (va & ~(MiB-1)) | Dom0 | L1AP(Krw) | Section;
/* map `mbs' megabytes from virt to phys */
mmumap(uintptr virt, uintptr phys, int mbs)
	for (off = 0; mbs-- > 0; off += MB)
		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) | Section;
/* identity map `mbs' megabytes from phys */
mmuidmap(uintptr phys, int mbs)
	mmumap(phys, phys, mbs);
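/*
 * A hypothetical call to show the relationship: mmuidmap(PHYSETHER, 1)
 * is the same as mmumap(PHYSETHER, PHYSETHER, 1): one uncached MB of
 * device space mapped at its own physical address.
 */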
	/* redundant with l.s; only covers first MB of 17MB */
	l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;
	idmap(l1, PHYSETHER);		/* igep 9221 ethernet regs */
	idmap(l1, PHYSL4PROT);
	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+2*1024;
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
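/*
 * A sketch of the layout implied by the arithmetic above (assuming,
 * as the offsets suggest, that the Mach structure sits directly
 * below the L1 table whose physical address pa started out as):
 *	pa+MACHSIZE+2K	base of the L1 page table (ttb)
 *	pa+2K		Mach structure (MACHSIZE bytes)
 *	pa		coarse L2 table for HVECTORS (1K used of 2K)
 */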
//	mmudump(l1);			/* DEBUG */
mmul2empty(Proc* proc, int clear)
	for(page = *l2; page != nil; page = page->next){
			memset((void*)page->va, 0, BY2PG);
		l1[page->daddr] = Fault;
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
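/*
 * The splice above is easy to misread: assuming the loop body
 * advances l2 to each page's next field (as it must for this to
 * work), l2 ends up pointing at the next field of the last page on
 * proc->mmul2, so the first assignment appends the old cache to the
 * tail of the in-use list and the second recycles the whole chain
 * as the new cache, e.g.
 *	mmul2: A->B  mmul2cache: C   becomes   mmul2cache: A->B->C
 */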
/*
 * there's a bug in here; one suspect is fixed below: the first
 * memset's length read m->mmul1lo*sizeof(PTE), ignoring L1lo,
 * which is wrong whenever L1lo != 0 (compare the mmul1hi case).
 */
	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
			m->mmul1[L1lo] = Fault;
			memset(&m->mmul1[L1lo], 0, (m->mmul1lo - L1lo)*sizeof(PTE));
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
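/*
 * How mmul1lo/mmul1hi appear to be used (inferred from this file):
 * [m->mmul1lo, m->mmul1hi) is the slice of the user part of the L1
 * table known to hold only Fault entries, so the cleaning paths need
 * touch only the fringes outside it.  When mmuswitch() or putmmu()
 * below installs an entry at index x inside the window, the window
 * shrinks from whichever end is nearer to x; e.g. with lo=0, hi=1024,
 * installing x=2 gives lo=3, then installing x=1000 gives hi=1000.
 */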
mmuswitch(Proc* proc)
	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
	m->mmupid = proc->pid;
	/* write back dirty and invalidate l1 caches */
	/* move in new map */
	for(page = proc->mmul2; page != nil; page = page->next){
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo <= x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
	/* lose any possible stale tlb entries */
//	print("mmuswitch l1lo %d l1hi %d %d\n",
//		m->mmul1lo, m->mmul1hi, proc->kp);
mmurelease(Proc* proc)
	/* write back dirty and invalidate l1 caches */
	freepages(proc->mmul2cache, nil, 0);
	proc->mmul2cache = nil;
	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
	/* lose any possible stale tlb entries */
putmmu(uintptr va, uintptr pa, Page* page)
	//print("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
	//print("mmul1 %#p l1 %#p *l1 %#ux x %d pid %d\n",
	//	m->mmul1, l1, *l1, x, up->pid);
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
			up->mmul2cache = pg->next;
			memset((void*)pg->va, 0, BY2PG);
		pg->next = up->mmul2;
		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);
		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);
		//print("l1 %#p *l1 %#ux x %d pid %d\n", l1, *l1, x, up->pid);
		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
	pte = KADDR(PPN(*l1));
	//print("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));
	/* protection bits are combinations like
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);
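/*
 * For reference (architectural small-page layout, not specific to
 * this file): the descriptor written above is
 *	bits 31-12	physical page number (PPN)
 *	bits 11-4	AP3..AP0, two access-permission bits per 1KB quarter
 *	bit 3, bit 2	C (Cached), B (Buffered)
 *	bits 1-0	10 = Small page
 */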
	/* flush any stale tlb entry for this va */
	mmuinvalidateaddr(PPN(va));
	/* write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	//print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
mmuuncache(void* v, usize size)
	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section; it must already be
	 * a mapped Section.
	 */
	assert(!(va & (1*MiB-1)) && size == 1*MiB);
	if((*pte & (Fine|Section|Coarse)) != Section)
	*pte &= ~(Cached|Buffered);
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
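/*
 * Hypothetical use, following the ucalloc() hint above (`chunk' is
 * an assumed MiB-aligned, Section-mapped buffer):
 *	if(mmuuncache(chunk, 1*MiB) == nil)
 *		panic("mmuuncache: not a Section");
 */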
mmukmap(uintptr va, uintptr pa, usize size)
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
	*pte = pa|Dom0|L1AP(Krw)|Section;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
mmukunmap(uintptr va, uintptr pa, usize size)
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
	if(*pte != (pa|Dom0|L1AP(Krw)|Section))
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
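/*
 * Note the pairing: mmukunmap() only clears the entry when the
 * descriptor is exactly what mmukmap() would have written for the
 * same va and pa, so a mismatched unmap fails safely rather than
 * tearing down an unrelated Section.
 */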
/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
	if(pa >= PHYSDRAM && pa < PHYSDRAM+memsize)
		return PHYSDRAM+memsize - pa;
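/*
 * A worked example (PHYSDRAM is 0x80000000 on OMAP3; a memsize of
 * 256MB is invented for illustration):
 *	cankaddr(0x88000000) == 128*MiB	(128MB into DRAM)
 *	cankaddr(0x48002000) == 0	(device space; use vmap instead)
 */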
vmap(uintptr pa, usize size)
	/*
	 * XXX - replace with new vm stuff.
	 * Crock after crock - the first 4MB is already mapped at boot,
	 * so catch that and return good values, because the current
	 * mmukmap would fail on it.
	 */
		return (void*)(kseg0|pa);
	size = PGROUND(size);
	pae = mmukmap(va, pa, size);
	if(pae == 0 || pae-size != pa)
		panic("vmap(%#p, %lud) called from %#p: mmukmap fails %#p",
			pa+o, osize, getcallerpc(&pa), pae);
	return (void*)(va+o);
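/*
 * Hypothetical use (the address is made up): map a device's
 * registers, then access them through the returned pointer:
 *	ulong *regs = vmap(0x49020000, BY2PG);
 */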
vunmap(void* v, usize size)
	/*
	 * XXX - replace with new vm stuff.
	 * Can't do this until we do a real vmap for all the space that
	 * might be used, e.g. stuff below 1MB which is currently
	 * mapped automagically at boot but isn't used (or
	 * at least shouldn't be used) by the kernel.
	upafree(PADDR(v), size);
	 */
/*
 * Everything is in domain 0;
 * domain 0 access bits in the DAC register are set
 * to Client, which means access is controlled by the
 * permission values set in the PTE.
 *
 * L1 access control for the kernel is set to 1 (RW,
 * no user mode access);
 * L2 access control for the kernel is set to 1 (ditto).
 *
 * L1 user mode access is never set;
 * L2 access control for user mode is set to either
 * 2 (RO) or 3 (RW) depending on whether text or data.
 * (To get kernel RO, set AP to 0 and the S bit in control
 * register c1.)
 *
 * The L1 entries in use are of type Coarse: each points at a
 * second-level (coarse) page table of 256 entries, thus
 * 1024 bytes per table.
 * The L2 entries in use are of type Small: each maps a 4KiB page,
 * so one coarse table maps 1MiB.
 *
 * 4KiB turns up three times here: it is 1) the page size, 2) the
 * size allocated for an L2 page-table page (note only 1KiB
 * is needed per L2 table - to be dealt with later) and
 * 3) the size of the area in L1 needed to hold the PTEs
 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).
 */