2 #include "../port/lib.h"
/*
 * Page-table index extractors for the two-level ARM tables:
 * L1X gives the level-1 index (12 bits, so 4096 entries), L2X the
 * coarse level-2 index (8 bits, so 256 entries).
 * NOTE(review): assumes FEXT(v, o, w) extracts w bits at bit offset o
 * — confirm against ../port/lib.h.
 */
10 #define L1X(va) FEXT((va), 20, 12)
11 #define L2X(va) FEXT((va), 12, 8)
/* L1 index range spanning all of user space (UZERO .. USTKTOP) */
14 L1lo = UZERO/MiB, /* L1X(UZERO)? */
15 L1hi = (USTKTOP+MiB-1)/MiB, /* L1X(USTKTOP+MiB-1)? */
/* an all-zero L1 descriptor maps nothing (translation fault) */
18 #define ISHOLE(pte) ((pte) == 0)
20 /* dump level 1 page table at virtual addr l1 */
/*
 * NOTE(review): only a fragment of this dumper is visible.  It walks
 * all 4096 L1 entries, coalescing consecutive entries of the same
 * mapping type into a single (startva..endva) range, printing each
 * range as it is closed and once more at the end for any still-open
 * range.  endva appears to be printed with %#llux (64-bit) while the
 * others use %#lux — presumably endva is declared uvlong in the elided
 * lines so that va+MB at the top of the address space can't wrap; confirm.
 */
25 uintptr pa, startva, startpa;
30 endva = startva = startpa = 0;
32 /* dump first level of ptes */
33 for (va = i = 0; i < 4096; i++) {
36 type = pte & (Fine|Section|Coarse);
38 if (endva != 0) { /* open range? close it */
39 iprint("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
40 startva, endva-1, startpa, rngtype);
44 if (endva == 0) { /* no open range? start one */
49 endva = va + MB; /* continue the open range */
53 if (endva != 0) /* close an open range */
54 iprint("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
55 startva, endva-1, startpa, rngtype);
/* scratch buffer mapped into user-visible i/o space below; see crypto setup */
59 extern uchar sandbox[64*1024+BY2PG];
62 /* identity map `mbs' megabytes from phys */
64 mmuidmap(uintptr phys, int mbs)
/* one 1MiB Section descriptor per megabyte: kernel rw, domain 0 */
72 for (fpa = phys; mbs-- > 0; fpa += MiB)
73 l1[L1X(fpa)] = fpa|Dom0|L1AP(Krw)|Section;
/*
 * NOTE(review): fragment, presumably from mmuinit.  Builds the coarse
 * (L2) tables for the exception vectors and the VIRTIO i/o window,
 * including user-visible windows onto the CESA crypto engine.
 */
91 * map high vectors to start of dram, but only 4K, not 1MB.
/* vector page lives just below the Mach struct / page tables in dram */
93 pa -= MACHSIZE+2*1024;
96 /* vectors step on u-boot, but so do page tables */
97 l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
98 l1[L1X(HVECTORS)] = pa|Dom0|Coarse; /* vectors -> ttb-machsize-2k */
100 /* double map vectors at virtual 0 so reset will see them */
104 l2[L2X(0)] = PHYSDRAM|L2AP(Krw)|Small;
105 l1[L1X(0)] = pa|Dom0|Coarse;
108 * set up L2 ptes for PHYSIO (i/o registers), with smaller pages to
109 * enable user-mode access to a few devices.
113 /* identity map by default */
/* 1MiB of i/o space as 4KiB small pages, kernel rw */
114 for (i = 0; i < 1024/4; i++)
115 l2[L2X(VIRTIO + i*BY2PG)] = (PHYSIO + i*BY2PG)|L2AP(Krw)|Small;
119 * rest is to let rae experiment with the crypto hardware
121 /* access to cycle counter */
/* user rw — deliberately exposes the clock registers to user mode */
122 l2[L2X(soc.clock)] = soc.clock | L2AP(Urw)|Small;
123 /* cesa registers; also visible in user space */
124 for (i = 0; i < 16; i++)
125 l2[L2X(soc.cesa + i*BY2PG)] = (soc.cesa + i*BY2PG) |
127 /* crypto sram; remapped to unused space and visible in user space */
128 l2[L2X(PHYSIO + 0xa0000)] = PHYSCESASRAM | L2AP(Urw)|Small;
129 /* 64k of scratch dram */
/* back the 0xb0000 window with the page-aligned part of sandbox[] */
130 for (i = 0; i < 16; i++)
131 l2[L2X(PHYSIO + 0xb0000 + i*BY2PG)] =
132 (PADDR((uintptr)sandbox & ~(BY2PG-1)) + i*BY2PG) |
/* hook the coarse table into l1 so VIRTIO resolves through it */
136 l1[L1X(VIRTIO)] = pa|Dom0|Coarse;
144 // mmudump(l1); /* DEBUG. too early to print */
148 mmul2empty(Proc* proc, int clear)
/*
 * Detach proc's active l2 page list (fragment visible): optionally
 * zero each l2 page, fault the l1 entry that pointed at it, then
 * splice the whole list onto the front of proc's free cache.
 * NOTE(review): page->daddr is used as an l1 index here — confirm
 * that invariant where the pages are allocated.
 */
155 for(page = *l2; page != nil; page = page->next){
157 memset((void*)page->va, 0, BY2PG);
158 l1[page->daddr] = Fault;
/* list splice: cache = mmul2 ++ cache */
161 *l2 = proc->mmul2cache;
162 proc->mmul2cache = proc->mmul2;
169 #ifdef notdef /* there's a bug in here */
/*
 * NOTE(review): compiled-out dead code.  It tries to clear only the
 * used sub-ranges of the user l1 entries ([L1lo, mmul1lo) and
 * [mmul1hi, L1hi)) instead of the whole user range.  The first memset
 * starts at index L1lo but its length is mmul1lo entries, not
 * (mmul1lo - L1lo) — wrong whenever L1lo != 0, plausibly the bug the
 * author flagged.  The live code below the #ifdef clears the full range.
 */
172 /* clean out any user mappings still in l1 */
173 if(m->mmul1lo > L1lo){
175 m->mmul1[L1lo] = Fault;
177 memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
180 if(m->mmul1hi < L1hi){
181 l1 = &m->mmul1[m->mmul1hi];
/* single entry: presumably stored directly instead of memset — confirm */
182 if((L1hi - m->mmul1hi) == 1)
185 memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
/* live path: wipe the whole user l1 range */
189 memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
194 mmuswitch(Proc* proc)
/*
 * Install proc's address space (fragment visible): no-op when this
 * cpu already holds proc's map and no tlb flush was requested;
 * otherwise re-point the user l1 entries at proc's l2 tables and
 * write the table range back to memory before the tlb flush.
 */
200 /* do kprocs get here and if so, do they need to? */
201 if(m->mmupid == proc->pid && !proc->newtlb)
203 m->mmupid = proc->pid;
205 /* write back dirty and invalidate l1 caches */
215 /* move in new map */
217 for(page = proc->mmul2; page != nil; page = page->next){
/* x is presumably the l1 index taken from page->daddr — confirm */
219 l1[x] = PPN(page->pa)|Dom0|Coarse;
220 /* know here that L1lo < x < L1hi */
/* track the used window: move whichever of mmul1lo/mmul1hi is nearer x */
221 if(x+1 - m->mmul1lo < m->mmul1hi - x)
227 /* make sure map is in memory */
228 /* could be smarter about how much? */
229 cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
230 l2cacheuwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
232 /* lose any possible stale tlb entries */
236 //print("mmuswitch l1lo %d l1hi %d %d\n",
237 // m->mmul1lo, m->mmul1hi, proc->kp);
253 mmurelease(Proc* proc)
/*
 * Tear down proc's mmu state on exit (fragment visible): free every
 * page on the l2 cache list, then write the user part of this cpu's
 * l1 table back to memory before flushing the tlb.
 */
257 /* write back dirty and invalidate l1 caches */
261 for(page = proc->mmul2cache; page != nil; page = next){
/* cached l2 pages must be singly referenced; anything else is corruption */
264 panic("mmurelease: page->ref %lud", page->ref);
267 if(proc->mmul2cache != nil)
269 proc->mmul2cache = nil;
273 /* make sure map is in memory */
274 /* could be smarter about how much? */
275 cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
276 l2cacheuwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));
278 /* lose any possible stale tlb entries */
283 putmmu(uintptr va, uintptr pa, Page* page)
/*
 * Map user va -> pa in up's address space (fragment visible).
 * If the l1 entry for va is empty, take an l2 page from
 * up->mmul2cache (refilled via newpage when the cache is dry), zero
 * it, install it as a Coarse l1 descriptor and clean both levels out
 * of the d-cache; then write the Small-page l2 pte and invalidate the
 * old tlb entry for va.
 */
291 //print("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
292 //print("mmul1 %#p l1 %#p *l1 %#ux x %d pid %d\n",
293 // m->mmul1, l1, *l1, x, up->pid);
295 /* wasteful - l2 pages only have 256 entries - fix */
296 if(up->mmul2cache == nil){
297 /* auxpg since we don't need much? memset if so */
298 pg = newpage(1, 0, 0);
299 pg->va = VA(kmap(pg));
/* pop a recycled l2 page off the cache list and zero it */
303 up->mmul2cache = pg->next;
304 memset((void*)pg->va, 0, BY2PG);
/* push onto the proc's active l2 list */
307 pg->next = up->mmul2;
310 /* force l2 page to memory */
311 cachedwbse((void *)pg->va, BY2PG);
312 l2cacheuwbse((void *)pg->va, BY2PG);
/* point the l1 entry at the new coarse table and clean that entry too */
314 *l1 = PPN(pg->pa)|Dom0|Coarse;
315 cachedwbse(l1, sizeof *l1);
316 l2cacheuwbse(l1, sizeof *l1);
317 //print("l1 %#p *l1 %#ux x %d pid %d\n", l1, *l1, x, up->pid);
/* keep m->mmul1lo/mmul1hi bracketing the user l1 entries in use */
319 if(x >= m->mmul1lo && x < m->mmul1hi){
320 if(x+1 - m->mmul1lo < m->mmul1hi - x)
/* locate the l2 table that the l1 entry points at */
326 pte = (void*)KADDR(PPN(*l1));
327 //print("pte %#p index %ld %#ux\n", pte, L2X(va), *(pte+L2X(va)));
329 /* protection bits are
332 * PTEWRITE|PTEUNCACHED|PTEVALID;
/* NOTE(review): x is reused here as the l2 attribute bits */
335 if(!(pa & PTEUNCACHED))
336 x |= Cached|Buffered;
341 pte[L2X(va)] = PPN(pa)|x;
342 cachedwbse(&pte[L2X(va)], sizeof pte[0]);
343 l2cacheuwbse(&pte[L2X(va)], sizeof pte[0]);
345 /* clear out the current entry */
/* tlb invalidate by mva; PPN() just drops the in-page offset bits */
346 mmuinvalidateaddr(PPN(va));
349 * write back dirty entries - we need this because pio() in
350 * fault.c is writing via a different virt addr and won't clean
351 * its changes out of the dcache. Page coloring doesn't work
352 * on this mmu because the l1 virtual cache is set associative
353 * rather than direct mapped.
360 //print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
364 mmuuncache(void* v, usize size)
371 * Simple helper for ucalloc().
372 * Uncache a Section, must already be
/* only 1MiB-aligned, exactly-1MiB (Section-sized) regions are supported */
376 assert(!(va & (1*MiB-1)) && size == 1*MiB);
/* refuse anything that isn't currently a Section mapping */
380 if((*pte & (Fine|Section|Coarse)) != Section)
/* strip the cacheable/bufferable bits, then flush the stale mapping */
382 *pte &= ~(Cached|Buffered);
383 mmuinvalidateaddr(va);
/* clean the single 4-byte descriptor out of the l2 cache */
385 l2cacheuwbse(pte, 4);
391 mmukmap(uintptr va, uintptr pa, usize size)
/*
 * Map one 1MiB kernel Section va -> pa (fragment visible); both
 * addresses must be MiB-aligned and size exactly 1MiB.
 */
399 assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
/* kernel rw Section descriptor, domain 0; then flush the old mapping */
405 *pte = pa|Dom0|L1AP(Krw)|Section;
406 mmuinvalidateaddr(va);
408 l2cacheuwbse(pte, 4);
414 mmukunmap(uintptr va, uintptr pa, usize size)
/*
 * Undo a 1MiB mmukmap (fragment visible).  Same alignment/size
 * contract as mmukmap; refuses to unmap unless the descriptor still
 * holds exactly the value mmukmap wrote.
 */
422 assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
426 if(*pte != (pa|Dom0|L1AP(Krw)|Section))
429 mmuinvalidateaddr(va);
431 l2cacheuwbse(pte, 4);
437 * Return the number of bytes that can be accessed via KADDR(pa).
438 * If pa is not a valid argument to KADDR, return 0.
/* the kernel direct map covers the first 512MiB of physical space */
443 if(pa < PHYSDRAM + 512*MiB) /* assumes PHYSDRAM is 0 */
444 return PHYSDRAM + 512*MiB - pa;
450 vmap(uintptr pa, usize size)
/*
 * Map physical space for device access (fragment visible).  Low
 * physical addresses are already covered by the boot-time map and are
 * returned directly; everything else goes through mmukmap a Section
 * at a time.
 */
456 * XXX - replace with new vm stuff.
457 * Crock after crock - the first 4MB is mapped with 2MB pages
458 * so catch that and return good values because the current mmukmap
/* already mapped at boot: just offset into the kernel segment */
462 return (void*)(kseg0|pa);
468 size = PGROUND(size);
471 pae = mmukmap(va, pa, size);
/* mmukmap returns the pa just past the mapped range on success */
472 if(pae == 0 || pae-size != pa)
473 panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
474 pa+o, osize, getcallerpc(&pa), pae);
/* o is presumably pa's offset within the mapping unit — set in elided lines */
476 return (void*)(va+o);
481 vunmap(void* v, usize size)
/*
 * Release a vmap'd range (fragment visible): only returns the
 * physical space to the allocator; the page-table entries are
 * deliberately left alone, for the reason given below.
 */
484 * XXX - replace with new vm stuff.
485 * Can't do this until do real vmap for all space that
486 * might be used, e.g. stuff below 1MB which is currently
487 * mapped automagically at boot but that isn't used (or
488 * at least shouldn't be used) by the kernel.
489 upafree(PADDR(v), size);
496 * Everything is in domain 0;
497 * domain 0 access bits in the DAC register are set
498 * to Client, which means access is controlled by the
499 * permission values set in the PTE.
501 * L1 access control for the kernel is set to 1 (RW,
502 * no user mode access);
503 * L2 access control for the kernel is set to 1 (ditto)
505 * L1 user mode access is never set;
506 * L2 access control for user mode is set to either
507 * 2 (RO) or 3 (RW) depending on whether text or data,
509 * (To get kernel RO set AP to 0 and S bit in control
511 * Coarse second-level page-tables are used (referenced by Coarse L1
512 * descriptors). They have 256 entries and so consume 1024 bytes per table.
513 * Small (4KiB) L2 page descriptors are used. A full 4096-byte page is
514 * allocated per L2 table — room for 1024 entries, though only 256 are used.
516 * 4KiB. That's the size of 1) a page, 2) the
517 * size allocated for an L2 page-table page (note only 1KiB
518 * is needed per L2 page - to be dealt with later) and
519 * 3) the size of the area in L1 needed to hold the PTEs
520 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).