#include "../port/lib.h"
#define FEXT(d, o, w)	(((d)>>(o)) & ((1<<(w))-1))
#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)
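/*
 * A worked example of the index split (short-descriptor format):
 * with va = 0x80123456,
 *	L1X(va) = (va>>20) & 0xFFF = 0x801	index of va's MiB section
 *	L2X(va) = (va>>12) & 0xFF  = 0x23	index of va's 4 KiB page
 * so an L1 table has 4096 entries (one per MiB of the 4 GiB space)
 * and a coarse L2 table has 256 entries (one per page in a MiB).
 */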
	L1lo = UZERO/MiB,		/* == L1X(UZERO): for a 32-bit va, va/MiB is va>>20 */
	L1hi = (USTKTOP+MiB-1)/MiB,	/* == L1X(USTKTOP+MiB-1), USTKTOP rounded up to a MiB */
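/*
 * For example, with UZERO == 0 and USTKTOP == 512*MiB (both values
 * are port-specific), L1lo == 0 and L1hi == 512: user mappings can
 * only ever occupy mmul1[0..511].
 */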
	/* map all of ram at KZERO */
	va = KZERO;
	for(pa = PHYSDRAM; pa < PHYSDRAM+DRAMSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|Cached|Buffered;
		va += MiB;
	}
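/*
 * Each iteration installs one short-descriptor Section entry: bits
 * 31:20 hold the MiB-aligned physical base, the low bits carry the
 * domain, access-permission, C (Cached) and B (Buffered) attributes,
 * and bits 1:0 mark the descriptor as a Section.
 */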
	/* identity map first MiB of ram so the mmu can be enabled */
	l1[L1X(PHYSDRAM)] = PHYSDRAM|Dom0|L1AP(Krw)|Section|Cached|Buffered;
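/*
 * The identity entry matters only for the instant the mmu comes on:
 * the pc still holds a physical address, so the MiB being executed
 * from must map to itself.  mmuinit1() below removes the entry once
 * the kernel is running at KZERO addresses.
 */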
	va = VIRTIO;
	for(pa = PHYSIO; pa < PHYSIO+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
		va += MiB;
	}
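/*
 * Note these entries differ from the ram map above only in the
 * missing Cached|Buffered bits: device registers must be accessed
 * uncached and unbuffered so loads and stores reach the hardware
 * immediately and in order.
 */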
	/* double map exception vectors at top of virtual memory */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small;
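/*
 * The vector page is reached through a Coarse L1 entry pointing at
 * l2, whose Small-page entry maps the base of ram (where these ports
 * keep the vector table) at the ARM high-vectors address 0xFFFF0000.
 */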
	/* undo identity map of first MiB of ram */
	l1[L1X(PHYSDRAM)] = 0;
	cachedwbse(&l1[L1X(PHYSDRAM)], sizeof(PTE));
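/*
 * Order matters here: the PTE is zeroed in the table, then the cache
 * line holding it is written back so that the hardware table walk,
 * which reads page tables from memory, sees the change before any
 * stale TLB copy of the identity mapping is invalidated.
 */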
mmul2empty(Proc* proc, int clear)
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
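/*
 * The splice: the loop clears each page's L1 slot while walking l2
 * out to the list's final next pointer; the old cache list is
 * appended there, and the combined list then becomes the new
 * mmul2cache, so L2 pages are recycled across switches, not freed.
 */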
/* there's a bug in here */
	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		m->mmul1[L1lo] = Fault;
		/* count must be relative to L1lo, cf. the L1hi side below */
		memset(&m->mmul1[L1lo], 0, (m->mmul1lo - L1lo)*sizeof(PTE));
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
	}

	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
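/*
 * Two clearing strategies: the path flagged buggy above trims only
 * [L1lo, mmul1lo) and [mmul1hi, L1hi), the stretches that may hold
 * user entries, using the per-cpu bounds as a hint; the memset is
 * the blunt fallback that wipes every possible user L1 entry on
 * each switch.
 */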
mmuswitch(Proc* proc)
	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();
	/* move in new map */
	for(page = proc->mmul2; page != nil; page = page->next){
		l1[x] = PPN(page->pa)|Dom0|Coarse;	/* x is page->daddr, the entry's L1 index */
		/* know here that L1lo <= x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}
	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
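/*
 * Worked example of the bounds heuristic above: mmul1lo/mmul1hi
 * delimit the window known to hold no user entries.  If the window
 * is [0, 512) and a Coarse entry lands at x == 10, then
 * x+1 - mmul1lo == 11 while mmul1hi - x == 502, so mmul1lo becomes
 * 11: the window shrinks from whichever end is nearer x, keeping the
 * span that mmul1empty() must later clear as small as possible.
 */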
mmurelease(Proc* proc)
	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;
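/*
 * Unlike mmuswitch(), which recycles the cached L2 pages, process
 * exit really frees them: each page goes back on the free list, and
 * the palloc.r.p test guards waking any process sleeping in the page
 * allocator now that pages are available again.
 */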
	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
putmmu(uintptr va, uintptr pa, Page* page)
		/* wasteful - l2 pages only have 256 entries - fix */
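/*
 * The waste, concretely: a coarse L2 table is 256 entries of 4 bytes,
 * i.e. 1 KiB, but a whole BY2PG page (4 KiB on these ports) is
 * allocated for it, so three quarters of every L2 page goes unused;
 * four tables could share one page.
 */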
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->next = up->mmul2;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);
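/*
 * The ordering is deliberate: the L2 page is pushed to memory before
 * the L1 entry pointing at it is written and flushed, so a hardware
 * table walk can never follow the new Coarse entry into a table
 * whose contents are still sitting dirty in the data cache.
 */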
		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	pte = UINT2PTR(KADDR(PPN(*l1)));
	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);
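/*
 * For example: a PTEWRITE|PTEVALID pa yields a Small descriptor with
 * Cached|Buffered set, while PTEWRITE|PTEUNCACHED|PTEVALID leaves C
 * and B clear so accesses go straight to memory; the access-permission
 * bits are likewise chosen from PTEWRITE (user read/write when set,
 * read-only otherwise).
 */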
	/* drop any stale tlb entry for this va */
	mmuinvalidateaddr(PPN(va));
	/*
	 * write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
	checkmmu(va, PPN(pa));
/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
	if(pa < PHYSDRAM + memsize)	/* assumes PHYSDRAM is 0 */
		return PHYSDRAM + memsize - pa;
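/*
 * Worked example, assuming PHYSDRAM == 0 and memsize == 128*MiB:
 *	cankaddr(0x07F00000) == 0x00100000	(last MiB of ram)
 *	cankaddr(0x08000000) == 0		(past the end of ram)
 */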
mmukmap(uintptr va, uintptr pa, usize size)
	assert((va & (MiB-1)) == 0);

	pte = pte0 = &m->mmul1[L1X(va)];
	/* first pass: fail without side effects if any slot is in use */
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
		mmuinvalidateaddr(va+n);
	}
	/* cachedwbse takes a byte count, not a PTE count */
	cachedwbse(pte0, (pte - pte0)*sizeof(PTE));
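/*
 * Worked example, assuming pa is first rounded down to a MiB boundary
 * and the byte offset added back to the returned va (as Section
 * granularity implies): mmukmap(0x90000000, 0x4A003000, 0x1000) maps
 * the MiB at pa 0x4A000000 to va 0x90000000 with one Section entry
 * and returns 0x90003000; 0 means an L1 slot was already in use.
 */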
checkmmu(uintptr va, uintptr pa)