/*
 * Build the bootstrap level-1 page table `l1'.
 * Maps dram at both KZERO and its identity (physical) address,
 * maps the soc I/O region and the per-arm local peripherals with
 * device attributes, then links the upper translation levels over
 * the whole kernel address space.
 * NOTE(review): this block contained unresolved diff markers; it has
 * been resolved to the "+" side of the patch.
 */
void
mmu0init(uintptr *l1)
{
	uintptr va, pa, pe, attr;

	/* KZERO: normal cached memory, mapped at KZERO and identity */
	attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTESH(SHARE_INNER);
	pe = -KZERO;
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
		l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
		l1[PTL1X(pa, 1)] = pa | PTEVALID | PTEBLOCK | attr;
	}

	/* VIRTIO: device memory, never executable */
	attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
	pe = soc.physio + soc.iosize;
	for(pa = soc.physio, va = soc.virtio; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
		if(((pa|va) & PGLSZ(1)-1) != 0){
			/* not block aligned: reuse the l1 page itself as a level-0 table */
			l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
			for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
				assert(l1[PTLX(va, 0)] == 0);
				l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
			}
			break;
		}
		l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
	}

	/* ARMLOCAL: device memory, never executable */
	attr = PTEWRITE | PTEAF | PTEKERNEL | PTEUXN | PTEPXN | PTESH(SHARE_OUTER) | PTEDEVICE;
	pe = soc.armlocal + MB;
	for(pa = soc.armlocal, va = ARMLOCAL; pa < pe; pa += PGLSZ(1), va += PGLSZ(1)){
		if(((pa|va) & PGLSZ(1)-1) != 0){
			/* not block aligned: reuse the l1 page itself as a level-0 table */
			l1[PTL1X(va, 1)] = (uintptr)l1 | PTEVALID | PTETABLE;
			for(; pa < pe && ((va|pa) & PGLSZ(1)-1) != 0; pa += PGLSZ(0), va += PGLSZ(0)){
				assert(l1[PTLX(va, 0)] == 0);
				l1[PTLX(va, 0)] = pa | PTEVALID | PTEPAGE | attr;
			}
			break;
		}
		l1[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | attr;
	}

	/* link the upper levels over all of kernel space */
	if(PTLEVELS > 2)
		for(va = KSEG0; va != 0; va += PGLSZ(2))
			l1[PTL1X(va, 2)] = (uintptr)&l1[L1TABLEX(va, 1)] | PTEVALID | PTETABLE;
	if(PTLEVELS > 3)
		for(va = KSEG0; va != 0; va += PGLSZ(3))
			l1[PTL1X(va, 3)] = (uintptr)&l1[L1TABLEX(va, 2)] | PTEVALID | PTETABLE;
}
{
	/*
	 * NOTE(review): the function header line is not visible in this
	 * fragment (looks like mmu0clear(uintptr *l1) — confirm upstream).
	 * Clears the identity-map entries of dram at every level, except
	 * where the identity entry aliases the KZERO entry.
	 * Unresolved diff markers in this block have been resolved.
	 */
	uintptr va, pa, pe;

	pe = -KZERO;
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
		if(PTL1X(pa, 1) != PTL1X(va, 1))
			l1[PTL1X(pa, 1)] = 0;
	if(PTLEVELS > 2)
		for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
			if(PTL1X(pa, 2) != PTL1X(va, 2))
				l1[PTL1X(pa, 2)] = 0;
	if(PTLEVELS > 3)
		for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
			if(PTL1X(pa, 3) != PTL1X(va, 3))
				l1[PTL1X(pa, 3)] = 0;
}
/*
 * NOTE(review): the function name/parameter line between `void' and `{'
 * is missing from this fragment (looks like mmuidmap(uintptr *l1) —
 * confirm upstream).  Re-creates the identity map of dram by copying
 * the KZERO entries, then loads the new table.
 * Unresolved diff markers in this block have been resolved.
 */
void
{
	uintptr va, pa, pe;

	pe = PHYSDRAM + soc.dramsize;
	if(pe > (uintptr)-KZERO)
		pe = (uintptr)-KZERO;
	for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(1), va += PGLSZ(1))
		l1[PTL1X(pa, 1)] = l1[PTL1X(va, 1)];
	if(PTLEVELS > 2)
		for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(2), va += PGLSZ(2))
			l1[PTL1X(pa, 2)] = l1[PTL1X(va, 2)];
	if(PTLEVELS > 3)
		for(pa = PHYSDRAM, va = KZERO; pa < pe; pa += PGLSZ(3), va += PGLSZ(3))
			l1[PTL1X(pa, 3)] = l1[PTL1X(va, 3)];
	setttbr(PADDR(&l1[L1TABLEX(0, PTLEVELS-1)]));
	flushtlb();
}
/*
 * Per-cpu mmu setup: allocate and zero this cpu's top-level user
 * page table (m->mmutop), then install it via mmuswitch(nil).
 * Unresolved diff markers in this block have been resolved
 * (old per-cpu mmul1 replaced by mmutop).
 */
void
mmu1init(void)
{
	m->mmutop = mallocalign(L1TOPSIZE, BY2PG, 0, 0);
	if(m->mmutop == nil)
		panic("mmu1init: no memory for mmutop");
	memset(m->mmutop, 0, L1TOPSIZE);
	mmuswitch(nil);
}
+/* KZERO maps the first 1GB of ram */
uintptr
paddr(void *va)
{
return nil;
}
-void
-kmapinval(void)
+/* KMAP maps all of ram (up to 4GB) */
+static void*
+kmapaddr(uintptr pa)
{
+ if(pa < (uintptr)-KZERO)
+ return (void*)(pa + KZERO);
+ return (void*)(pa + KMAP);
}
/*
 * Map a page for temporary kernel access; on this port every page
 * is permanently reachable through kmapaddr(), so this is just a
 * translation.  Unresolved diff markers resolved (kaddr → kmapaddr,
 * so pages above the KZERO window also work).
 */
KMap*
kmap(Page *p)
{
	return kmapaddr(p->pa);
}
/*
 * NOTE(review): the function name line is missing from this fragment
 * (likely kunmap(KMap*) — confirm upstream).  Body is intentionally
 * empty: kmap() hands out permanent mappings, so nothing to undo.
 */
void
{
}
/*
 * No-op: kmap() returns addresses from the permanent KZERO/KMAP
 * windows (see kmapaddr), so there is no per-cpu kmap state to
 * invalidate.  Unresolved diff markers resolved.
 */
void
kmapinval(void)
{
}
+
+#define INITMAP (ROUND((uintptr)end + BY2PG, PGLSZ(1))-KZERO)
+
+static void*
+rampage(void)
+{
+ uintptr pa;
+
+ if(conf.npage)
+ return mallocalign(BY2PG, BY2PG, 0, 0);
+
+ pa = conf.mem[0].base;
+ assert((pa % BY2PG) == 0);
+ assert(pa < INITMAP);
+ conf.mem[0].base += BY2PG;
+ return KADDR(pa);
+}
+
+static void
+l1map(uintptr va, uintptr pa, uintptr pe, uintptr attr)
+{
+ uintptr *l1, *l0;
+
+ assert(pa < pe);
+
+ va &= -BY2PG;
+ pa &= -BY2PG;
+ pe = PGROUND(pe);
+
+ attr |= PTEKERNEL | PTEAF;
+
+ l1 = (uintptr*)L1;
+
+ while(pa < pe){
+ if(l1[PTL1X(va, 1)] == 0 && (pe-pa) >= PGLSZ(1) && ((va|pa) & PGLSZ(1)-1) == 0){
+ l1[PTL1X(va, 1)] = PTEVALID | PTEBLOCK | pa | attr;
+ va += PGLSZ(1);
+ pa += PGLSZ(1);
+ continue;
+ }
+ if(l1[PTL1X(va, 1)] & PTEVALID) {
+ assert((l1[PTL1X(va, 1)] & PTETABLE) == PTETABLE);
+ l0 = KADDR(l1[PTL1X(va, 1)] & -PGLSZ(0));
+ } else {
+ l0 = rampage();
+ memset(l0, 0, BY2PG);
+ l1[PTL1X(va, 1)] = PTEVALID | PTETABLE | PADDR(l0);
+ }
+ assert(l0[PTLX(va, 0)] == 0);
+ l0[PTLX(va, 0)] = PTEVALID | PTEPAGE | pa | attr;
+ va += BY2PG;
+ pa += BY2PG;
+ }
+}
+
+static void
+kmapram(uintptr base, uintptr limit)
+{
+ if(base < (uintptr)-KZERO && limit > (uintptr)-KZERO){
+ kmapram(base, (uintptr)-KZERO);
+ kmapram((uintptr)-KZERO, limit);
+ return;
+ }
+ if(base < INITMAP)
+ base = INITMAP;
+ if(base >= limit || limit <= INITMAP)
+ return;
+
+ l1map((uintptr)kmapaddr(base), base, limit,
+ PTEWRITE | PTEPXN | PTEUXN | PTESH(SHARE_INNER));
+}
+
+void
+meminit(void)
+{
+ uvlong memsize = 0;
+ uintptr pa, va;
+ char *p, *e;
+ int i;
+
+ if(p = getconf("*maxmem")){
+ memsize = strtoull(p, &e, 0) - PHYSDRAM;
+ for(i = 1; i < nelem(conf.mem); i++){
+ if(e <= p || *e != ' ')
+ break;
+ p = ++e;
+ conf.mem[i].base = strtoull(p, &e, 0);
+ if(e <= p || *e != ' ')
+ break;
+ p = ++e;
+ conf.mem[i].limit = strtoull(p, &e, 0);
+ }
+ }
+
+ if (memsize < INITMAP) /* sanity */
+ memsize = INITMAP;
+
+ getramsize(&conf.mem[0]);
+ if(conf.mem[0].limit == 0){
+ conf.mem[0].base = PHYSDRAM;
+ conf.mem[0].limit = PHYSDRAM + memsize;
+ }else if(p != nil)
+ conf.mem[0].limit = conf.mem[0].base + memsize;
+
+ /*
+ * now we know the real memory regions, unmap
+ * everything above INITMAP and map again with
+ * the proper sizes.
+ */
+ coherence();
+ for(va = INITMAP+KZERO; va != 0; va += PGLSZ(1)){
+ pa = va-KZERO;
+ ((uintptr*)L1)[PTL1X(pa, 1)] = 0;
+ ((uintptr*)L1)[PTL1X(va, 1)] = 0;
+ }
+ flushtlb();
+
+ pa = PGROUND((uintptr)end)-KZERO;
+ for(i=0; i<nelem(conf.mem); i++){
+ if(conf.mem[i].limit <= conf.mem[i].base
+ || conf.mem[i].base >= PHYSDRAM + soc.dramsize){
+ conf.mem[i].base = conf.mem[i].limit = 0;
+ continue;
+ }
+ if(conf.mem[i].limit > PHYSDRAM + soc.dramsize)
+ conf.mem[i].limit = PHYSDRAM + soc.dramsize;
+
+ /* take kernel out of allocatable space */
+ if(pa > conf.mem[i].base && pa < conf.mem[i].limit)
+ conf.mem[i].base = pa;
+
+ kmapram(conf.mem[i].base, conf.mem[i].limit);
+ }
+ flushtlb();
+
+ /* rampage() is now done, count up the pages for each bank */
+ for(i=0; i<nelem(conf.mem); i++)
+ conf.mem[i].npage = (conf.mem[i].limit - conf.mem[i].base)/BY2PG;
+}
+
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
- uintptr a, pe, off, attr;
+ uintptr attr, off;
if(va == 0)
return 0;
+ off = pa & BY2PG-1;
+
attr = va & PTEMA(7);
- va &= -PGLSZ(1);
- off = pa % PGLSZ(1);
- a = va + off;
- pe = (pa + size + (PGLSZ(1)-1)) & -PGLSZ(1);
- while(pa < pe){
- ((uintptr*)L1)[PTL1X(va, 1)] = pa | PTEVALID | PTEBLOCK | PTEWRITE | PTEAF
- | PTEKERNEL | PTESH(SHARE_OUTER) | attr;
- pa += PGLSZ(1);
- va += PGLSZ(1);
- }
+ attr |= PTEWRITE | PTEUXN | PTEPXN | PTESH(SHARE_OUTER);
+
+ va &= -BY2PG;
+ pa &= -BY2PG;
+
+ l1map(va, pa, pa + off + size, attr);
flushtlb();
- return a;
+
+ return va + off;
+}
+
+void*
+vmap(uintptr pa, int size)
+{
+ static uintptr base = VMAP;
+ uintptr pe = pa + size;
+ uintptr va;
+
+ va = base;
+ base += PGROUND(pe) - (pa & -BY2PG);
+
+ return (void*)mmukmap(va | PTEDEVICE, pa, size);
+}

/* no-op: vmap address space is never reclaimed (see vmap's static base) */
void
vunmap(void *, int)
{
}
static uintptr*
int i, x;
x = PTLX(va, PTLEVELS-1);
- table = &m->mmul1[L1TABLEX(va, PTLEVELS-1)];
+ table = m->mmutop;
for(i = PTLEVELS-2; i >= level; i--){
pte = table[x];
if(pte & PTEVALID) {
if(pte & (0xFFFFULL<<48))
iprint("strange pte %#p va %#p\n", pte, va);
pte &= ~(0xFFFFULL<<48 | BY2PG-1);
- table = KADDR(pte);
} else {
pg = up->mmufree;
if(pg == nil)
if((pg->next = up->mmuhead[i+1]) == nil)
up->mmutail[i+1] = pg;
up->mmuhead[i+1] = pg;
- memset(KADDR(pg->pa), 0, BY2PG);
+ pte = pg->pa;
+ memset(kmapaddr(pte), 0, BY2PG);
coherence();
- table[x] = pg->pa | PTEVALID | PTETABLE;
- table = KADDR(pg->pa);
+ table[x] = pte | PTEVALID | PTETABLE;
}
+ table = kmapaddr(pte);
x = PTLX(va, (uintptr)i);
}
return &table[x];
{
	/*
	 * NOTE(review): the function header is not visible in this fragment
	 * (looks like mmuput(uintptr va, uintptr pa, Page *pg) — confirm
	 * upstream).  Unresolved diff markers resolved.
	 *
	 * Prevent the following scenario:
	 * pX sleeps on cpuA, leaving its page tables in mmutop
	 * pX wakes up on cpuB, and exits, freeing its page tables
	 * pY on cpuB allocates a freed page table page and overwrites with data
	 * cpuA takes an interrupt, and is now running with bad page tables
	 * In theory this shouldn't hurt because only user address space tables
	 * are affected, and mmuswitch will clear mmutop before a user process is
	 * dispatched. But empirically it correlates with weird problems, eg
	 * resetting of the core clock at 0x4000001C which confuses local timers.
	 */
	s = splhi();
	while((pte = mmuwalk(va, 0)) == nil){
		spllo();
		up->mmufree = newpage(0, nil, 0);
		splhi();
	}
	/* NOTE(review): a conditional line appears to be missing here
	 * before the flushasidvall/flushasidva alternatives — confirm upstream */
		flushasidvall((uvlong)up->asid<<48 | va>>12);
	else
		flushasidva((uvlong)up->asid<<48 | va>>12);
	/* device/uncached pages get outer shareability; PXN keeps user pages non-executable by the kernel */
	*pte = pa | PTEPAGE | PTEUSER | PTEPXN | PTENG | PTEAF |
		(((pa & PTEMA(7)) == PTECACHED)? PTESH(SHARE_INNER): PTESH(SHARE_OUTER));
	if(pg->txtflush & (1UL<<m->machno)){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cachedwbinvse(kmap(pg), BY2PG);
		cacheiinvse((void*)va, BY2PG);
		pg->txtflush &= ~(1UL<<m->machno);
	}
}
Page *t;
for(va = UZERO; va < USTKTOP; va += PGLSZ(PTLEVELS-1))
- m->mmul1[PTL1X(va, PTLEVELS-1)] = 0;
+ m->mmutop[PTLX(va, PTLEVELS-1)] = 0;
if(p == nil){
- setttbr(PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
+ setttbr(PADDR(m->mmutop));
return;
}
p->newtlb = 0;
}
- for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
- va = t->va;
- m->mmul1[PTL1X(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
- }
-
if(allocasid(p))
flushasid((uvlong)p->asid<<48);
- setttbr((uvlong)p->asid<<48 | PADDR(&m->mmul1[L1TABLEX(0, PTLEVELS-1)]));
+ setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
+
+ for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
+ va = t->va;
+ m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
+ }
}
void