if(*table & PTESIZE)
panic("mmuwalk2: va %luX entry %luX", va, *table);
if(!(*table & PTEVALID)){
- /*
- * Have to call low-level allocator from
- * memory.c if we haven't set up the xalloc
- * tables yet.
- */
- if(conf.mem[0].npage != 0)
- map = xspanalloc(BY2PG, BY2PG, 0);
- else
- map = rampage();
- if(map == nil)
- panic("mmuwalk xspanalloc failed");
+ map = rampage();
+ memset(map, 0, BY2PG);
*table = PADDR(map)|PTEWRITE|PTEVALID;
}
table = KADDR(PPN(*table));
static int findhole(ulong *a, int n, int count);
static ulong vmapalloc(ulong size);
+static int pdbmap(ulong *, ulong, ulong, int);
static void pdbunmap(ulong*, ulong, int);
/*
void
vunmap(void *v, int size)
{
- int i;
ulong va, o;
- Mach *nm;
- Proc *p;
/*
* might not be aligned
putcr3(PADDR(MACHP(0)->pdb));
return;
}
- for(i=0; i<conf.nproc; i++){
- p = proctab(i);
- if(p->state == Dead)
- continue;
- if(p != up)
- p->newtlb = 1;
- }
- for(i=0; i<conf.nmach; i++){
- nm = MACHP(i);
- if(nm != m)
- nm->flushmmu = 1;
- }
+ procflushothers();
flushmmu();
- for(i=0; i<conf.nmach; i++){
- nm = MACHP(i);
- if(nm != m)
- while(active.machs[nm->machno] && nm->flushmmu)
- ;
- }
}
/*
* Add kernel mappings for pa -> va for a section of size bytes.
*/
-int
+static int
pdbmap(ulong *pdb, ulong pa, ulong va, int size)
{
int pse;
}
}
+/*
+ * Install kernel mappings pa -> va for size bytes in the
+ * prototype page table (MACHP(0)->pdb), via pdbmap().
+ * Public wrapper now that pdbmap itself is static.
+ */
+void
+pmap(ulong pa, ulong va, int size)
+{
+ pdbmap(MACHP(0)->pdb, pa, va, size);
+}
+
+/*
+ * Remove the mappings for va .. va+size from the prototype
+ * page table (MACHP(0)->pdb), then reload cr3 to flush the TLB.
+ * NOTE(review): pdbunmap edits MACHP(0)->pdb but the flush reloads
+ * the current processor's m->pdb; page tables that already copied
+ * these PDEs (other procs/processors) may retain stale entries —
+ * confirm callers only punmap ranges whose PDEs were never copied.
+ */
+void
+punmap(ulong va, int size)
+{
+ pdbunmap(MACHP(0)->pdb, va, size);
+ mmuflushtlb(PADDR(m->pdb));
+}
+
/*
* Handle a fault by bringing vmap up to date.
* Only copy pdb entries and they never go away,