[KESEG16] EXEC16SEGM(0), /* kernel code 16-bit */
};
-static int didmmuinit;
static void taskswitch(ulong, ulong);
static void memglobal(void);
#define VPTX(va) (((ulong)(va))>>12)
#define vpd (vpt+VPTX(VPT))
-void
-mmuinit0(void)
-{
- memmove(m->gdt, gdt, sizeof gdt);
-}
+enum {
+	/*
+	 * IA32_PAT entry used for write combining.
+	 * Entry 7 is programmed to WC by mmuinit() and selected
+	 * per page by patwc() via the PWT/PCD/PAT bits of a PTE.
+	 */
+	PATWC	= 7,
+};
void
mmuinit(void)
{
ulong x, *p;
ushort ptr[3];
-
- didmmuinit = 1;
+ vlong v;
if(0) print("vpt=%#.8ux vpd=%#p kmap=%#.8ux\n",
VPT, vpd, KMAP);
memglobal();
m->pdb[PDX(VPT)] = PADDR(m->pdb)|PTEWRITE|PTEVALID;
- m->tss = malloc(sizeof(Tss));
- memset(m->tss, 0, sizeof(Tss));
+ m->tss = mallocz(sizeof(Tss), 1);
+ if(m->tss == nil)
+ panic("mmuinit: no memory for Tss");
m->tss->iomap = 0xDFFF<<16;
/*
taskswitch(PADDR(m->pdb), (ulong)m + BY2PG);
ltr(TSSSEL);
+
+ /* IA32_PAT write combining */
+ if((MACHP(0)->cpuiddx & Pat) != 0
+ && rdmsr(0x277, &v) != -1){
+ v &= ~(255LL<<(PATWC*8));
+ v |= 1LL<<(PATWC*8); /* WC */
+ wrmsr(0x277, v);
+ }
}
/*
void
flushpg(ulong va)
{
- if(X86FAMILY(m->cpuidax) >= 4)
+ if(m->cpuidfamily >= 4)
invlpg(va);
else
putcr3(getcr3());
mmuswitch(Proc* proc)
{
ulong *pdb;
+ ulong x;
+ int n;
if(proc->newtlb){
mmuptefree(proc);
proc->newtlb = 0;
}
- if(proc->mmupdb){
+ if(proc->mmupdb != nil){
pdb = tmpmap(proc->mmupdb);
pdb[PDX(MACHADDR)] = m->pdb[PDX(MACHADDR)];
tmpunmap(pdb);
taskswitch(proc->mmupdb->pa, (ulong)(proc->kstack+KSTACK));
}else
taskswitch(PADDR(m->pdb), (ulong)(proc->kstack+KSTACK));
+
+ memmove(&m->gdt[PROCSEG0], proc->gdt, sizeof(proc->gdt));
+ if((x = (ulong)proc->ldt) && (n = proc->nldt) > 0){
+ m->gdt[LDTSEG].d0 = (x<<16)|((n * sizeof(Segdesc)) - 1);
+ m->gdt[LDTSEG].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGLDT|SEGPL(0)|SEGP;
+ lldt(LDTSEL);
+ } else
+ lldt(NULLSEL);
}
/*
if(islo())
panic("mmurelease: islo");
taskswitch(PADDR(m->pdb), (ulong)m + BY2PG);
- if(proc->kmaptable){
+ if(proc->kmaptable != nil){
if(proc->mmupdb == nil)
panic("mmurelease: no mmupdb");
- if(--proc->kmaptable->ref)
- panic("mmurelease: kmap ref %d", proc->kmaptable->ref);
+ if(--proc->kmaptable->ref != 0)
+ panic("mmurelease: kmap ref %ld", proc->kmaptable->ref);
if(proc->nkmap)
panic("mmurelease: nkmap %d", proc->nkmap);
/*
* move kmaptable to free list.
*/
pagechainhead(proc->kmaptable);
- proc->kmaptable = 0;
+ proc->kmaptable = nil;
}
- if(proc->mmupdb){
+ if(proc->mmupdb != nil){
mmuptefree(proc);
mmupdbfree(proc, proc->mmupdb);
- proc->mmupdb = 0;
+ proc->mmupdb = nil;
}
- for(page = proc->mmufree; page; page = next){
+ for(page = proc->mmufree; page != nil; page = next){
next = page->next;
- if(--page->ref)
- panic("mmurelease: page->ref %d", page->ref);
+ if(--page->ref != 0)
+ panic("mmurelease: page->ref %ld", page->ref);
pagechainhead(page);
}
- if(proc->mmufree && palloc.r.p)
- wakeup(&palloc.r);
- proc->mmufree = 0;
+ if(proc->mmufree != nil)
+ pagechaindone();
+ proc->mmufree = nil;
+ if(proc->ldt != nil){
+ free(proc->ldt);
+ proc->ldt = nil;
+ proc->nldt = 0;
+ }
}
/*
* Update the mmu in response to a user fault. pa may have PTEWRITE set.
*/
void
-putmmu(ulong va, ulong pa, Page*)
+putmmu(uintptr va, uintptr pa, Page*)
{
int old, s;
Page *page;
* Error checking only.
*/
void
-checkmmu(ulong va, ulong pa)
+checkmmu(uintptr va, uintptr pa)
{
+	/*
+	 * error checking only: print a diagnostic when the pte
+	 * currently mapping va does not point at the expected
+	 * physical address pa
+	 */
	if(up->mmupdb == 0)
		return;
+	/* can only check when both the pde and pte for va are valid */
	if(!(vpd[PDX(va)]&PTEVALID) || !(vpt[VPTX(va)]&PTEVALID))
		return;
	if(PPN(vpt[VPTX(va)]) != pa)
-		print("%ld %s: va=%#08lux pa=%#08lux pte=%#08lux\n",
+		print("%ld %s: va=%#p pa=%#p pte=%#08lux\n",
			up->pid, up->text,
			va, pa, vpt[VPTX(va)]);
}
if(*table & PTESIZE)
panic("mmuwalk2: va %luX entry %luX", va, *table);
if(!(*table & PTEVALID)){
- /*
- * Have to call low-level allocator from
- * memory.c if we haven't set up the xalloc
- * tables yet.
- */
- if(didmmuinit)
- map = xspanalloc(BY2PG, BY2PG, 0);
- else
- map = rampage();
- if(map == nil)
- panic("mmuwalk xspanalloc failed");
+ map = rampage();
+ memset(map, 0, BY2PG);
*table = PADDR(map)|PTEWRITE|PTEVALID;
}
table = KADDR(PPN(*table));
static int findhole(ulong *a, int n, int count);
static ulong vmapalloc(ulong size);
+static int pdbmap(ulong *, ulong, ulong, int);
static void pdbunmap(ulong*, ulong, int);
/*
void
vunmap(void *v, int size)
{
- int i;
ulong va, o;
- Mach *nm;
- Proc *p;
/*
* might not be aligned
* boot. In that case it suffices to flush the MACH(0) TLB
* and return.
*/
- if(!active.thunderbirdsarego){
+ if(up == nil){
putcr3(PADDR(MACHP(0)->pdb));
return;
}
- for(i=0; i<conf.nproc; i++){
- p = proctab(i);
- if(p->state == Dead)
- continue;
- if(p != up)
- p->newtlb = 1;
- }
- for(i=0; i<conf.nmach; i++){
- nm = MACHP(i);
- if(nm != m)
- nm->flushmmu = 1;
- }
+ procflushothers();
flushmmu();
- for(i=0; i<conf.nmach; i++){
- nm = MACHP(i);
- if(nm != m)
- while((active.machs&(1<<nm->machno)) && nm->flushmmu)
- ;
- }
}
/*
* Add kernel mappings for pa -> va for a section of size bytes.
*/
-int
+static int
pdbmap(ulong *pdb, ulong pa, ulong va, int size)
{
int pse;
flag = pa&0xFFF;
pa &= ~0xFFF;
- if((MACHP(0)->cpuiddx & 0x08) && (getcr4() & 0x10))
+ if((MACHP(0)->cpuiddx & Pse) && (getcr4() & 0x10))
pse = 1;
else
pse = 0;
panic("vunmap: not mapped");
if(*table & PTESIZE){
if(va & 4*MB-1)
- panic("vunmap: misaligned: %#p\n", va);
+ panic("vunmap: misaligned: %#p", va);
*table = 0;
va += 4*MB;
continue;
}
}
+/*
+ * Add kernel mappings for pa -> va for a section of size bytes
+ * into the boot processor's page directory (front end to pdbmap).
+ */
+void
+pmap(ulong pa, ulong va, int size)
+{
+	pdbmap(MACHP(0)->pdb, pa, va, size);
+}
+
+/*
+ * Remove kernel mappings for a section of size bytes at va from
+ * the boot processor's page directory, then reload the page
+ * directory (mmuflushtlb) so stale translations are discarded.
+ */
+void
+punmap(ulong va, int size)
+{
+	pdbunmap(MACHP(0)->pdb, va, size);
+	mmuflushtlb(PADDR(m->pdb));
+}
+
/*
* Handle a fault by bringing vmap up to date.
* Only copy pdb entries and they never go away,
void*
kaddr(ulong pa)
{
+	/*
+	 * translate a physical address into the KZERO kernel window;
+	 * pa == -KZERO would wrap pa+KZERO to va 0, so the boundary
+	 * case is rejected as well (hence >=, not >)
+	 */
-	if(pa > (ulong)-KZERO)
+	if(pa >= (ulong)-KZERO)
		panic("kaddr: pa=%#.8lux", pa);
	return (void*)(pa+KZERO);
}
/*
* More debugging.
*/
-void
-countpagerefs(ulong *ref, int print)
-{
- int i, n;
- Mach *mm;
- Page *pg;
- Proc *p;
-
- n = 0;
- for(i=0; i<conf.nproc; i++){
- p = proctab(i);
- if(p->mmupdb){
- if(print){
- if(ref[pagenumber(p->mmupdb)])
- iprint("page %#.8lux is proc %d (pid %lud) pdb\n",
- p->mmupdb->pa, i, p->pid);
- continue;
- }
- if(ref[pagenumber(p->mmupdb)]++ == 0)
- n++;
- else
- iprint("page %#.8lux is proc %d (pid %lud) pdb but has other refs!\n",
- p->mmupdb->pa, i, p->pid);
- }
- if(p->kmaptable){
- if(print){
- if(ref[pagenumber(p->kmaptable)])
- iprint("page %#.8lux is proc %d (pid %lud) kmaptable\n",
- p->kmaptable->pa, i, p->pid);
- continue;
- }
- if(ref[pagenumber(p->kmaptable)]++ == 0)
- n++;
- else
- iprint("page %#.8lux is proc %d (pid %lud) kmaptable but has other refs!\n",
- p->kmaptable->pa, i, p->pid);
- }
- for(pg=p->mmuused; pg; pg=pg->next){
- if(print){
- if(ref[pagenumber(pg)])
- iprint("page %#.8lux is on proc %d (pid %lud) mmuused\n",
- pg->pa, i, p->pid);
- continue;
- }
- if(ref[pagenumber(pg)]++ == 0)
- n++;
- else
- iprint("page %#.8lux is on proc %d (pid %lud) mmuused but has other refs!\n",
- pg->pa, i, p->pid);
- }
- for(pg=p->mmufree; pg; pg=pg->next){
- if(print){
- if(ref[pagenumber(pg)])
- iprint("page %#.8lux is on proc %d (pid %lud) mmufree\n",
- pg->pa, i, p->pid);
- continue;
- }
- if(ref[pagenumber(pg)]++ == 0)
- n++;
- else
- iprint("page %#.8lux is on proc %d (pid %lud) mmufree but has other refs!\n",
- pg->pa, i, p->pid);
- }
- }
- if(!print)
- iprint("%d pages in proc mmu\n", n);
- n = 0;
- for(i=0; i<conf.nmach; i++){
- mm = MACHP(i);
- for(pg=mm->pdbpool; pg; pg=pg->next){
- if(print){
- if(ref[pagenumber(pg)])
- iprint("page %#.8lux is in cpu%d pdbpool\n",
- pg->pa, i);
- continue;
- }
- if(ref[pagenumber(pg)]++ == 0)
- n++;
- else
- iprint("page %#.8lux is in cpu%d pdbpool but has other refs!\n",
- pg->pa, i);
- }
- }
- if(!print){
- iprint("%d pages in mach pdbpools\n", n);
- for(i=0; i<conf.nmach; i++)
- iprint("cpu%d: %d pdballoc, %d pdbfree\n",
- i, MACHP(i)->pdballoc, MACHP(i)->pdbfree);
- }
-}
-
void
checkfault(ulong, ulong)
{
return -KZERO - pa;
}
+/*
+ * Mark pages as write combining (used for framebuffer).
+ * Rewrites the memory-type bits of every PTE covering [a, a+n)
+ * — PWT (bit 3), PCD (bit 4) and PAT (bit 7 for 4K pages,
+ * bit 12 for 4MB pages) — so that they select IA32_PAT entry
+ * PATWC, which mmuinit() programmed to write combining.
+ */
+void
+patwc(void *a, int n)
+{
+	ulong *pte, mask, attr, va;
+	vlong v;
+	int z;
+
+	/*
+	 * check if pat is usable: the CPU must advertise PAT and
+	 * entry PATWC of the IA32_PAT MSR (0x277) must already hold
+	 * memory type 1 (write combining), as set up by mmuinit()
+	 */
+	if((MACHP(0)->cpuiddx & Pat) == 0
+	|| rdmsr(0x277, &v) == -1
+	|| ((v >> PATWC*8) & 7) != 1)
+		return;
+
+	/* set the bits for all pages in range */
+	for(va = (ulong)a; n > 0; n -= z, va += z){
+		/* try a large (PTESIZE, 4MB) mapping first */
+		pte = mmuwalk(MACHP(0)->pdb, va, 1, 0);
+		if(pte && (*pte & (PTEVALID|PTESIZE)) == (PTEVALID|PTESIZE)){
+			z = 4*MB - (va & (4*MB-1));	/* bytes left in this 4MB page */
+			mask = 3<<3 | 1<<12;		/* PWT, PCD and large-page PAT bit */
+		} else {
+			pte = mmuwalk(MACHP(0)->pdb, va, 2, 0);
+			if(pte == 0 || (*pte & PTEVALID) == 0)
+				panic("patwc: va=%#p", va);
+			z = BY2PG - (va & (BY2PG-1));	/* bytes left in this 4K page */
+			mask = 3<<3 | 1<<7;		/* PWT, PCD and 4K PAT bit */
+		}
+		/* PATWC index encoded for both 4K and 4MB PAT bit layouts */
+		attr = (((PATWC&3)<<3) | ((PATWC&4)<<5) | ((PATWC&4)<<10));
+		*pte = (*pte & ~mask) | (attr & mask);
+	}
+}