#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

/*
 * Simple segment descriptors with no translation.
 */
#define EXECSEGM(p)	{ 0, SEGL|SEGP|SEGPL(p)|SEGEXEC }
#define DATASEGM(p)	{ 0, SEGB|SEGG|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define EXEC32SEGM(p)	{ 0xFFFF, SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
#define DATA32SEGM(p)	{ 0xFFFF, SEGB|SEGG|(0xF<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
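
/*
 * In long mode base and limit are ignored for 64-bit code and
 * data segments, so those can stay zero.  The 32-bit user
 * segments run in compatibility mode and still need a real
 * limit: 0xFFFFF pages with SEGG is 4GB, and SEGD selects
 * 32-bit operation.
 */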

Segdesc gdt[NGDT] =
{
[NULLSEG]	{ 0, 0},		/* null descriptor */
[KESEG]		EXECSEGM(0),		/* kernel code */
[KDSEG]		DATASEGM(0),		/* kernel data */
[UE32SEG]	EXEC32SEGM(3),		/* user code, 32-bit */
[UDSEG]		DATA32SEGM(3),		/* user data/stack */
[UESEG]		EXECSEGM(3),		/* user code */
};
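
/*
 * The order of the user descriptors is forced by SYSRET, which
 * loads the selectors relative to Star bits 63:48: 32-bit user
 * code at the base, user data/stack 8 bytes on, 64-bit user
 * code 8 more.  SYSCALL similarly expects kernel data to
 * follow kernel code.
 */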

static struct {
	Lock;
	MMU	*free;

	ulong	nalloc;
	ulong	nfree;
} mmupool;

enum {
	/* levels of MMU.level */
	PML4E	= 2,
	PDPE	= 1,
	PDE	= 0,

	MAPBITS = 8*sizeof(m->mmumap[0]),

	/* PAT entry used for write combining */
	PATWC	= 7,
};
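
/*
 * lgdt and lidt take a 10-byte pseudo-descriptor, a 16-bit
 * limit immediately followed by the 64-bit base.  loadptr
 * builds one on the stack by overlapping the two fields.
 */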
static void
loadptr(u16int lim, uintptr off, void (*load)(void*))
{
	u64int b[2], *o;
	u16int *s;

	o = &b[1];
	s = ((u16int*)o)-1;

	*s = lim;
	*o = off;

	(*load)(s);
}
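
/*
 * taskswitch points the TSS at a new kernel stack, which the
 * processor loads on ring 3 to ring 0 transitions.  The rsp
 * fields are written as u32int halves because the 64-bit
 * fields of the hardware TSS are not naturally aligned.
 */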
static void
taskswitch(uintptr stack)
{
	Tss *tss;

	tss = m->tss;
	tss->rsp0[0] = (u32int)stack;
	tss->rsp0[1] = stack >> 32;
	tss->rsp1[0] = (u32int)stack;
	tss->rsp1[1] = stack >> 32;
	tss->rsp2[0] = (u32int)stack;
	tss->rsp2[1] = stack >> 32;
	mmuflushtlb();
}

static void kernelro(void);

void
mmuinit(void)
{
	uintptr x;
	vlong v;
	int i;

	/* zap double map done by l.s */
	m->pml4[512] = 0;
	m->pml4[0] = 0;

	if(m->machno == 0)
		kernelro();

	m->tss = mallocz(sizeof(Tss), 1);
	if(m->tss == nil)
		panic("mmuinit: no memory for Tss");
	m->tss->iomap = 0xDFFF;
	for(i = 0; i < nelem(m->tss->ist); i += 2){
		x = (uintptr)m + MACHSIZE;
		m->tss->ist[i] = x;
		m->tss->ist[i+1] = x>>32;
	}

	/*
	 * We used to keep the GDT in the Mach structure, but it
	 * turns out that that slows down access to the rest of the
	 * page.  Since the Mach structure is accessed quite often,
	 * it pays off anywhere from a factor of 1.25 to 2 on real
	 * hardware to separate them (the AMDs are more sensitive
	 * than Intels in this regard).  Under VMware it pays off
	 * a factor of about 10 to 100.
	 */
	memmove(m->gdt, gdt, sizeof gdt);
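
	/*
	 * In long mode the TSS descriptor grows to 16 bytes: bits
	 * 63:32 of the base go into the following GDT slot, whose
	 * type bits must be zero.
	 */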
	x = (uintptr)m->tss;
	m->gdt[TSSSEG+0].d0 = (x<<16)|(sizeof(Tss)-1);
	m->gdt[TSSSEG+0].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGTSS|SEGPL(0)|SEGP;
	m->gdt[TSSSEG+1].d0 = x>>32;
	m->gdt[TSSSEG+1].d1 = 0;

	loadptr(sizeof(gdt)-1, (uintptr)m->gdt, lgdt);
	loadptr(sizeof(Segdesc)*512-1, (uintptr)IDTADDR, lidt);
	taskswitch((uintptr)m + MACHSIZE);
	ltr(TSSSEL);

	wrmsr(FSbase, 0ull);
	wrmsr(GSbase, (uvlong)&machp[m->machno]);
	wrmsr(KernelGSbase, 0ull);

	/* enable syscall extension */
	rdmsr(Efer, &v);
	v |= 1ull;		/* SCE */
	wrmsr(Efer, v);
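
	/*
	 * Star holds the CS/SS selector bases for SYSCALL (bits
	 * 47:32) and SYSRET (bits 63:48), Lstar the 64-bit entry
	 * point, and Sfmask the rflags bits cleared on entry:
	 * 0x200 is IF, so the kernel is entered with interrupts
	 * disabled.
	 */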
	wrmsr(Star, ((uvlong)UE32SEL << 48) | ((uvlong)KESEL << 32));
	wrmsr(Lstar, (uvlong)syscallentry);
	wrmsr(Sfmask, 0x200);

	/* IA32_PAT write combining */
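	/*
	 * Entry 7 is rewritten to WC (memory type 1) because its
	 * power-on default, UC, duplicates entry 3, so mappings
	 * relying on the defaults of entries 0-6 are unaffected.
	 * Selecting entry 7 from a PTE takes PWT, PCD and the PAT
	 * bit all set; see patwc() below.
	 */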
	if((MACHP(0)->cpuiddx & Pat) != 0
	&& rdmsr(0x277, &v) != -1){
		v &= ~(255LL<<(PATWC*8));
		v |= 1LL<<(PATWC*8);	/* WC */
		wrmsr(0x277, v);
	}
}

/*
 * These could go back to being macros once the kernel is debugged,
 * but the extra checking is nice to have.
 */
void*
kaddr(uintptr pa)
{
	if(pa >= (uintptr)-KZERO)
		panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
	return (void*)(pa+KZERO);
}
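
/*
 * KZERO maps only the first 2GB of physical memory (KZERO is
 * -2GB, so pa must stay below -KZERO); anything higher is
 * reached through the VMAP window or kmap().
 */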
168 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&v));

static MMU*
mmualloc(void)
{
	MMU *p;
	int i, n;

	p = m->mmufree;
	if(p != nil){
		m->mmufree = p->next;
		m->mmucount--;
	} else {
		lock(&mmupool);
		p = mmupool.free;
		if(p != nil){
			mmupool.free = p->next;
			mmupool.nfree--;
		} else {
			unlock(&mmupool);

			n = 256;
			p = malloc(n * sizeof(MMU));
			if(p == nil)
				panic("mmualloc: out of memory for MMU");
			p->page = mallocalign(n * PTSZ, BY2PG, 0, 0);
			if(p->page == nil)
				panic("mmualloc: out of memory for MMU pages");
			for(i = 1; i < n; i++){
				p[i].page = p[i-1].page + (1<<PTSHIFT);
				p[i-1].next = &p[i];
			}

			lock(&mmupool);
			p[n-1].next = mmupool.free;
			mmupool.free = p->next;
			mmupool.nalloc += n;
			mmupool.nfree += n-1;
		}
		unlock(&mmupool);
	}
	p->next = nil;
	return p;
}
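
/*
 * mmualloc takes page-table pages from the per-Mach free list
 * when it can (no lock needed at splhi), falls back to the
 * locked global pool, and only then to malloc, in batches of
 * 256.  mmucreate below allocates one intermediate page-table
 * page and records it on the process's mmu list (user
 * addresses) or kmap list so it can be reclaimed later.
 */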
static uintptr*
mmucreate(uintptr *table, uintptr va, int level, int index)
{
	uintptr *page, flags;
	MMU *p;

	flags = PTEWRITE|PTEVALID;
	if(va < VMAP){
		assert((va < USTKTOP) || (va >= KMAP && va < KMAP+KMAPSIZE));
		p = mmualloc();
		p->index = index;
		p->level = level;
		if(va < USTKTOP){
			flags |= PTEUSER;
			if(level == PML4E){
				if((p->next = up->mmuhead) == nil)
					up->mmutail = p;
				up->mmuhead = p;
				m->mmumap[index/MAPBITS] |= 1ull<<(index%MAPBITS);
			} else {
				up->mmutail->next = p;
				up->mmutail = p;
			}
			up->mmucount++;
		} else {
			if(level == PML4E){
				up->kmaphead = p;
				up->kmaptail = p;
			} else {
				up->kmaptail->next = p;
				up->kmaptail = p;
			}
			up->kmapcount++;
		}
		page = p->page;
	} else {
		page = rampage();
		if(page == nil)
			panic("mmucreate: out of memory");
	}
	memset(page, 0, PTSZ);
	table[index] = PADDR(page) | flags;
	return page;
}
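
/*
 * mmuwalk descends from the pml4 to the requested level: 2 is
 * a PDP entry, 1 a PD entry (2MB pages), 0 a PT entry (4K
 * pages).  It returns a pointer into the page table, or nil
 * when the walk hits a missing entry with create off, or a
 * large page.
 */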
uintptr*
mmuwalk(uintptr* table, uintptr va, int level, int create)
{
	uintptr pte;
	int i, x;

	x = PTLX(va, 3);
	for(i = 2; i >= level; i--){
		pte = table[x];
		if(pte & PTEVALID){
			if(pte & PTESIZE)
				return 0;
			pte = PPN(pte);
			if(pte >= (uintptr)-KZERO)
				table = (void*)(pte + VMAP);
			else
				table = (void*)(pte + KZERO);
		} else {
			if(!create)
				return 0;
			table = mmucreate(table, va, i, x);
		}
		x = PTLX(va, i);
	}
	return &table[x];
}

static int
ptecount(uintptr va, int level)
{
	return (1<<PTSHIFT) - (va & PGLSZ(level+1)-1) / PGLSZ(level);
}
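
/*
 * ptesplit rewrites the 2MB page containing va as a table of
 * 4K pages when va is not itself 2MB aligned, so that
 * kernelro() can set permissions at 4K granularity across the
 * boundary.
 */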
static void
ptesplit(uintptr* table, uintptr va)
{
	uintptr *pte, pa, off;

	pte = mmuwalk(table, va, 1, 0);
	if(pte == nil || (*pte & PTESIZE) == 0 || (va & PGLSZ(1)-1) == 0)
		return;
	table = rampage();
	if(table == nil)
		panic("ptesplit: out of memory");
	va &= -PGLSZ(1);
	pa = *pte & ~PTESIZE;
	for(off = 0; off < PGLSZ(1); off += PGLSZ(0))
		table[PTLX(va + off, 0)] = pa + off;
	*pte = PADDR(table) | PTEVALID|PTEWRITE;
}

/*
 * map kernel text segment readonly
 * and everything else no-execute.
 */
static void
kernelro(void)
{
	uintptr *pte, psz, va;

	ptesplit(m->pml4, APBOOTSTRAP);
	ptesplit(m->pml4, KTZERO);
	ptesplit(m->pml4, (uintptr)etext-1);

	for(va = KZERO; va != 0; va += psz){
		psz = PGLSZ(0);
		pte = mmuwalk(m->pml4, va, 0, 0);
		if(pte == 0){
			if(va & PGLSZ(1)-1)
				continue;
			pte = mmuwalk(m->pml4, va, 1, 0);
			if(pte == 0)
				continue;
			psz = PGLSZ(1);
		}
		if((*pte & PTEVALID) == 0)
			continue;
		if(va >= KTZERO && va < (uintptr)etext)
			*pte &= ~PTEWRITE;
		else if(va != (APBOOTSTRAP & -BY2PG))
			*pte |= PTENOEXEC;
		invlpg(va);
	}
}
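
/*
 * pmap maps [va, va+size) to pa; the caller passes the PTE
 * flag bits or'd into pa.  2MB pages are used whenever
 * alignment and size allow, and KZERO mappings are marked
 * global so they survive the per-process TLB flush.
 */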
void
pmap(uintptr *pml4, uintptr pa, uintptr va, vlong size)
{
	uintptr *pte, *ptee, flags;
	int z, l;

	if(size <= 0 || va < VMAP)
		panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
	flags = pa;
	pa = PPN(pa);
	flags -= pa;
	if(va >= KZERO)
		flags |= PTEGLOBAL;
	while(size > 0){
		if(size >= PGLSZ(1) && (va % PGLSZ(1)) == 0)
			flags |= PTESIZE;
		l = (flags & PTESIZE) != 0;
		z = PGLSZ(l);
		pte = mmuwalk(pml4, va, l, 1);
		if(pte == 0){
			pte = mmuwalk(pml4, va, ++l, 0);
			if(pte && (*pte & PTESIZE)){
				flags |= PTESIZE;
				z = va & (PGLSZ(l)-1);
				va -= z;
				pa -= z;
				size += z;
				continue;
			}
			panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
		}
		ptee = pte + ptecount(va, l);
		while(size > 0 && pte < ptee){
			*pte++ = pa | flags;
			pa += z;
			va += z;
			size -= z;
		}
	}
}
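
/*
 * mmuzap clears the per-process PML4 entries, consulting the
 * m->mmumap bitmap to find the live ones instead of scanning
 * all 512 slots.
 */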
static void
mmuzap(void)
{
	uintptr *pte;
	u64int w;
	int i, x;

	pte = m->pml4;
	pte[PTLX(KMAP, 3)] = 0;

	/* common case */
	pte[PTLX(UTZERO, 3)] = 0;
	pte[PTLX(USTKTOP-1, 3)] = 0;
	m->mmumap[PTLX(UTZERO, 3)/MAPBITS] &= ~(1ull<<(PTLX(UTZERO, 3)%MAPBITS));
	m->mmumap[PTLX(USTKTOP-1, 3)/MAPBITS] &= ~(1ull<<(PTLX(USTKTOP-1, 3)%MAPBITS));

	for(i = 0; i < nelem(m->mmumap); pte += MAPBITS, i++){
		if((w = m->mmumap[i]) == 0)
			continue;
		m->mmumap[i] = 0;
		for(x = 0; w != 0; w >>= 1, x++){
			if(w & 1)
				pte[x] = 0;
		}
	}
}
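
/*
 * mmufree hands a process's page-table pages back to the
 * per-Mach free list, overflowing to the locked global pool
 * once the local list would exceed 256 entries.
 */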
static void
mmufree(Proc *proc)
{
	MMU *p;

	p = proc->mmutail;
	if(p == nil)
		return;
	if(m->mmucount+proc->mmucount < 256){
		p->next = m->mmufree;
		m->mmufree = proc->mmuhead;
		m->mmucount += proc->mmucount;
	} else {
		lock(&mmupool);
		p->next = mmupool.free;
		mmupool.free = proc->mmuhead;
		mmupool.nfree += proc->mmucount;
		unlock(&mmupool);
	}
	proc->mmuhead = proc->mmutail = nil;
	proc->mmucount = 0;
}
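
/*
 * mmuswitch is called at every context switch: after mmuzap()
 * has cleared the previous process's entries (and mmufree()
 * discarded them if newtlb was raised), it reinstalls the new
 * process's PML4-level entries and kmap root, then
 * taskswitch() resets the kernel stack and flushes the TLB.
 */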
void
mmuswitch(Proc *proc)
{
	MMU *p;

	mmuzap();
	if(proc->newtlb){
		mmufree(proc);
		proc->newtlb = 0;
	}
	if((p = proc->kmaphead) != nil)
		m->pml4[PTLX(KMAP, 3)] = PADDR(p->page) | PTEWRITE|PTEVALID;
	for(p = proc->mmuhead; p != nil && p->level == PML4E; p = p->next){
		m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
		m->pml4[p->index] = PADDR(p->page) | PTEUSER|PTEWRITE|PTEVALID;
	}
	taskswitch((uintptr)proc->kstack+KSTACK);
}

void
mmurelease(Proc *proc)
{
	MMU *p;

	mmuzap();
	if((p = proc->kmaptail) != nil){
		if((p->next = proc->mmuhead) == nil)
			proc->mmutail = p;
		proc->mmuhead = proc->kmaphead;
		proc->mmucount += proc->kmapcount;

		proc->kmaphead = proc->kmaptail = nil;
		proc->kmapcount = proc->kmapindex = 0;
	}
	mmufree(proc);
	taskswitch((uintptr)m+MACHSIZE);
}

void
putmmu(uintptr va, uintptr pa, Page *)
{
	uintptr *pte, old;
	int x;

	x = splhi();
	pte = mmuwalk(m->pml4, va, 0, 1);
	if(pte == 0)
		panic("putmmu: bug: va=%#p pa=%#p", va, pa);
	old = *pte;
	*pte = pa | PTEVALID|PTEUSER;
	splx(x);
	if(old & PTEVALID)
		invlpg(va);
}

/*
 * Double-check the user MMU.
 * Error checking only.
 */
void
checkmmu(uintptr va, uintptr pa)
{
	uintptr *pte;

	pte = mmuwalk(m->pml4, va, 0, 0);
	if(pte != 0 && (*pte & PTEVALID) != 0 && PPN(*pte) != pa)
		print("%ld %s: va=%#p pa=%#p pte=%#p\n",
			up->pid, up->text, va, pa, *pte);
}
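
/*
 * kmap gives temporary access to pages that can lie above the
 * 2GB reachable through KZERO; cankaddr(pa) yields how many
 * bytes at pa are below that cutoff (0 if none), in which case
 * no mapping is needed.  Otherwise the page is entered into
 * the per-process KMAP window, kmapindex rotating through its
 * slots.
 */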
KMap*
kmap(Page *page)
{
	uintptr *pte, pa, va;
	int x;

	pa = page->pa;
	if(cankaddr(pa) != 0)
		return (KMap*)KADDR(pa);

	x = splhi();
	va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
	pte = mmuwalk(m->pml4, va, 0, 1);
	if(pte == 0 || (*pte & PTEVALID) != 0)
		panic("kmap: pa=%#p va=%#p", pa, va);
	*pte = pa | PTEWRITE|PTENOEXEC|PTEVALID;
	splx(x);
	return (KMap*)va;
}
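
/*
 * Typical use, assuming the pc kernel's VA() macro (a plain
 * cast of the KMap* back to a pointer):
 *
 *	k = kmap(pg);
 *	memset(VA(k), 0, BY2PG);
 *	kunmap(k);
 */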
void
kunmap(KMap *k)
{
	uintptr *pte, va;
	int x;

	va = (uintptr)k;
	if(va >= KZERO)
		return;

	x = splhi();
	pte = mmuwalk(m->pml4, va, 0, 0);
	if(pte == 0 || (*pte & PTEVALID) == 0)
		panic("kunmap: va=%#p", va);
	*pte = 0;
	invlpg(va);
	splx(x);
}

/*
 * Add a device mapping to the vmap range.
 * Note that the VMAP and KZERO PDPs are shared
 * between processors (see mpstartap) so no
 * synchronization is being done.
 */
void*
vmap(uintptr pa, int size)
{
	uintptr va;
	int o;

	if(pa+size > VMAPSIZE)
		return 0;
	va = pa+VMAP;
	/*
	 * might be asking for less than a page.
	 */
	o = pa & (BY2PG-1);
	pa -= o;
	va -= o;
	size += o;
	pmap(m->pml4, pa | PTEUNCACHED|PTEWRITE|PTENOEXEC|PTEVALID, va, size);
	return (void*)(va+o);
}
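
/*
 * Typical driver use, membase and iosize standing in for a
 * device's physical register base and size (from a PCI BAR,
 * say):
 *
 *	regs = vmap(membase, iosize);
 *	if(regs == nil)
 *		panic("no vmap space");
 *
 * PTEUNCACHED keeps the mapping safe for device registers;
 * framebuffers can be remarked write-combining with patwc()
 * below.
 */
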
void
vunmap(void *v, int)
{
	paddr(v);	/* will panic on error */
}

/*
 * mark pages as write combining (used for framebuffer)
 */
void
patwc(void *a, int n)
{
	uintptr *pte, mask, attr, va;
	vlong v;
	int z, l;

	/* check if pat is usable */
	if((MACHP(0)->cpuiddx & Pat) == 0
	|| rdmsr(0x277, &v) == -1
	|| ((v >> PATWC*8) & 7) != 1)
		return;

	/* set the bits for all pages in range */
	for(va = (uintptr)a; n > 0; n -= z, va += z){
		l = 0;
		pte = mmuwalk(m->pml4, va, l, 0);
		if(pte == 0)
			pte = mmuwalk(m->pml4, va, ++l, 0);
		if(pte == 0 || (*pte & PTEVALID) == 0)
			panic("patwc: va=%#p", va);
		z = PGLSZ(l);
		z -= va & (z-1);
		mask = l == 0 ? 3<<3 | 1<<7 : 3<<3 | 1<<12;
		attr = (((PATWC&3)<<3) | ((PATWC&4)<<5) | ((PATWC&4)<<10));
		*pte = (*pte & ~mask) | (attr & mask);
	}
}

/*
 * The palloc.pages array and mmupool can be a large chunk
 * out of the 2GB window above KZERO, so we allocate from
 * upages and map in the VMAP window before pageinit()
 */
void
preallocpages(void)
{
	Pallocmem *pm;
	uintptr va, base, top;
	vlong tsize, psize;
	ulong np, nt;
	int i;

	np = 0;
	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		np += pm->npage;
	}
	nt = np / 50;	/* 2% for mmupool */
	np -= nt;

	nt = (uvlong)nt*BY2PG / (sizeof(MMU)+PTSZ);
	tsize = (uvlong)nt * (sizeof(MMU)+PTSZ);
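
	/*
	 * Every usable page costs BY2PG bytes plus a Page struct,
	 * so np*BY2PG bytes of raw memory hold about
	 * np*BY2PG/(sizeof(Page)+BY2PG) pages; psize becomes the
	 * size of that Page array plus the mmupool area.
	 */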
	psize = (uvlong)np * BY2PG;
	psize += sizeof(Page) + BY2PG;
	psize = (psize / (sizeof(Page)+BY2PG)) * sizeof(Page);
	psize += tsize;
	psize = ROUND(psize, PGLSZ(1));

	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		base = ROUND(pm->base, PGLSZ(1));
		top = pm->base + (uvlong)pm->npage * BY2PG;
		if((base + psize) <= VMAPSIZE && (vlong)(top - base) >= psize){
			pm->base = base + psize;
			pm->npage = (top - pm->base)/BY2PG;

			va = base + VMAP;
			pmap(m->pml4, base | PTEGLOBAL|PTEWRITE|PTENOEXEC|PTEVALID, va, psize);

			palloc.pages = (void*)(va + tsize);

			mmupool.nfree = mmupool.nalloc = nt;
			mmupool.free = (void*)(va + (uvlong)nt*PTSZ);
			for(i=0; i<nt; i++){
				mmupool.free[i].page = (uintptr*)va;
				mmupool.free[i].next = &mmupool.free[i+1];
				va += PTSZ;
			}
			mmupool.free[i-1].next = nil;
			break;
		}
	}
}