#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"

/*
 * Simple segment descriptors with no translation.
 */
#define EXECSEGM(p)	{ 0, SEGL|SEGP|SEGPL(p)|SEGEXEC }
#define DATASEGM(p)	{ 0, SEGB|SEGG|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define EXEC32SEGM(p)	{ 0xFFFF, SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
#define DATA32SEGM(p)	{ 0xFFFF, SEGB|SEGG|(0xF<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
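
/*
 * A Segdesc is two 32-bit words, d0 and d1.  Long-mode code and
 * data segments ignore base and limit, so the 64-bit descriptors
 * leave d0 zero and only set attribute bits (SEGL, SEGP, the DPL).
 * The 32-bit compatibility descriptors still need a real limit:
 * 0xFFFF in d0 and 0xF<<16 in d1, scaled by SEGG (4K granularity),
 * span the full 4GB.  EXEC32SEGM(3), for example, is a readable,
 * DPL-3, 32-bit code segment covering 0-4GB.
 */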

Segdesc gdt[NGDT] =
{
[NULLSEG]	{ 0, 0},		/* null descriptor */
[KESEG]		EXECSEGM(0),		/* kernel code */
[KDSEG]		DATASEGM(0),		/* kernel data */
[UE32SEG]	EXEC32SEGM(3),		/* user code 32 bit */
[UDSEG]		DATA32SEGM(3),		/* user data/stack */
[UESEG]		EXECSEGM(3),		/* user code */
};
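
/*
 * The order of the three user descriptors is forced by SYSRET,
 * which loads SS from STAR[63:48]+8 and, for a 64-bit return,
 * CS from STAR[63:48]+16: user data must immediately follow the
 * 32-bit user code slot, and 64-bit user code must follow that.
 * See the wrmsr of UE32SEL into MSR 0xc0000081 in mmuinit below.
 */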

static struct {
	Lock;
	MMU	*free;

	ulong	nalloc;
	ulong	nfree;
} mmupool;

enum {
	/* page table levels, as passed to mmuwalk() */
	PML4E	= 2,
	PDPE	= 1,
	PDE	= 0,

	MAPBITS = 8*sizeof(m->mmumap[0]),
};

/*
 * Build the 10-byte pseudo-descriptor (16-bit limit, 64-bit base)
 * that LGDT and LIDT expect, then hand it to the loading routine.
 */
static void
loadptr(u16int lim, uintptr off, void (*load)(void*))
{
	u64int b[2], *o;
	u16int *s;

	o = &b[1];
	s = ((u16int*)o)-1;

	*s = lim;
	*o = off;

	(*load)(s);
}
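
/*
 * The Tss is declared as an array of 32-bit words, so the 64-bit
 * rsp0/rsp1/rsp2 stack pointers are stored a half at a time.  rsp0
 * is the stack the CPU switches to on an interrupt from user mode
 * (ring 3 to ring 0); rsp1 and rsp2 are set to the same stack for
 * the unused intermediate rings.
 */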
static void
taskswitch(uintptr stack)
{
	Tss *tss;

	tss = m->tss;
	tss->rsp0[0] = (u32int)stack;
	tss->rsp0[1] = stack >> 32;
	tss->rsp1[0] = (u32int)stack;
	tss->rsp1[1] = stack >> 32;
	tss->rsp2[0] = (u32int)stack;
	tss->rsp2[1] = stack >> 32;
	mmuflushtlb();
}

void
mmuinit(void)
{
	uintptr x;
	vlong v;
	int i;

	/* zap double map done by l.s */
	m->pml4[512] = 0;
	m->pml4[0] = 0;

	m->tss = mallocz(sizeof(Tss), 1);
	if(m->tss == nil)
		panic("mmuinit: no memory for Tss");
	m->tss->iomap = 0xDFFF;	/* no I/O bitmap: offset is beyond the Tss limit */
	for(i = 0; i < 14; i += 2){
		x = (uintptr)m + MACHSIZE;
		m->tss->ist[i] = x;
		m->tss->ist[i+1] = x>>32;
	}

	/*
	 * We used to keep the GDT in the Mach structure, but it
	 * turns out that that slows down access to the rest of the
	 * page.  Since the Mach structure is accessed quite often,
	 * it pays off anywhere from a factor of 1.25 to 2 on real
	 * hardware to separate them (the AMDs are more sensitive
	 * than Intels in this regard).  Under VMware it pays off
	 * a factor of about 10 to 100.
	 */
	memmove(m->gdt, gdt, sizeof gdt);
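
	/*
	 * A long-mode TSS descriptor is 16 bytes, i.e. two Segdesc
	 * slots: the first holds the limit, base[31:0] and the type
	 * bits, the second holds base[63:32].
	 */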
	x = (uintptr)m->tss;
	m->gdt[TSSSEG+0].d0 = (x<<16)|(sizeof(Tss)-1);
	m->gdt[TSSSEG+0].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGTSS|SEGPL(0)|SEGP;
	m->gdt[TSSSEG+1].d0 = x>>32;
	m->gdt[TSSSEG+1].d1 = 0;

	loadptr(sizeof(gdt)-1, (uintptr)m->gdt, lgdt);
	loadptr(sizeof(Segdesc)*512-1, (uintptr)IDTADDR, lidt);	/* 256 16-byte gates */
	taskswitch((uintptr)m + MACHSIZE);
	ltr(TSSSEL);

	wrmsr(0xc0000100, 0ull);			/* 64 bit fsbase */
	wrmsr(0xc0000101, (uvlong)&machp[m->machno]);	/* 64 bit gsbase */
	wrmsr(0xc0000102, 0ull);			/* kernel gs base */

	/* enable syscall extension */
	rdmsr(0xc0000080, &v);
	v |= 1;		/* EFER.SCE */
	wrmsr(0xc0000080, v);

	/* SYSCALL/SYSRET selector bases: kernel in bits 47:32, user in 63:48 */
	wrmsr(0xc0000081, ((uvlong)UE32SEL << 48) | ((uvlong)KESEL << 32));

	/* 64 bit syscall entry point */
	wrmsr(0xc0000082, (uvlong)syscallentry);

	/*
	 * SYSCALL flags mask: 0x200 is RFLAGS.IF, so interrupts are
	 * masked on kernel entry until syscallentry has switched off
	 * the user stack.
	 */
	wrmsr(0xc0000084, 0x200);
}

/*
 * These could go back to being macros once the kernel is debugged,
 * but the extra checking is nice to have.
 */
void*
kaddr(uintptr pa)
{
	if(pa >= (uintptr)-KZERO)
		panic("kaddr: pa=%#p pc=%#p", pa, getcallerpc(&pa));
	return (void*)(pa+KZERO);
}
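
/*
 * The macro versions would be unchecked one-liners along the lines
 * of (illustrative only, not definitions from this kernel):
 *
 *	#define KADDR(pa)	((void*)((uintptr)(pa)+KZERO))
 *	#define PADDR(va)	((uintptr)(va)-KZERO)
 *
 * kaddr and paddr instead range-check and panic with the caller's pc.
 */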
157 panic("paddr: va=%#p pc=%#p", va, getcallerpc(&v));

static MMU*
mmualloc(void)
{
	MMU *p;
	int i, n;

	p = m->mmufree;
	if(p != nil){
		m->mmufree = p->next;
		m->mmucount--;
	} else {
		lock(&mmupool);
		p = mmupool.free;
		if(p != nil){
			mmupool.free = p->next;
			mmupool.nfree--;
		} else {
			unlock(&mmupool);

			/*
			 * Grow the pool in batches: one header array
			 * plus one aligned slab of page-table pages.
			 * The first MMU goes to the caller, the rest
			 * are chained onto the pool's free list.
			 */
			n = 256;
			p = malloc(n * sizeof(MMU));
			if(p == nil)
				panic("mmualloc: out of memory for MMU");
			p->page = mallocalign(n * PTSZ, BY2PG, 0, 0);
			if(p->page == nil)
				panic("mmualloc: out of memory for MMU pages");
			for(i = 1; i < n; i++){
				/* page is uintptr*, so this advances PTSZ bytes */
				p[i].page = p[i-1].page + (1<<PTSHIFT);
				p[i-1].next = &p[i];
			}

			lock(&mmupool);
			p[n-1].next = mmupool.free;
			mmupool.free = p->next;
			mmupool.nalloc += n;
			mmupool.nfree += n-1;
		}
		unlock(&mmupool);
	}
	p->next = nil;
	return p;
}

static uintptr*
mmucreate(uintptr *table, uintptr va, int level, int index)
{
	uintptr *page, flags;
	MMU *p;

	flags = PTEWRITE|PTEVALID;
	if(va < VMAP){
		assert(up != nil);
		assert((va < TSTKTOP) || (va >= KMAP && va < KMAP+KMAPSIZE));

		p = mmualloc();
		p->index = index;
		p->level = level;
		if(va < TSTKTOP){
			/* user mapping: track on the proc's mmu list */
			flags |= PTEUSER;
			if(level == PML4E){
				if((p->next = up->mmuhead) == nil)
					up->mmutail = p;
				up->mmuhead = p;
				m->mmumap[index/MAPBITS] |= 1ull<<(index%MAPBITS);
			} else {
				up->mmutail->next = p;
				up->mmutail = p;
			}
			up->mmucount++;
		} else {
			/* kmap page table: track on the proc's kmap list */
			if(level == PML4E){
				up->kmaptail = p;
				up->kmaphead = p;
			} else {
				up->kmaptail->next = p;
				up->kmaptail = p;
			}
			up->kmapcount++;
		}
		page = p->page;
	} else if(conf.mem[0].npage != 0) {
		page = mallocalign(PTSZ, BY2PG, 0, 0);
	} else {
		/* too early for malloc: grab a raw page */
		page = rampage();
	}
	memset(page, 0, PTSZ);
	table[index] = PADDR(page) | flags;
	return page;
}

uintptr*
mmuwalk(uintptr* table, uintptr va, int level, int create)
{
	uintptr pte;
	int i, x;

	x = PTLX(va, 3);
	for(i = 2; i >= level; i--){
		pte = table[x];
		if(pte & PTEVALID){
			if(pte & PTESIZE)
				return 0;
			table = KADDR(PPN(pte));
		} else {
			if(!create)
				return 0;
			table = mmucreate(table, va, i, x);
		}
		x = PTLX(va, i);
	}
	return &table[x];
}

static int
ptecount(uintptr va, int level)
{
	return (1<<PTSHIFT) - (va & PGLSZ(level+1)-1) / PGLSZ(level);
}
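
/*
 * ptecount is the number of entries left in va's page table at the
 * given level: a table holds 1<<PTSHIFT (512) entries, and va's
 * index in it is (va & (PGLSZ(level+1)-1)) / PGLSZ(level) -- the
 * -1 binds tighter than the &, so the expression above groups that
 * way.  E.g. at level 0, a va 8K past a 2MB boundary has index 2,
 * leaving 510 PTEs before the next table.
 */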

void
pmap(uintptr *pml4, uintptr pa, uintptr va, vlong size)
{
	uintptr *pte, *ptee, flags;
	int z, l;

	if(size <= 0 || va < VMAP)
		panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
	flags = pa;
	pa = PPN(pa);
	flags -= pa;
	if(va >= KZERO)
		flags |= PTEGLOBAL;
	while(size > 0){
		if(size >= PGLSZ(1) && (va % PGLSZ(1)) == 0)
			flags |= PTESIZE;
		l = (flags & PTESIZE) != 0;
		z = PGLSZ(l);
		pte = mmuwalk(pml4, va, l, 1);
		if(pte == 0){
			/*
			 * no pte: the range is already covered by a
			 * large page; back up to its start and retry
			 * at that level.
			 */
			pte = mmuwalk(pml4, va, ++l, 0);
			if(pte && (*pte & PTESIZE)){
				flags |= PTESIZE;
				z = va & (PGLSZ(l)-1);
				va -= z;
				pa -= z;
				size += z;
				continue;
			}
			panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
		}
		ptee = pte + ptecount(va, l);
		while(size > 0 && pte < ptee){
			*pte++ = pa | flags;
			pa += z;
			va += z;
			size -= z;
		}
	}
}

static void
mmuzap(void)
{
	uintptr *pte;
	u64int w;
	int i, x;

	pte = m->pml4;
	pte[PTLX(KMAP, 3)] = 0;

	/* common case */
	pte[PTLX(UTZERO, 3)] = 0;
	pte[PTLX(TSTKTOP, 3)] = 0;
	m->mmumap[PTLX(UTZERO, 3)/MAPBITS] &= ~(1ull<<(PTLX(UTZERO, 3)%MAPBITS));
	m->mmumap[PTLX(TSTKTOP, 3)/MAPBITS] &= ~(1ull<<(PTLX(TSTKTOP, 3)%MAPBITS));
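
	/*
	 * mmumap is a bitmap of the pml4 slots this cpu has filled
	 * in; walking it clears only the entries actually used,
	 * instead of scanning all 512 pml4 slots on every switch.
	 */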
	for(i = 0; i < nelem(m->mmumap); pte += MAPBITS, i++){
		if((w = m->mmumap[i]) == 0)
			continue;
		m->mmumap[i] = 0;
		for(x = 0; w != 0; w >>= 1, x++){
			if(w & 1)
				pte[x] = 0;
		}
	}
}

static void
mmufree(Proc *proc)
{
	MMU *p;

	p = proc->mmutail;
	if(p == nil)
		return;
	if(m->mmucount+proc->mmucount < 256){
		/* keep them local: chain onto this cpu's free list */
		p->next = m->mmufree;
		m->mmufree = proc->mmuhead;
		m->mmucount += proc->mmucount;
	} else {
		lock(&mmupool);
		p->next = mmupool.free;
		mmupool.free = proc->mmuhead;
		mmupool.nfree += proc->mmucount;
		unlock(&mmupool);
	}
	proc->mmuhead = proc->mmutail = nil;
	proc->mmucount = 0;
}

void
flushmmu(void)
{
	int x;

	x = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(x);
}

void
mmuswitch(Proc *proc)
{
	MMU *p;

	mmuzap();
	if(proc->newtlb){
		mmufree(proc);
		proc->newtlb = 0;
	}
	if((p = proc->kmaphead) != nil)
		m->pml4[PTLX(KMAP, 3)] = PADDR(p->page) | PTEWRITE|PTEVALID;
	for(p = proc->mmuhead; p != nil && p->level == PML4E; p = p->next){
		m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
		m->pml4[p->index] = PADDR(p->page) | PTEUSER|PTEWRITE|PTEVALID;
	}
	taskswitch((uintptr)proc->kstack+KSTACK);
}

void
mmurelease(Proc *proc)
{
	MMU *p;

	mmuzap();
	if((p = proc->kmaptail) != nil){
		/* move the kmap list onto the mmu list so mmufree sees it */
		if((p->next = proc->mmuhead) == nil)
			proc->mmutail = p;
		proc->mmuhead = proc->kmaphead;
		proc->mmucount += proc->kmapcount;

		proc->kmaphead = proc->kmaptail = nil;
		proc->kmapcount = proc->kmapindex = 0;
	}
	mmufree(proc);
	taskswitch((uintptr)m+MACHSIZE);
}

void
putmmu(uintptr va, uintptr pa, Page *)
{
	uintptr *pte, old;
	int x;

	x = splhi();
	pte = mmuwalk(m->pml4, va, 0, 1);
	if(pte == 0)
		panic("putmmu: bug: va=%#p pa=%#p", va, pa);
	old = *pte;
	*pte = pa | PTEVALID|PTEUSER;
	splx(x);
	if(old & PTEVALID)
		flushpg(va);
}
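
/*
 * Note that pa as passed in by the port fault code normally has the
 * page's attribute bits (PTEWRITE and so on) already or-ed into it,
 * so the store above sets permissions as well as the address.  The
 * TLB only needs flushing when a valid entry was replaced.
 */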

/*
 * Double-check the user MMU.
 * Error checking only.
 */
void
checkmmu(uintptr va, uintptr pa)
{
	uintptr *pte;

	pte = mmuwalk(m->pml4, va, 0, 0);
	if(pte != 0 && (*pte & PTEVALID) != 0 && PPN(*pte) != pa)
		print("%ld %s: va=%#p pa=%#p pte=%#p\n",
			up->pid, up->text, va, pa, *pte);
}

void
countpagerefs(ulong *ref, int print)
{
}

KMap*
kmap(Page *page)
{
	uintptr *pte, pa, va;
	int x;

	pa = page->pa;
	if(cankaddr(pa) != 0)
		return (KMap*)KADDR(pa);

	x = splhi();
	va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
	pte = mmuwalk(m->pml4, va, 0, 1);
	if(pte == 0 || *pte & PTEVALID)
		panic("kmap: pa=%#p va=%#p", pa, va);
	*pte = pa | PTEWRITE|PTEVALID;
	up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
	if(up->kmapindex == 0)
		flushmmu();
	splx(x);
	return (KMap*)va;
}
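
/*
 * kmap hands out the 512 slots of the per-process KMAP window
 * round-robin.  kunmap below only zeroes the PTE, with no per-page
 * TLB invalidation; a slot is not reused until kmapindex wraps, and
 * the flushmmu on wrap-around flushes the TLB, so stale translations
 * are gone before their slots come around again.
 */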

void
kunmap(KMap *k)
{
	uintptr *pte, va;
	int x;

	va = (uintptr)k;
	if(va >= KZERO)
		return;

	x = splhi();
	pte = mmuwalk(m->pml4, va, 0, 0);
	if(pte == 0 || (*pte & PTEVALID) == 0)
		panic("kunmap: va=%#p", va);
	*pte = 0;
	splx(x);
}

/*
 * Add a device mapping to the vmap range.
 */
void*
vmap(uintptr pa, int size)
{
	uintptr va;
	int o;

	if(pa+size > VMAPSIZE)
		return 0;
	va = pa+VMAP;
	/*
	 * might be asking for less than a page.
	 */
	o = pa & (BY2PG-1);
	pa -= o;
	va -= o;
	size += o;
	pmap(m->pml4, pa | PTEUNCACHED|PTEWRITE|PTEVALID, va, size);
	return (void*)(va+o);
}
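
/*
 * Typical use (illustrative only; the physical address is an
 * example, not something this file maps): make a device's 4K
 * register page visible, uncached, through the returned pointer.
 *
 *	ulong *regs;
 *
 *	regs = vmap(0xFEC00000, BY2PG);
 *	if(regs == nil)
 *		panic("vmap");
 *	id = regs[0];
 */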

void
vunmap(void *v, int)
{
	paddr(v);	/* will panic on error */
}

/*
 * vmapsync() is currently unused as the VMAP and KZERO PDPs
 * are shared between processors. (see mpstartap)
 */
int
vmapsync(uintptr va)
{
	uintptr *pte1, *pte2;
	int level;

	if(va < VMAP || m->machno == 0)
		return 0;

	for(level=0; level<2; level++){
		pte1 = mmuwalk(MACHP(0)->pml4, va, level, 0);
		if(pte1 && *pte1 & PTEVALID){
			pte2 = mmuwalk(m->pml4, va, level, 1);
			if(pte2 == 0)
				break;
			*pte2 = *pte1;
			return 1;
		}
	}
	return 0;
}