2 #include "../port/lib.h"
6 #include "../port/error.h"
9 extern int vmxon(u64int);
10 extern int vmxoff(void);
11 extern int vmclear(u64int);
12 extern int vmptrld(u64int);
13 extern int vmlaunch(Ureg *, int);
14 extern int vmread(u32int, uintptr *);
15 extern int vmwrite(u32int, uintptr);
16 extern int invept(u32int, uvlong, uvlong);
17 extern int invvpid(u32int, uvlong, uvlong);
/* cached VMX capability MSR values: processor-based and pin-based execution controls */
19 static vlong procb_ctls, pinb_ctls;
/* IA32_VMX_* capability MSR indices (Intel SDM Vol. 3, appendix A) */
22 VMX_BASIC_MSR = 0x480,
23 VMX_PINB_CTLS_MSR = 0x481,
24 VMX_PROCB_CTLS_MSR = 0x482,
25 VMX_VMEXIT_CTLS_MSR = 0x483,
26 VMX_VMENTRY_CTLS_MSR = 0x484,
28 VMX_CR0_FIXED0 = 0x486,
29 VMX_CR0_FIXED1 = 0x487,
30 VMX_CR4_FIXED0 = 0x488,
31 VMX_CR4_FIXED1 = 0x489,
32 VMX_VMCS_ENUM = 0x48A,
33 VMX_PROCB_CTLS2_MSR = 0x48B,
/* TRUE_* MSRs report the real default1 bits; valid when VMX_BASIC bit 55 is set */
34 VMX_TRUE_PINB_CTLS_MSR = 0x48D,
35 VMX_TRUE_PROCB_CTLS_MSR = 0x48E,
36 VMX_TRUE_EXIT_CTLS_MSR = 0x48F,
37 VMX_TRUE_ENTRY_CTLS_MSR = 0x490,
38 VMX_VMFUNC_MSR = 0x491,
/* primary processor-based execution control bits (exit-on-event traps) */
47 PROCB_EXITINVLPG = 1<<9,
48 PROCB_EXITMWAIT = 1<<10,
49 PROCB_EXITRDPMC = 1<<11,
50 PROCB_EXITRDTSC = 1<<12,
51 PROCB_EXITCR3LD = 1<<15,
52 PROCB_EXITCR3ST = 1<<16,
53 PROCB_EXITCR8LD = 1<<19,
54 PROCB_EXITCR8ST = 1<<20,
55 PROCB_EXITMOVDR = 1<<23,
57 PROCB_MONTRAP = 1<<27,
58 PROCB_MSRBITMAP = 1<<28,
59 PROCB_EXITMONITOR = 1<<29,
60 PROCB_EXITPAUSE = 1<<30,
61 PROCB_USECTLS2 = 1<<31,
71 PFAULT_MATCH = 0x4008,
/* VM-exit control bits */
76 VMEXIT_ST_DEBUG = 1<<2,
78 VMEXIT_LD_IA32_PERF_GLOBAL_CTRL = 1<<12,
79 VMEXIT_ST_IA32_PAT = 1<<18,
80 VMEXIT_LD_IA32_PAT = 1<<19,
81 VMEXIT_ST_IA32_EFER = 1<<20,
82 VMEXIT_LD_IA32_EFER = 1<<21,
/* VMCS field encodings for the exit-time MSR store/load areas */
84 VMEXIT_MSRSTCNT = 0x400e,
85 VMEXIT_MSRLDCNT = 0x4010,
86 VMEXIT_MSRSTADDR = 0x2006,
87 VMEXIT_MSRLDADDR = 0x2008,
88 VMENTRY_MSRLDADDR = 0x200A,
/* VM-entry controls and event-injection fields */
90 VMENTRY_CTLS = 0x4012,
91 VMENTRY_LD_DEBUG = 1<<2,
92 VMENTRY_GUEST64 = 1<<9,
93 VMENTRY_LD_IA32_PERF_GLOBAL_CTRL = 1<<13,
94 VMENTRY_LD_IA32_PAT = 1<<14,
95 VMENTRY_LD_IA32_EFER = 1<<15,
97 VMENTRY_MSRLDCNT = 0x4014,
98 VMENTRY_INTRINFO = 0x4016,
99 VMENTRY_INTRCODE = 0x4018,
100 VMENTRY_INTRILEN = 0x401a,
/* guest segment state: limits, access rights ("perm"), bases */
115 GUEST_ESLIMIT = 0x4800,
116 GUEST_CSLIMIT = 0x4802,
117 GUEST_SSLIMIT = 0x4804,
118 GUEST_DSLIMIT = 0x4806,
119 GUEST_FSLIMIT = 0x4808,
120 GUEST_GSLIMIT = 0x480A,
121 GUEST_LDTRLIMIT = 0x480C,
122 GUEST_TRLIMIT = 0x480E,
123 GUEST_GDTRLIMIT = 0x4810,
124 GUEST_IDTRLIMIT = 0x4812,
125 GUEST_ESPERM = 0x4814,
126 GUEST_CSPERM = 0x4816,
127 GUEST_SSPERM = 0x4818,
128 GUEST_DSPERM = 0x481A,
129 GUEST_FSPERM = 0x481C,
130 GUEST_GSPERM = 0x481E,
131 GUEST_LDTRPERM = 0x4820,
132 GUEST_TRPERM = 0x4822,
/* CR0/CR4 guest/host masks and read shadows */
133 GUEST_CR0MASK = 0x6000,
134 GUEST_CR4MASK = 0x6002,
135 GUEST_CR0SHADOW = 0x6004,
136 GUEST_CR4SHADOW = 0x6006,
137 GUEST_ESBASE = 0x6806,
138 GUEST_CSBASE = 0x6808,
139 GUEST_SSBASE = 0x680A,
140 GUEST_DSBASE = 0x680C,
141 GUEST_FSBASE = 0x680E,
142 GUEST_GSBASE = 0x6810,
143 GUEST_LDTRBASE = 0x6812,
144 GUEST_TRBASE = 0x6814,
145 GUEST_GDTRBASE = 0x6816,
146 GUEST_IDTRBASE = 0x6818,
150 GUEST_RFLAGS = 0x6820,
151 GUEST_IA32_DEBUGCTL = 0x2802,
152 GUEST_IA32_PAT = 0x2804,
153 GUEST_IA32_EFER = 0x2806,
154 GUEST_IA32_PERF_GLOBAL_CTRL = 0x2808,
/* host state restored on VM exit */
166 HOST_FSBASE = 0x6C06,
167 HOST_GSBASE = 0x6C08,
168 HOST_TRBASE = 0x6C0A,
173 HOST_IA32_PAT = 0x2C00,
174 HOST_IA32_EFER = 0x2C02,
175 HOST_IA32_PERF_GLOBAL_CTRL = 0x2C04,
177 GUEST_CANINTR = 0x4824,
/* read-only VM-exit information fields */
179 VM_INSTRERR = 0x4400,
180 VM_EXREASON = 0x4402,
181 VM_EXINTRINFO = 0x4404,
182 VM_EXINTRCODE = 0x4406,
183 VM_IDTVECINFO = 0x4408,
184 VM_IDTVECCODE = 0x440A,
185 VM_EXINSTRLEN = 0x440C,
186 VM_EXINSTRINFO = 0x440E,
187 VM_EXQUALIF = 0x6400,
/* CR0/CR4 bits reserved by the architecture */
205 CR0RSVD = 0x1ffaffc0,
206 CR4RSVD = 0xff889000,
/* control-register bits the kernel keeps for itself; guest access to these is
 * masked/shadowed (high 32 bits included so the mask covers the full 64-bit CR) */
212 CR0KERNEL = CR0RSVD | 0x30 | (uintptr)0xFFFFFFFF00000000ULL,
213 CR4KERNEL = CR4RSVD | CR4VMXE | CR4SMXE | CR4MCE | CR4PKE | (uintptr)0xFFFFFFFF00000000ULL
/* forward declarations for the driver's main data structures */
220 typedef struct Vmx Vmx;
221 typedef struct VmCmd VmCmd;
222 typedef struct VmMem VmMem;
223 typedef struct VmIntr VmIntr;
/* VmIntr: a pending event injection (interruption-info, error code, instr length) */
234 u32int info, code, ilen;
250 uintptr dr[8]; /* DR7 is also kept in VMCS */
/* singly-linked command queue; lastcmd points at the tail link for O(1) append */
265 VmCmd *firstcmd, **lastcmd;
277 VmIntr exc, irq, irqack;
/* MSR save/restore areas handed to the VMCS (pairs of index, value) */
279 u64int *msrhost, *msrguest;
293 int (*cmd)(VmCmd *, va_list);
300 static char Equit[] = "vmx: ending";
/* human-readable names for vmx.state, indexed by state value */
302 static char *statenames[] = {
304 [VMXINACTIVE] "inactive",
307 [VMXRUNNING] "running",
/*
 * vmcsread: read a VMCS field, returning its 64-bit value.
 * On a 32-bit kernel, 64-bit fields (encoding class 0x2000) are read as
 * two 32-bit halves; addr|1 selects the high-half access encoding.
 */
315 vmcsread(u32int addr)
321 rc = vmread(addr, (uintptr *) &val);
322 if(rc >= 0 && sizeof(uintptr) == 4 && (addr & 0x6000) == 0x2000)
323 rc = vmread(addr | 1, (uintptr *) &val + 1)
326 snprint(errbuf, sizeof(errbuf), "vmcsread failed (%#.4ux)", addr);
/*
 * vmcswrite: write a VMCS field; same split-halves handling as vmcsread
 * for 64-bit fields on a 32-bit kernel.
 */
333 vmcswrite(u32int addr, u64int val)
337 rc = vmwrite(addr, val);
338 if(rc >= 0 && sizeof(uintptr) == 4 && (addr & 0x6000) == 0x2000)
339 rc = vmwrite(addr | 1, val >> 32);
342 snprint(errbuf, sizeof(errbuf), "vmcswrite failed (%#.4ux = %#.16ullx)", addr, val);
/* parseval fragment: parse a numeric string, erroring on empty/trailing junk */
353 v = strtoull(s, &p, 0);
354 if(p == s || *p != 0) error("invalid value");
/*
 * cr0fakeread: print the CR0 value the guest believes it sees —
 * real bits where the mask is clear, shadow bits where it is set.
 */
359 cr0fakeread(char *p, char *e)
361 uvlong guest, mask, shadow;
363 guest = vmcsread(GUEST_CR0);
364 mask = vmcsread(GUEST_CR0MASK);
365 shadow = vmcsread(GUEST_CR0SHADOW);
366 return seprint(p, e, "%#.*ullx", sizeof(uintptr) * 2, guest & ~mask | shadow & mask);
/* cr4fakeread: same composition for CR4 */
370 cr4fakeread(char *p, char *e)
372 uvlong guest, mask, shadow;
374 guest = vmcsread(GUEST_CR4);
375 mask = vmcsread(GUEST_CR4MASK);
376 shadow = vmcsread(GUEST_CR4SHADOW);
377 return seprint(p, e, "%#.*ullx", sizeof(uintptr) * 2, guest & ~mask | shadow & mask);
/* keep EFER.LMA (bit 10) = EFER.LME (bit 8) & CR0.PG (bit 31), and mirror
 * LMA into the VM-entry GUEST64 control (bit 9), as the hardware requires */
383 uvlong cr0, efer, nefer, ectrl;
385 if(sizeof(uintptr) != 8) return;
386 cr0 = vmcsread(GUEST_CR0);
387 efer = vmcsread(GUEST_IA32_EFER);
388 nefer = efer & ~0x400 | efer << 2 & cr0 >> 21 & 0x400;
389 if(efer == nefer) return;
390 vmcswrite(GUEST_IA32_EFER, nefer);
391 ectrl = vmcsread(VMENTRY_CTLS);
392 ectrl = ectrl & ~0x200 | nefer >> 1 & 0x200;
393 vmcswrite(VMENTRY_CTLS, ectrl);
/* cr0realwrite: user may set only the bits the kernel does not reserve */
397 cr0realwrite(char *s)
402 vmcswrite(GUEST_CR0, vmcsread(GUEST_CR0) & CR0KERNEL | v & ~CR0KERNEL);
/* cr0maskwrite: the kernel-reserved bits are always trapped */
408 cr0maskwrite(char *s)
413 vmcswrite(GUEST_CR0MASK, v | CR0KERNEL);
423 vmcswrite(GUEST_IA32_EFER, v);
429 cr4realwrite(char *s)
434 vmcswrite(GUEST_CR4, vmcsread(GUEST_CR4) & CR4KERNEL | v & ~CR4KERNEL);
439 cr4maskwrite(char *s)
444 vmcswrite(GUEST_CR4MASK, v | CR4KERNEL);
/* dr7write: DR7 lives both in the VMCS and in the shadow copy vmx.dr[7] */
453 v = (u32int) parseval(s);
454 vmcswrite(GUEST_DR7, vmx.dr[7] = (u32int) v);
470 vmx.dr[6] = (u32int) v;
474 typedef struct GuestReg GuestReg;
477 u8int size; /* in bytes; 0 means == uintptr */
479 char *(*read)(char *, char *);
480 int (*write)(char *);
/* VMXVAR encodes an offset into the Vmx struct as its bitwise complement so it
 * can share the 'offset' field with VMCS encodings: readers/writers test the
 * sign and undo with ~offset (see cmdgetregs/setregs) */
482 #define VMXVAR(x) ~(ulong)&(((Vmx*)0)->x)
483 #define UREG(x) VMXVAR(ureg.x)
/* register table driving the regs file: name -> VMCS field or Vmx member,
 * with optional custom read/write handlers */
484 static GuestReg guestregs[] = {
485 {GUEST_RIP, 0, "pc"},
486 {GUEST_RSP, 0, "sp"},
487 {GUEST_RFLAGS, 0, "flags"},
498 {UREG(r10), 0, "r10"},
499 {UREG(r11), 0, "r11"},
500 {UREG(r12), 0, "r12"},
501 {UREG(r13), 0, "r13"},
502 {UREG(r14), 0, "r14"},
503 {UREG(r15), 0, "r15"},
505 {GUEST_GDTRBASE, 0, "gdtrbase"},
506 {GUEST_GDTRLIMIT, 4, "gdtrlimit"},
507 {GUEST_IDTRBASE, 0, "idtrbase"},
508 {GUEST_IDTRLIMIT, 4, "idtrlimit"},
510 {GUEST_CSBASE, 0, "csbase"},
511 {GUEST_CSLIMIT, 4, "cslimit"},
512 {GUEST_CSPERM, 4, "csperm"},
514 {GUEST_DSBASE, 0, "dsbase"},
515 {GUEST_DSLIMIT, 4, "dslimit"},
516 {GUEST_DSPERM, 4, "dsperm"},
518 {GUEST_ESBASE, 0, "esbase"},
519 {GUEST_ESLIMIT, 4, "eslimit"},
520 {GUEST_ESPERM, 4, "esperm"},
522 {GUEST_FSBASE, 0, "fsbase"},
523 {GUEST_FSLIMIT, 4, "fslimit"},
524 {GUEST_FSPERM, 4, "fsperm"},
526 {GUEST_GSBASE, 0, "gsbase"},
527 {GUEST_GSLIMIT, 4, "gslimit"},
528 {GUEST_GSPERM, 4, "gsperm"},
530 {GUEST_SSBASE, 0, "ssbase"},
531 {GUEST_SSLIMIT, 4, "sslimit"},
532 {GUEST_SSPERM, 4, "ssperm"},
534 {GUEST_TRBASE, 0, "trbase"},
535 {GUEST_TRLIMIT, 4, "trlimit"},
536 {GUEST_TRPERM, 4, "trperm"},
537 {GUEST_LDTR, 2, "ldtr"},
538 {GUEST_LDTRBASE, 0, "ldtrbase"},
539 {GUEST_LDTRLIMIT, 4, "ldtrlimit"},
540 {GUEST_LDTRPERM, 4, "ldtrperm"},
/* CR0/CR4 expose three views: the real register, the guest-visible "fake"
 * (shadowed) value, and the intercept mask */
541 {GUEST_CR0, 0, "cr0real", nil, cr0realwrite},
542 {GUEST_CR0SHADOW, 0, "cr0fake", cr0fakeread},
543 {GUEST_CR0MASK, 0, "cr0mask", nil, cr0maskwrite},
544 {VMXVAR(cr2), 0, "cr2"},
545 {GUEST_CR3, 0, "cr3"},
546 {GUEST_CR4, 0, "cr4real", nil, cr4realwrite},
547 {GUEST_CR4SHADOW, 0, "cr4fake", cr4fakeread},
548 {GUEST_CR4MASK, 0, "cr4mask", nil, cr4maskwrite},
549 {GUEST_IA32_PAT, 8, "pat"},
550 {GUEST_IA32_EFER, 8, "efer", nil, eferwrite},
551 {VMXVAR(dr[0]), 0, "dr0"},
552 {VMXVAR(dr[1]), 0, "dr1"},
553 {VMXVAR(dr[2]), 0, "dr2"},
554 {VMXVAR(dr[3]), 0, "dr3"},
555 {VMXVAR(dr[6]), 0, "dr6", nil, dr6write},
556 {GUEST_DR7, 0, "dr7", nil, dr7write},
/* exit-information fields are read-only to the user */
557 {VM_INSTRERR, 4, "instructionerror", nil, readonly},
558 {VM_EXREASON, 4, "exitreason", nil, readonly},
559 {VM_EXQUALIF, 0, "exitqualification", nil, readonly},
560 {VM_EXINTRINFO, 4, "exitinterruptinfo", nil, readonly},
561 {VM_EXINTRCODE, 4, "exitinterruptcode", nil, readonly},
562 {VM_EXINSTRLEN, 4, "exitinstructionlen", nil, readonly},
563 {VM_EXINSTRINFO, 4, "exitinstructioninfo", nil, readonly},
564 {VM_GUESTVA, 0, "exitva", nil, readonly},
565 {VM_GUESTPA, 0, "exitpa", nil, readonly},
566 {VM_IDTVECINFO, 4, "idtinterruptinfo", nil, readonly},
567 {VM_IDTVECCODE, 4, "idtinterruptcode", nil, readonly},
/* vmokpage: valid guest-physical page address — page-aligned and below 2^48 */
571 vmokpage(u64int addr)
573 return (addr & 0xfff) == 0 && addr >> 48 == 0;
/*
 * eptwalk fragment: walk the 4-level EPT for guest-physical addr, allocating
 * missing intermediate tables, and return a pointer to the leaf PTE.
 * Note C precedence: addr >> 12 + 9*i is addr >> (12 + 9*i).
 */
584 if(tab == nil) error(Egreg);
585 for(i = 3; i >= 1; i--){
586 tab += addr >> 12 + 9 * i & 0x1ff;
589 nt = mallocalign(BY2PG, BY2PG, 0, 0);
590 if(nt == nil) error(Enomem);
591 memset(nt, 0, BY2PG);
/* 0x407: R|W|X plus bit 10 — presumably this driver's standard EPT entry
 * attribute (same constant is used for step maps below); confirm against the
 * Intel SDM EPT entry format */
592 v = PADDR(nt) | 0x407;
595 tab = KADDR(v & ~0xfff);
597 return tab + (addr >> 12 & 0x1ff);
/*
 * eptfree: recursively free/clear an EPT subtree starting at tab.
 */
601 eptfree(uvlong *tab, int level)
606 if(tab == nil) error(Egreg);
608 for(i = 0; i < 512; i++){
610 if((v & 3) == 0) continue;
611 t = KADDR(v & ~0xfff);
612 eptfree(t, level + 1);
/*
 * epttranslate: install EPT mappings for one VmMem range; the backing segment
 * must be SG_FIXED (physically contiguous), both ends page-aligned.
 */
621 epttranslate(VmMem *mp)
625 if(mp->seg != nil && (mp->seg->type & SG_TYPE) != SG_FIXED || (mp->lo & 0xfff) != 0 || (mp->hi & 0xfff) != 0 || (uint)mp->attr >= 0x1000)
628 if(mp->seg->base + mp->off + (mp->hi - mp->lo) > mp->seg->top)
630 hpa = mp->seg->map[0]->pages[0]->pa + mp->off;
633 for(p = mp->lo; p < mp->hi; p += BY2PG)
634 *eptwalk(p) = hpa + (p - mp->lo) + mp->attr;
/* mappings changed: request an EPT flush before the next VM entry */
635 vmx.onentry |= FLUSHEPT;
/* EPT memory-type names, indexed by the 3-bit type field (attr bits 3-5) */
638 static char *mtype[] = {"uc", "wc", "02", "03", "wt", "wp", "wb", "07"};
/*
 * cmdgetmeminfo: print the guest memory map, one "rwx mtype lo hi seg off"
 * line per VmMem entry, into the buffer passed via va.
 */
641 cmdgetmeminfo(VmCmd *, va_list va)
648 p0 = va_arg(va, char *);
649 e = va_arg(va, char *);
651 for(mp = vmx.mem.next; mp != &vmx.mem; mp = mp->next){
652 attr[0] = (mp->attr & 1) != 0 ? 'r' : '-';
653 attr[1] = (mp->attr & 2) != 0 ? 'w' : '-';
654 attr[2] = (mp->attr & 4) != 0 ? 'x' : '-';
/* copy the two-letter type name in one 16-bit store; '!' marks ignore-PAT (bit 6) */
656 *(ushort*)mt = *(u16int*)mtype[mp->attr >> 3 & 7];
657 mt[2] = (mp->attr & 0x40) != 0 ? '!' : 0;
659 p = seprint(p, e, "%s %s %#llux %#llux %p %#llux\n", attr, mt, mp->lo, mp->hi, mp->seg, (uvlong)mp->off);
/*
 * cmdclearmeminfo: drop all mappings — free the EPT tree, unlink and free
 * every VmMem, reset the list to empty, and flush the EPT on next entry.
 */
665 cmdclearmeminfo(VmCmd *, va_list)
669 eptfree(vmx.pml4, 0);
670 for(mp = vmx.mem.next; mp != &vmx.mem; mp = mn){
674 vmx.mem.prev = &vmx.mem;
675 vmx.mem.next = &vmx.mem;
676 vmx.onentry |= FLUSHEPT;
/* segattach hook provided by the portable kernel */
680 extern Segment* (*_globalsegattach)(char*);
/*
 * cmdsetmeminfo: parse guest memory map lines of the form
 *   "rwx mtype lo hi [segname segoffset]"
 * (4 fields for an unmapped hole, 6 for a mapped range) and append the
 * resulting VmMem entries to vmx.mem.  Raises error() on any malformed line.
 */
683 cmdsetmeminfo(VmCmd *, va_list va)
685 char *p0, *p, *q, *r;
693 p0 = va_arg(va, char *);
701 mp = malloc(sizeof(VmMem));
706 rc = tokenize(p, f, nelem(f));
708 if(rc == 0) goto next;
709 if(rc != 4 && rc != 6) error("number of fields wrong");
710 memset(mp, 0, sizeof(VmMem));
/* access field: each of r/w/x may appear at most once; 'x' also sets bit 10 */
711 for(q = f[0]; *q != 0; q++)
713 case 'r': if((mp->attr & 1) != 0) goto tinval; mp->attr |= 1; break;
714 case 'w': if((mp->attr & 2) != 0) goto tinval; mp->attr |= 2; break;
715 case 'x': if((mp->attr & 4) != 0) goto tinval; mp->attr |= 0x404; break;
717 default: tinval: error("invalid access field");
/* memory type: two-letter name, optional trailing '!' for ignore-PAT */
719 for(j = 0; j < 8; j++)
720 if(strncmp(mtype[j], f[1], 2) == 0){
724 if(j == 8 || strlen(f[1]) > 3) error("invalid memory type");
725 if(f[1][2] == '!') mp->attr |= 0x40;
726 else if(f[1][2] != 0) error("invalid memory type");
727 mp->lo = strtoull(f[2], &r, 0);
728 if(*r != 0 || !vmokpage(mp->lo)) error("invalid low guest physical address");
729 mp->hi = strtoull(f[3], &r, 0);
730 if(*r != 0 || !vmokpage(mp->hi) || mp->hi <= mp->lo) error("invalid high guest physical address");
733 if((mp->attr & 7) != 0){
734 if(rc != 6) error("number of fields wrong");
735 mp->seg = _globalsegattach(f[4]);
736 if(mp->seg == nil) error("no such segment");
/* parse the segment offset only once rc == 6 is established: on a 4-field
 * line tokenize leaves f[5] uninitialized, so reading it earlier dereferenced
 * a garbage pointer; for a hole (attr 0) mp->off stays 0 from the memset */
731 mp->off = strtoull(f[5], &r, 0);
732 if(*r != 0 || !vmokpage(mp->off)) error("invalid offset");
737 if(mp->seg->base + mp->off + (mp->hi - mp->lo) > mp->seg->top) error("out of bounds");
740 mp->prev = vmx.mem.prev;
/* probe fragment: CPUID.1:ECX bit 5 is VMX support */
759 if((regs[2] & 1<<5) == 0) return;
760 /* check if disabled by BIOS */
761 if(rdmsr(0x3a, &msr) < 0) return;
/* IA32_FEATURE_CONTROL (0x3a): if still unlocked, lock it with VMX-outside-SMX enabled */
763 if((msr & 1) == 0){ /* msr still unlocked */
764 wrmsr(0x3a, msr | 5);
765 if(rdmsr(0x3a, &msr) < 0)
/* require secondary controls (top bit of PROCB_CTLS high word) plus EPT and VPID */
771 if(rdmsr(VMX_PROCB_CTLS_MSR, &msr) < 0) return;
772 if((vlong)msr >= 0) return;
773 if(rdmsr(VMX_PROCB_CTLS2_MSR, &msr) < 0) return;
774 if((msr >> 32 & PROCB_EPT) == 0 || (msr >> 32 & PROCB_VPID) == 0) return;
775 vmx.state = VMXINACTIVE;
776 vmx.lastcmd = &vmx.firstcmd;
777 vmx.mem.next = &vmx.mem;
778 vmx.mem.prev = &vmx.mem;
/*
 * vmxaddmsr: add an MSR to the automatic save/restore areas: host value is
 * snapshotted now, guest starts with gval; the three VMCS counts are updated.
 */
791 vmxaddmsr(u32int msr, u64int gval)
795 if(vmx.nmsr >= MAXMSR)
796 error("too many MSRs");
798 vmx.msrhost[i] = msr;
799 rdmsr(msr, (vlong *) &vmx.msrhost[i+1]);
800 vmx.msrguest[i] = msr;
801 vmx.msrguest[i+1] = gval;
802 vmcswrite(VMENTRY_MSRLDCNT, vmx.nmsr);
803 vmcswrite(VMEXIT_MSRSTCNT, vmx.nmsr);
804 vmcswrite(VMEXIT_MSRLDCNT, vmx.nmsr);
/*
 * vmxtrapmsr: set/clear read and write traps for an MSR in the 4K MSR bitmap.
 * Valid ranges: 0-0x1fff and 0xc0000000-0xc0001fff; the high range maps to
 * bitmap bit index 0x2000 (msr >> 18 extracts that distinction).
 * Write-trap bits live 2048 bytes (512 u32s) after the read-trap bits.
 */
808 vmxtrapmsr(u32int msr, enum { TRAPRD = 1, TRAPWR = 2 } state)
812 if(msr >= 0x2000 && (u32int)(msr - 0xc0000000) >= 0x2000)
814 msr = msr & 0x1fff | msr >> 18 & 0x2000;
816 if((state & TRAPRD) != 0)
817 vmx.msrbits[msr / 32] |= m;
819 vmx.msrbits[msr / 32] &= ~m;
820 if((state & TRAPWR) != 0)
821 vmx.msrbits[msr / 32 + 512] |= m;
823 vmx.msrbits[msr / 32 + 512] &= ~m;
/* VMCS initialization fragment: configure execution controls, host state,
 * guest initial state, EPT, and the MSR areas for a fresh VM */
832 memset(&vmx.ureg, 0, sizeof(vmx.ureg));
/* VMX_BASIC bit 55: the TRUE_* capability MSRs are the authoritative ones */
836 if(rdmsr(VMX_BASIC_MSR, &msr) < 0) error("rdmsr(VMX_BASIC_MSR) failed");
837 if((msr & 1ULL<<55) != 0){
838 if(rdmsr(VMX_TRUE_PROCB_CTLS_MSR, &procb_ctls) < 0) error("rdmsr(VMX_TRUE_PROCB_CTLS_MSR) failed");
839 if(rdmsr(VMX_TRUE_PINB_CTLS_MSR, &pinb_ctls) < 0) error("rdmsr(VMX_TRUE_PINB_CTLS_MSR) failed");
841 if(rdmsr(VMX_PROCB_CTLS_MSR, &procb_ctls) < 0) error("rdmsr(VMX_PROCB_CTLS_MSR) failed");
842 if(rdmsr(VMX_PINB_CTLS_MSR, &pinb_ctls) < 0) error("rdmsr(VMX_PINB_CTLS_MSR) failed");
/* pin-based controls: low word of the capability MSR gives required-1 bits,
 * high word gives allowed-1 bits */
845 if(rdmsr(VMX_PINB_CTLS_MSR, &msr) < 0) error("rdmsr(VMX_PINB_CTLS_MSR failed");
846 x = (u32int)pinb_ctls | 1<<1 | 1<<2 | 1<<4; /* currently reserved default1 bits */
847 x |= PINB_EXITIRQ | PINB_EXITNMI;
848 x &= pinb_ctls >> 32;
849 vmcswrite(PINB_CTLS, x);
851 if(rdmsr(VMX_PROCB_CTLS_MSR, &msr) < 0) error("rdmsr(VMX_PROCB_CTLS_MSR failed");
852 x = (u32int)procb_ctls | 1<<1 | 7<<4 | 1<<8 | 1<<13 | 1<<14 | 1<<26; /* currently reserved default1 bits */
853 x |= PROCB_EXITHLT | PROCB_EXITMWAIT;
854 x |= PROCB_EXITMOVDR | PROCB_EXITIO | PROCB_EXITMONITOR | PROCB_MSRBITMAP;
857 vmcswrite(PROCB_CTLS, x);
/* secondary controls: EPT + VPID + unrestricted guest */
859 if(rdmsr(VMX_PROCB_CTLS2_MSR, &msr) < 0) error("rdmsr(VMX_PROCB_CTLS2_MSR failed");
860 x = PROCB_EPT | PROCB_VPID | PROCB_UNRESTR;
862 vmcswrite(PROCB_CTLS2, x);
864 if(rdmsr(VMX_VMEXIT_CTLS_MSR, &msr) < 0) error("rdmsr(VMX_VMEXIT_CTLS_MSR failed");
866 if(sizeof(uintptr) == 8) x |= VMEXIT_HOST64;
867 x |= VMEXIT_LD_IA32_PAT | VMEXIT_LD_IA32_EFER | VMEXIT_ST_DEBUG | VMEXIT_ST_IA32_EFER;
869 vmcswrite(VMEXIT_CTLS, x);
871 if(rdmsr(VMX_VMENTRY_CTLS_MSR, &msr) < 0) error("rdmsr(VMX_VMENTRY_CTLS_MSR failed");
873 x |= VMENTRY_LD_IA32_PAT | VMENTRY_LD_IA32_EFER | VMENTRY_LD_DEBUG;
875 vmcswrite(VMENTRY_CTLS, x);
877 vmcswrite(CR3_TARGCNT, 0);
878 vmcswrite(VMENTRY_INTRINFO, 0);
/* no shadow VMCS: link pointer must be all-ones */
879 vmcswrite(VMCS_LINK, -1);
/* host state: restored by the CPU on every VM exit */
881 vmcswrite(HOST_CS, KESEL);
882 vmcswrite(HOST_DS, KDSEL);
883 vmcswrite(HOST_ES, KDSEL);
884 vmcswrite(HOST_FS, KDSEL);
885 vmcswrite(HOST_GS, KDSEL);
886 vmcswrite(HOST_SS, KDSEL);
887 vmcswrite(HOST_TR, TSSSEL);
888 vmcswrite(HOST_CR0, getcr0() & ~0xe);
889 vmcswrite(HOST_CR3, getcr3());
890 vmcswrite(HOST_CR4, getcr4());
892 vmcswrite(HOST_FSBASE, msr);
894 vmcswrite(HOST_GSBASE, msr);
895 vmcswrite(HOST_TRBASE, (uintptr) m->tss);
896 vmcswrite(HOST_GDTR, (uintptr) m->gdt);
897 vmcswrite(HOST_IDTR, IDTADDR);
898 if(rdmsr(0x277, &msr) < 0) error("rdmsr(IA32_PAT) failed");
899 vmcswrite(HOST_IA32_PAT, msr);
900 if(rdmsr(Efer, &msr) < 0) error("rdmsr(IA32_EFER) failed");
901 vmcswrite(HOST_IA32_EFER, msr);
/* trap #DB (1) and #MC (18) in the guest */
903 vmcswrite(EXC_BITMAP, 1<<18|1<<1);
904 vmcswrite(PFAULT_MASK, 0);
905 vmcswrite(PFAULT_MATCH, 0);
/* guest initial state: flat 4G segments, present, writable/executable */
907 vmcswrite(GUEST_CSBASE, 0);
908 vmcswrite(GUEST_DSBASE, 0);
909 vmcswrite(GUEST_ESBASE, 0);
910 vmcswrite(GUEST_FSBASE, 0);
911 vmcswrite(GUEST_GSBASE, 0);
912 vmcswrite(GUEST_SSBASE, 0);
913 vmcswrite(GUEST_CSLIMIT, -1);
914 vmcswrite(GUEST_DSLIMIT, -1);
915 vmcswrite(GUEST_ESLIMIT, -1);
916 vmcswrite(GUEST_FSLIMIT, -1);
917 vmcswrite(GUEST_GSLIMIT, -1);
918 vmcswrite(GUEST_SSLIMIT, -1);
919 vmcswrite(GUEST_CSPERM, (SEGG|SEGD|SEGP|SEGPL(0)|SEGEXEC|SEGR) >> 8 | 1);
920 vmcswrite(GUEST_DSPERM, (SEGG|SEGB|SEGP|SEGPL(0)|SEGDATA|SEGW) >> 8 | 1);
921 vmcswrite(GUEST_ESPERM, (SEGG|SEGB|SEGP|SEGPL(0)|SEGDATA|SEGW) >> 8 | 1);
922 vmcswrite(GUEST_FSPERM, (SEGG|SEGB|SEGP|SEGPL(0)|SEGDATA|SEGW) >> 8 | 1);
923 vmcswrite(GUEST_GSPERM, (SEGG|SEGB|SEGP|SEGPL(0)|SEGDATA|SEGW) >> 8 | 1);
924 vmcswrite(GUEST_SSPERM, (SEGG|SEGB|SEGP|SEGPL(0)|SEGDATA|SEGW) >> 8 | 1);
/* bit 16 of the access-rights field marks the LDTR as unusable */
925 vmcswrite(GUEST_LDTRPERM, 1<<16);
927 vmcswrite(GUEST_CR0MASK, CR0KERNEL);
928 vmcswrite(GUEST_CR4MASK, CR4KERNEL);
929 vmcswrite(GUEST_CR0, getcr0() & CR0KERNEL | 0x31);
930 vmcswrite(GUEST_CR3, 0);
931 vmcswrite(GUEST_CR4, getcr4() & CR4KERNEL);
932 vmcswrite(GUEST_CR0SHADOW, getcr0() & CR0KERNEL | 0x31);
/* the guest must not see CR4.VMXE */
933 vmcswrite(GUEST_CR4SHADOW, getcr4() & ~CR4VMXE & CR4KERNEL);
935 vmcswrite(GUEST_IA32_PAT, 0x0007040600070406ULL);
936 vmcswrite(GUEST_IA32_EFER, 0);
938 vmcswrite(GUEST_TRBASE, 0);
939 vmcswrite(GUEST_TRLIMIT, 0xffff);
940 vmcswrite(GUEST_TRPERM, (SEGTSS|SEGPL(0)|SEGP) >> 8 | 2);
/* fresh, empty EPT; 3<<3 encodes the 4-level page-walk length */
942 vmx.pml4 = mallocalign(BY2PG, BY2PG, 0, 0);
943 memset(vmx.pml4, 0, BY2PG);
944 vmcswrite(VM_EPTP, PADDR(vmx.pml4) | 3<<3);
946 vmcswrite(VM_VPID, vmx.vpid);
/* RFLAGS bit 1 is the mandatory always-one bit */
948 vmcswrite(GUEST_RFLAGS, 2);
950 vmx.onentry = FLUSHVPID | FLUSHEPT;
952 vmx.fp = mallocalign(512, 512, 0, 0);
/* MSR save/restore areas (16 bytes per entry) and the 4K MSR trap bitmap */
958 vmx.msrhost = mallocalign(MAXMSR*16, 16, 0, 0);
959 vmx.msrguest = mallocalign(MAXMSR*16, 16, 0, 0);
960 vmx.msrbits = mallocalign(4096, 4096, 0, 0);
961 if(vmx.msrhost == nil || vmx.msrguest == nil || vmx.msrbits == nil)
/* trap every MSR by default */
963 memset(vmx.msrbits, -1, 4096);
965 vmcswrite(VMENTRY_MSRLDADDR, PADDR(vmx.msrguest));
966 vmcswrite(VMEXIT_MSRSTADDR, PADDR(vmx.msrguest));
967 vmcswrite(VMEXIT_MSRLDADDR, PADDR(vmx.msrhost));
968 vmcswrite(MSR_BITMAP, PADDR(vmx.msrbits));
/* on amd64, auto-switch the syscall/segment-base MSRs and stop trapping them */
970 if(sizeof(uintptr) == 8){
974 vmxaddmsr(Sfmask, 0);
975 vmxaddmsr(KernelGSbase, 0);
977 vmxtrapmsr(Lstar, 0);
978 vmxtrapmsr(Cstar, 0);
979 vmxtrapmsr(Sfmask, 0);
980 vmxtrapmsr(FSbase, 0);
981 vmxtrapmsr(GSbase, 0);
982 vmxtrapmsr(KernelGSbase, 0);
989 static uchar *vmcs; /* also vmxon region */
/* enter VMX operation: CR4.VMXE and CR0.NE are architectural prerequisites */
994 putcr4(getcr4() | 0x2000); /* set VMXE */
995 putcr0(getcr0() | 0x20); /* set NE */
/* validate CR0/CR4 against the FIXED0/FIXED1 capability MSRs:
 * a bit is illegal if it is 0 where both say 1, or 1 where both say 0 */
997 if(rdmsr(VMX_CR0_FIXED0, &msr) < 0) error("rdmsr(VMX_CR0_FIXED0) failed");
998 if(rdmsr(VMX_CR0_FIXED1, &msr2) < 0) error("rdmsr(VMX_CR0_FIXED1) failed");
999 if((cr & ~msr & ~msr2 | ~cr & msr & msr2) != 0) error("invalid CR0 value");
1001 if(rdmsr(VMX_CR4_FIXED0, &msr) < 0) error("rdmsr(VMX_CR4_FIXED0) failed");
1002 if(rdmsr(VMX_CR4_FIXED1, &msr2) < 0) error("rdmsr(VMX_CR4_FIXED1) failed");
1003 if((cr & ~msr & ~msr2 | ~cr & msr & msr2) != 0) error("invalid CR4 value");
/* one 8K allocation: first page is the VMCS, second the VMXON region */
1006 vmcs = mallocalign(8192, 4096, 0, 0);
1010 memset(vmcs, 0, 8192);
/* both regions must begin with the VMCS revision id from VMX_BASIC */
1011 rdmsr(VMX_BASIC_MSR, &x);
1013 *(ulong*)&vmcs[4096] = x;
1014 if(vmxon(PADDR(vmcs + 4096)) < 0)
1015 error("vmxon failed");
1017 if(vmclear(PADDR(vmcs)) < 0)
1018 error("vmclear failed");
1019 if(vmptrld(PADDR(vmcs)) < 0)
1020 error("vmptrld failed");
/* cmdrelease: mark a command finished (optionally CMDFFAIL) and wake its issuer */
1025 cmdrelease(VmCmd *p, int f)
1028 p->flags |= CMDFDONE | f;
/*
 * killcmds: fail every queued and postponed command (except notme) with Equit;
 * used when the VM is being torn down.
 */
1034 killcmds(VmCmd *notme)
1038 for(p = vmx.postponed; p != nil; p = pn){
1041 if(p == notme) continue;
1042 kstrcpy(p->errstr, Equit, ERRMAX);
1043 cmdrelease(p, CMDFFAIL);
1045 vmx.postponed = nil;
/* the pending queue is shared with issuers, hence the lock */
1046 ilock(&vmx.cmdlock);
1047 for(p = vmx.firstcmd; p != nil; p = pn){
1050 if(p == notme) continue;
1051 kstrcpy(p->errstr, Equit, ERRMAX);
1052 cmdrelease(p, CMDFFAIL);
1055 vmx.lastcmd = &vmx.firstcmd;
1056 iunlock(&vmx.cmdlock);
/* cmdquit: shut the VM down — free the EPT/memory map and go inactive */
1060 cmdquit(VmCmd *p, va_list va)
1062 vmx.state = VMXENDING;
1065 if(vmx.pml4 != nil){
1066 cmdclearmeminfo(p, va);
1082 vmx.state = VMXINACTIVE;
/* VM-exit dispatch fragment: bit 31 of the reason marks a failed entry */
1093 reason = vmcsread(VM_EXREASON);
1094 if((reason & 1<<31) == 0)
1095 switch(reason & 0xffff){
1096 case 1: /* external interrupt */
1099 case 5: /* IO SMI */
1101 case 7: /* IRQ window */
1102 case 8: /* NMI window */
/* monitor-trap exit while single-stepping: step completed, go back to READY */
1105 if((vmx.onentry & STEP) != 0){
1106 vmx.state = VMXREADY;
1108 vmx.onentry &= ~STEP;
/* any other exit while STEP was pending is unexpected; record the error */
1113 if((vmx.onentry & STEP) != 0){
1114 print("VMX: exit reason %#x when expected step...\n", reason & 0xffff);
1115 vmx.onentry &= ~STEP;
1116 vmx.got |= GOTSTEP|GOTSTEPERR;
1118 vmx.state = VMXREADY;
/*
 * cmdgetregs: print every register in guestregs as "name value" lines.
 * Negative (complemented) offsets index into the Vmx struct (VMXVAR);
 * non-negative ones are VMCS field encodings.
 */
1123 cmdgetregs(VmCmd *, va_list va)
1131 p0 = va_arg(va, char *);
1132 e = va_arg(va, char *);
1134 for(r = guestregs; r < guestregs + nelem(guestregs); r++)
1136 p = seprint(p, e, "%s ", r->name);
1138 p = strecpy(p, e, "\n");
1141 val = vmcsread(r->offset);
/* VMXVAR case: undo the complement to recover the struct offset */
1143 val = *(uintptr*)((uchar*)&vmx + ~r->offset);
1145 if(s == 0) s = sizeof(uintptr);
1146 p = seprint(p, e, "%s %#.*llux\n", r->name, s * 2, val);
/*
 * setregs: parse "name<fs>value" records separated by rs and store each into
 * the matching guestregs entry (custom write handler, VMCS field, or Vmx
 * struct member, by entry size).
 */
1152 setregs(char *p0, char rs, char *fs)
1166 rc = getfields(p, f, nelem(f), 1, fs);
1168 if(rc == 0) continue;
1169 if(rc != 2) error("number of fields wrong");
1171 for(r = guestregs; r < guestregs + nelem(guestregs); r++)
1172 if(strcmp(r->name, f[0]) == 0)
1174 if(r == guestregs + nelem(guestregs))
1175 error("unknown register");
1176 if(r->write != nil){
1180 val = strtoull(f[1], &rp, 0);
1182 if(sz == 0) sz = sizeof(uintptr);
1183 if(rp == f[1] || *rp != 0) error("invalid value");
1185 vmcswrite(r->offset, val);
/* bounds-check the complemented struct offset before storing */
1187 assert((u32int)~r->offset + sz <= sizeof(Vmx));
1189 case 1: *(u8int*)((u8int*)&vmx + (u32int)~r->offset) = val; break;
1190 case 2: *(u16int*)((u8int*)&vmx + (u32int)~r->offset) = val; break;
1191 case 4: *(u32int*)((u8int*)&vmx + (u32int)~r->offset) = val; break;
1192 case 8: *(u64int*)((u8int*)&vmx + (u32int)~r->offset) = val; break;
1193 default: error(Egreg);
/* cmdsetregs: regs-file writes are newline-separated, space/tab-delimited */
1201 cmdsetregs(VmCmd *, va_list va)
1203 return setregs(va_arg(va, char *), '\n', " \t");
/* cmdgetfpregs: copy the guest FP state out to the caller's buffer */
1207 cmdgetfpregs(VmCmd *, va_list va)
1211 p = va_arg(va, uchar *);
1212 memmove(p, vmx.fp, sizeof(FPsave));
1213 return sizeof(FPsave);
/*
 * cmdsetfpregs: copy n bytes of caller-supplied FP state into the guest
 * FP save area (vmx.fp) at byte offset off, clamped to sizeof(FPsave).
 */
1217 cmdsetfpregs(VmCmd *, va_list va)
1223 p = va_arg(va, uchar *);
1224 n = va_arg(va, ulong);
1225 off = va_arg(va, vlong);
1226 if(off < 0 || off >= sizeof(FPsave)) n = 0;
/* clamp to the space remaining after off; was "sizeof(FPsave) - n", which
 * mis-sized the copy and could write past vmx.fp — the read path in vmxread
 * (n = sizeof(buf) - off) shows the intended clamp */
1227 else if(off + n > sizeof(FPsave)) n = sizeof(FPsave) - off;
1228 memmove((uchar*)vmx.fp + off, p, n);
/*
 * cmdgo: start the VM running, optionally setting registers first from a
 * ';'-separated, '='-delimited list.
 */
1233 cmdgo(VmCmd *, va_list va)
1237 if(vmx.state != VMXREADY)
1238 error("VM not ready");
1239 r = va_arg(va, char *);
1240 if(r != nil) setregs(r, ';', "=");
1241 vmx.state = VMXRUNNING;
/* cmdstop: pause a ready/running VM */
1246 cmdstop(VmCmd *, va_list)
1248 if(vmx.state != VMXREADY && vmx.state != VMXRUNNING)
1249 error("VM not ready or running");
1250 vmx.state = VMXREADY;
/* cmdstatus: copy the VM's last error string to the caller */
1255 cmdstatus(VmCmd *, va_list va)
1257 kstrcpy(va_arg(va, char *), vmx.errstr, ERRMAX);
/* exit-reason names, indexed by VM_EXREASON & 0xffff; a leading '.' marks
 * instruction-caused exits */
1261 static char *exitreasons[] = {
1262 [0] "exc", [1] "extirq", [2] "triplef", [3] "initsig", [4] "sipi", [5] "smiio", [6] "smiother", [7] "irqwin",
1263 [8] "nmiwin", [9] "taskswitch", [10] ".cpuid", [11] ".getsec", [12] ".hlt", [13] ".invd", [14] ".invlpg", [15] ".rdpmc",
1264 [16] ".rdtsc", [17] ".rsm", [18] ".vmcall", [19] ".vmclear", [20] ".vmlaunch", [21] ".vmptrld", [22] ".vmptrst", [23] ".vmread",
1265 [24] ".vmresume", [25] ".vmwrite", [26] ".vmxoff", [27] ".vmxon", [28] "movcr", [29] ".movdr", [30] "io", [31] ".rdmsr",
1266 [32] ".wrmsr", [33] "entrystate", [34] "entrymsr", [36] ".mwait", [37] "monitortrap", [39] ".monitor",
1267 [40] ".pause", [41] "mcheck", [43] "tpr", [44] "apicacc", [45] "eoi", [46] "gdtr_idtr", [47] "ldtr_tr",
1268 [48] "eptfault", [49] "eptinval", [50] ".invept", [51] ".rdtscp", [52] "preempt", [53] ".invvpid", [54] ".wbinvd", [55] ".xsetbv",
1269 [56] "apicwrite", [57] ".rdrand", [58] ".invpcid", [59] ".vmfunc", [60] ".encls", [61] ".rdseed", [62] "pmlfull", [63] ".xsaves",
/* x86 exception mnemonics, indexed by vector */
1273 static char *except[] = {
1274 [0] "#de", [1] "#db", [3] "#bp", [4] "#of", [5] "#br", [6] "#ud", [7] "#nm",
1275 [8] "#df", [10] "#ts", [11] "#np", [12] "#ss", [13] "#gp", [14] "#pf",
1276 [16] "#mf", [17] "#ac", [18] "#mc", [19] "#xm", [20] "#ve",
/*
 * cmdwait: format the last VM exit as a text record for the wait file;
 * postpones itself (CMDFPOSTP) until an exit has actually happened.
 */
1280 cmdwait(VmCmd *cp, va_list va)
1283 u32int reason, intr;
1289 p0 = p = va_arg(va, char *);
1290 e = va_arg(va, char *);
/* report a pending IRQ acknowledgment first, as its own record */
1291 if((vmx.got & GOTIRQACK) != 0){
1292 p = seprint(p, e, "*ack %d\n", vmx.irqack.info & 0xff);
1293 vmx.got &= ~GOTIRQACK;
1296 if((vmx.got & GOTEXIT) == 0){
1297 cp->flags |= CMDFPOSTP;
1300 vmx.got &= ~GOTEXIT;
1301 reason = vmcsread(VM_EXREASON);
1302 qual = vmcsread(VM_EXQUALIF);
1304 intr = vmcsread(VM_EXINTRINFO);
/* '!' prefix marks a failed VM entry (reason bit 31) */
1305 if((reason & 1<<31) != 0)
1306 p = seprint(p, e, "!");
/* reason 0 with a valid interruption-info: print the exception mnemonic */
1307 if(rno == 0 && (intr & 1<<31) != 0){
1308 if((intr & 0xff) >= nelem(except) || except[intr & 0xff] == nil)
1309 p = seprint(p, e, "#%d ", intr & 0xff);
1311 p = seprint(p, e, "%s ", except[intr & 0xff]);
1312 }else if(rno >= nelem(exitreasons) || exitreasons[rno] == nil)
1313 p = seprint(p, e, "?%d ", rno);
1315 p = seprint(p, e, "%s ", exitreasons[rno]);
1316 p = seprint(p, e, "%#ullx pc %#ullx sp %#ullx ilen %#ullx iinfo %#ullx", qual, vmcsread(GUEST_RIP), vmcsread(GUEST_RSP), vmcsread(VM_EXINSTRLEN), vmcsread(VM_EXINSTRINFO));
1317 if((intr & 1<<11) != 0) p = seprint(p, e, " excode %#ullx", vmcsread(VM_EXINTRCODE));
/* 48/49 are EPT violation/misconfiguration: append the faulting addresses */
1318 if(rno == 48 && (qual & 0x80) != 0) p = seprint(p, e, " va %#ullx", vmcsread(VM_GUESTVA));
1319 if(rno == 48 || rno == 49) p = seprint(p, e, " pa %#ullx", vmcsread(VM_GUESTPA));
1320 if(rno == 30) p = seprint(p, e, " ax %#ullx", (uvlong)vmx.ureg.ax);
1321 p = seprint(p, e, "\n");
/*
 * cmdstep: single-step the guest using the monitor-trap flag, optionally with
 * a temporary EPT mapping (stepmap) in place for the duration of the step.
 * Runs twice: first arms the step and postpones, then reaps the result.
 */
1326 cmdstep(VmCmd *cp, va_list va)
1330 if((vmx.got & GOTSTEP) != 0 || (vmx.onentry & STEP) != 0)
1332 if(vmx.state != VMXREADY){
1333 print("pre-step in state %s\n", statenames[vmx.state]);
1336 vmx.stepmap = va_arg(va, VmMem *);
1337 vmx.onentry |= STEP;
1338 vmx.state = VMXRUNNING;
1339 cp->flags |= CMDFPOSTP;
/* second pass: the step should have landed us back in VMXREADY */
1342 if(vmx.state != VMXREADY){
1343 print("post-step in state %s\n", statenames[vmx.state]);
1344 vmx.onentry &= ~STEP;
1345 vmx.got &= ~(GOTSTEP|GOTSTEPERR);
1348 if((vmx.got & GOTSTEP) == 0){
1349 cp->flags |= CMDFPOSTP;
1352 if((vmx.got & GOTSTEPERR) != 0){
1353 vmx.got &= ~(GOTSTEP|GOTSTEPERR);
1354 error("step failed");
1356 vmx.got &= ~(GOTSTEP|GOTSTEPERR);
/*
 * eventparse: parse an event-injection spec ("name[,code[,ilen]]" style; the
 * exception may be given by mnemonic from except[] or by decimal vector) into
 * a VmIntr for later injection via VMENTRY_INTRINFO.
 */
1363 eventparse(char *p, VmIntr *vi)
1368 memset(vi, 0, sizeof(VmIntr));
1373 memset(vi, 0, sizeof(VmIntr));
1378 if(r != nil) *r++ = 0;
1379 for(i = 0; i < nelem(except); i++)
1380 if(except[i] != nil && strcmp(except[i], q) == 0)
/* not a known mnemonic: accept a numeric vector 0-255 */
1386 if(i == nelem(except)){
1387 i = strtoul(q, &q, 10);
1388 if(*q != 0 || i > 255) error(Ebadctl);
/* vectors 3 (#BP) and 4 (#OF) get special treatment (software exceptions) */
1391 if((vi->info & 0x7ff) == 3 || (vi->info & 0x7ff) == 4)
1393 if(r == nil) goto out;
1395 vi->code = strtoul(r, &r, 0);
1399 vi->ilen = strtoul(r + 1, &r, 0);
1400 if(*r != 0) error(Ebadctl);
/*
 * cmdexcept: queue an exception for injection on the next VM entry;
 * postpones while a previous injection is still pending.
 */
1407 cmdexcept(VmCmd *cp, va_list va)
1409 if(cp->scratched) error(Eintr);
1410 if((vmx.onentry & POSTEX) != 0){
1411 cp->flags |= CMDFPOSTP;
1414 eventparse(va_arg(va, char *), &vmx.exc);
1415 vmx.onentry |= POSTEX;
/* cmdirq: set or clear the pending external interrupt to inject */
1420 cmdirq(VmCmd *, va_list va)
1425 p = va_arg(va, char *);
1427 vmx.onentry &= ~POSTIRQ;
1431 vmx.onentry |= POSTIRQ;
/* gotcmd: sleep condition for the vmx kproc — any command queued? */
1442 ilock(&vmx.cmdlock);
1443 rc = vmx.firstcmd != nil;
1444 iunlock(&vmx.cmdlock);
/* markcmddone: postponed commands move to the postponed list; others are released */
1449 markcmddone(VmCmd *p, VmCmd ***pp)
1451 if((p->flags & (CMDFFAIL|CMDFPOSTP)) == CMDFPOSTP){
1455 p->flags = p->flags & ~CMDFPOSTP;
/* markppcmddone: same bookkeeping for entries already on the postponed list */
1461 markppcmddone(VmCmd **pp)
1466 if((p->flags & (CMDFFAIL|CMDFPOSTP)) == CMDFPOSTP)
1470 p->flags = p->flags & ~CMDFPOSTP;
/* command-processing fragment: rerun postponed commands, then drain the queue.
 * Each cmd handler runs under waserror(); failures copy up->errstr out. */
1481 for(pp = &vmx.postponed; p = *pp, p != nil; ){
1483 kstrcpy(p->errstr, up->errstr, ERRMAX);
1484 p->flags |= CMDFFAIL;
1485 pp = markppcmddone(pp);
1488 p->flags &= ~CMDFPOSTP;
1489 p->retval = p->cmd(p, p->va);
1491 pp = markppcmddone(pp);
1494 ilock(&vmx.cmdlock);
1497 iunlock(&vmx.cmdlock);
/* pop the head of the pending queue under the lock */
1500 vmx.firstcmd = p->next;
1501 if(vmx.lastcmd == &p->next)
1502 vmx.lastcmd = &vmx.firstcmd;
1503 iunlock(&vmx.cmdlock);
1506 kstrcpy(p->errstr, up->errstr, ERRMAX);
1507 p->flags |= CMDFFAIL;
1508 markcmddone(p, &pp);
1511 if(p->scratched) error(Eintr);
1512 p->retval = p->cmd(p, p->va);
1514 markcmddone(p, &pp);
/* step-map handling: remember the PTE the temporary mapping replaces */
1521 static uvlong oldmap;
1522 static uvlong *mapptr;
1525 if(vmx.stepmap != nil){
1526 mapptr = eptwalk(vmx.stepmap->lo);
1528 epttranslate(vmx.stepmap);
/* after the step: drop monitor-trap and restore the original mapping */
1531 vmcswrite(PROCB_CTLS, vmcsread(PROCB_CTLS) & ~(uvlong)PROCB_MONTRAP);
1532 if(vmx.stepmap != nil){
1535 vmx.onentry |= FLUSHEPT;
/* vmxproc main loop fragment: run the guest, handling entry work each pass */
1544 u32int procbctls, defprocbctls;
1552 kstrcpy(vmx.errstr, up->errstr, ERRMAX);
1553 vmx.state = VMXDEAD;
1559 vmx.state = VMXREADY;
1560 defprocbctls = vmcsread(PROCB_CTLS);
1563 if(vmx.state == VMXRUNNING){
1564 procbctls = defprocbctls;
/* single-step via the monitor-trap flag */
1565 if((vmx.onentry & STEP) != 0){
1566 procbctls |= PROCB_MONTRAP;
/* inject a queued exception */
1573 if((vmx.onentry & POSTEX) != 0){
1574 vmcswrite(VMENTRY_INTRINFO, vmx.exc.info);
1575 vmcswrite(VMENTRY_INTRCODE, vmx.exc.code);
1576 vmcswrite(VMENTRY_INTRILEN, vmx.exc.ilen);
1577 vmx.onentry &= ~POSTEX;
/* inject a queued IRQ only if the guest can take it (IF set, not blocked);
 * otherwise request an IRQ-window exit so we get another chance */
1579 if((vmx.onentry & POSTIRQ) != 0 && (vmx.onentry & STEP) == 0){
1580 if((vmx.onentry & POSTEX) == 0 && (vmcsread(GUEST_RFLAGS) & 1<<9) != 0 && (vmcsread(GUEST_CANINTR) & 3) == 0){
1581 vmcswrite(VMENTRY_INTRINFO, vmx.irq.info);
1582 vmcswrite(VMENTRY_INTRCODE, vmx.irq.code);
1583 vmcswrite(VMENTRY_INTRILEN, vmx.irq.ilen);
1584 vmx.onentry &= ~POSTIRQ;
1585 vmx.got |= GOTIRQACK;
1586 vmx.irqack = vmx.irq;
1588 procbctls |= PROCB_IRQWIN;
/* TLB maintenance requested by mapping changes */
1590 if((vmx.onentry & FLUSHVPID) != 0){
1591 if(invvpid(INVLOCAL, vmx.vpid, 0) < 0)
1592 error("invvpid failed");
1593 vmx.onentry &= ~FLUSHVPID;
1595 if((vmx.onentry & FLUSHEPT) != 0){
1596 if(invept(INVLOCAL, PADDR(vmx.pml4) | 3<<3, 0) < 0)
1597 error("invept failed");
1598 vmx.onentry &= ~FLUSHEPT;
1600 vmcswrite(PROCB_CTLS, procbctls);
1601 vmx.got &= ~GOTEXIT;
/* refresh host FS base each pass (changes with the running process) */
1604 if(sizeof(uintptr) == 8){
1606 vmwrite(HOST_FSBASE, v);
/* load guest debug registers only if DR7 enables something */
1608 if((vmx.dr[7] & ~0xd400) != 0)
1610 fpsserestore(vmx.fp);
1612 rc = vmlaunch(&vmx.ureg, vmx.launched);
1617 error("vmlaunch failed");
1619 if((vmx.onentry & STEP) != 0){
/* idle: wait for the next command */
1625 up->psstate = "Idle";
1626 sleep(&vmx.cmdwait, gotcmd, nil);
/* file tree served by #X */
1642 static Dirtab vmxdir[] = {
1643 ".", { Qdir, 0, QTDIR }, 0, 0550,
1644 "ctl", { Qctl, 0, 0 }, 0, 0660,
1645 "regs", { Qregs, 0, 0 }, 0, 0660,
1646 "status", { Qstatus, 0, 0 }, 0, 0440,
1647 "map", { Qmap, 0, 0 }, 0, 0660,
1648 "wait", { Qwait, 0, 0 }, 0, 0440,
1649 "fpregs", { Qfpregs, 0, 0 }, 0, 0660,
1662 static Cmdtab vmxctlmsg[] = {
/* iscmddone fragment: sleep condition for the issuing process */
1675 return (((VmCmd*)cp)->flags & CMDFDONE) != 0;
/*
 * vmxcmd: queue a command for the vmx kproc and sleep until it completes;
 * returns the handler's retval, raising error() if the command failed.
 */
1679 vmxcmd(int (*f)(VmCmd *, va_list), ...)
1683 if(vmx.state == VMXINACTIVE)
1685 if(vmx.state == VMXENDING)
1688 memset(&cmd, 0, sizeof(VmCmd));
1689 cmd.errstr = up->errstr;
1691 va_start(cmd.va, f);
1693 ilock(&vmx.cmdlock);
/* re-check under the lock: the VM may have started ending meanwhile */
1694 if(vmx.state == VMXENDING){
1695 iunlock(&vmx.cmdlock);
1698 *vmx.lastcmd = &cmd;
1699 vmx.lastcmd = &cmd.next;
1700 iunlock(&vmx.cmdlock);
1704 wakeup(&vmx.cmdwait);
1706 sleep(&cmd, iscmddone, &cmd);
/* spin out any race between wakeup and the done flag */
1707 while(!iscmddone(&cmd));
1711 if((cmd.flags & CMDFFAIL) != 0)
/* standard Plan 9 device operations */
1717 vmxattach(char *spec)
1719 if(vmx.state == NOVMX) error(Enodev);
1720 return devattach('X', spec);
1724 vmxwalk(Chan *c, Chan *nc, char **name, int nname)
1726 return devwalk(c, nc, name, nname, vmxdir, nelem(vmxdir), devgen);
1730 vmxstat(Chan *c, uchar *dp, int n)
1732 return devstat(c, dp, n, vmxdir, nelem(vmxdir), devgen);
/* vmxopen: eve-only except the directory; OTRUNC on map clears the memory map */
1736 vmxopen(Chan* c, int omode)
1740 if(c->qid.path != Qdir && !iseve()) error(Eperm);
1741 ch = devopen(c, omode, vmxdir, nelem(vmxdir), devgen);
1742 if(ch->qid.path == Qmap){
1743 if((omode & OTRUNC) != 0)
1744 vmxcmd(cmdclearmeminfo);
/*
 * vmxread: read handler — each file's contents are produced by running the
 * matching command on the vmx kproc, then served with readstr.
 */
1755 vmxread(Chan* c, void* a, long n, vlong off)
1757 static char regbuf[4096];
1758 static char membuf[4096];
1761 switch((ulong)c->qid.path){
1763 return devdirread(c, a, n, vmxdir, nelem(vmxdir), devgen);
1766 vmxcmd(cmdgetregs, regbuf, regbuf + sizeof(regbuf));
1767 return readstr(off, a, n, regbuf);
1770 vmxcmd(cmdgetmeminfo, membuf, membuf + sizeof(membuf));
1771 return readstr(off, a, n, membuf);
1774 char buf[ERRMAX+128];
1775 char errbuf[ERRMAX];
/* status: "dead <error>" carries the error string; otherwise the state name */
1779 if(status == VMXDEAD){
1780 vmxcmd(cmdstatus, errbuf);
1781 snprint(buf, sizeof(buf), "%s %#q\n", statenames[status], errbuf);
1782 }else if(status >= 0 && status < nelem(statenames))
1783 snprint(buf, sizeof(buf), "%s\n", statenames[status]);
1785 snprint(buf, sizeof(buf), "%d\n", status);
1786 return readstr(off, a, n, buf);
/* wait: blocks (via cmdwait postponement) until an exit record is available */
1792 rc = vmxcmd(cmdwait, buf, buf + sizeof(buf));
1794 if(rc > 0) memmove(a, buf, rc);
1799 char buf[sizeof(FPsave)];
1801 vmxcmd(cmdgetfpregs, buf);
/* clamp the requested window to the FP save area */
1802 if(n < 0 || off < 0 || off >= sizeof(buf)) n = 0;
1803 else if(off + n > sizeof(buf)) n = sizeof(buf) - off;
1804 if(n != 0) memmove(a, buf + off, n);
/*
 * vmxwrite: write handler — ctl commands are parsed with parsecmd/lookupcmd;
 * regs/map/fpregs writes are forwarded to the corresponding cmd* handlers.
 */
1815 vmxwrite(Chan* c, void* a, long n, vlong off)
1817 static QLock initlock;
1825 switch((ulong)c->qid.path){
1829 cb = parsecmd(a, n);
1834 ct = lookupcmd(cb, vmxctlmsg, nelem(vmxctlmsg));
/* init: start the vmx kproc exactly once */
1842 if(vmx.state != VMXINACTIVE)
1843 error("vmx already active");
1844 vmx.state = VMXINIT;
1845 kproc("kvmx", vmxproc, nil);
1848 if(vmxcmd(cmdstatus, up->errstr) == VMXDEAD)
/* go [regs]: optional register assignments ride along with the command */
1856 if(cb->nf == 2) kstrdup(&s, cb->f[1]);
1857 else if(cb->nf != 1) error(Ebadarg);
/* step [-map addr seg off]: optional one-page temporary mapping for the step */
1871 for(i = 1; i < cb->nf; i++)
1872 if(strcmp(cb->f[i], "-map") == 0){
1874 if(i+4 > cb->nf) error("missing argument");
1875 memset(&tmpmem, 0, sizeof(tmpmem));
1876 tmpmem.lo = strtoull(cb->f[i+1], &s, 0);
1877 if(*s != 0 || !vmokpage(tmpmem.lo)) error("invalid address");
1878 tmpmem.hi = tmpmem.lo + BY2PG;
/* 0x407: same R|W|X + bit-10 attribute the EPT walker uses for its entries */
1879 tmpmem.attr = 0x407;
1880 tmpmem.seg = _globalsegattach(cb->f[i+2]);
1881 if(tmpmem.seg == nil) error("unknown segment");
1882 tmpmem.off = strtoull(cb->f[i+3], &s, 0);
1883 if(*s != 0 || !vmokpage(tmpmem.off)) error("invalid offset");
1887 vmxcmd(cmdstep, rc ? &tmpmem : nil);
1891 kstrdup(&s, cb->f[1]);
1896 vmxcmd(cmdexcept, s);
1903 kstrdup(&s, cb->f[1]);
/* regs/map writes: copy the user data and hand it to the kproc */
1921 if(s == nil) error(Enomem);
1928 rc = vmxcmd((ulong)c->qid.path == Qregs ? cmdsetregs : cmdsetmeminfo, s);
1934 char buf[sizeof(FPsave)];
1936 if(n > sizeof(FPsave)) n = sizeof(FPsave);
1938 return vmxcmd(cmdsetfpregs, buf, n, off);