/*
 * TODO: we could handle mmu updates more efficiently by
 * XXX perhaps we should check return values and panic on failure?
 */

#include	"../port/lib.h"

/*
 * These functions replace the hypercall inlines used on Linux systems.
 */
int xencall1(int op);
int xencall2(int op, ulong arg1);
int xencall3(int op, ulong arg1, ulong arg2);
int xencall4(int op, ulong arg1, ulong arg2, ulong arg3);
int xencall5(int op, ulong arg1, ulong arg2, ulong arg3, ulong arg4);
int xencall6(int op, ulong arg1, ulong arg2, ulong arg3, ulong arg4, ulong arg5);
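
/*
 * Convention: xencallN traps into the hypervisor with the hypercall
 * number plus N-1 word-sized arguments; 64-bit values are split into
 * low and high words by the callers below (see
 * HYPERVISOR_update_va_mapping and HYPERVISOR_set_timer_op).
 */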
int
HYPERVISOR_update_va_mapping(ulong va, uvlong newval, ulong flags)
{
	int ret;

	ret = xencall5(__HYPERVISOR_update_va_mapping, va, newval, newval>>32, flags);
	if(ret < 0)
		panic("update_va_mapping failed");
	return ret;
}

int
HYPERVISOR_set_timer_op(uvlong timeout)
{
	ulong hi, lo;

	hi = timeout>>32;
	lo = timeout;
	return xencall3(__HYPERVISOR_set_timer_op, lo, hi);
}

int
HYPERVISOR_set_trap_table(trap_info_t *table)
{
	return xencall2(__HYPERVISOR_set_trap_table, (ulong)table);
}

int
HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count, domid_t domid)
{
	return xencall5(__HYPERVISOR_mmu_update, (ulong)req, count, (ulong)success_count, domid);
}

int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count, int *scount, domid_t domid)
{
	return xencall5(__HYPERVISOR_mmuext_op, (ulong)op, count, (ulong)scount, domid);
}

int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return xencall3(__HYPERVISOR_set_gdt, (ulong)frame_list, entries);
}

int
HYPERVISOR_stack_switch(ulong ss, ulong esp)
{
	return xencall3(__HYPERVISOR_stack_switch, ss, esp);
}

/* XXX match evfunc and fsfunc prototypes? */
int
HYPERVISOR_set_callbacks(ulong evss, ulong evfunc, ulong fsss, ulong fsfunc)
{
	return xencall5(__HYPERVISOR_set_callbacks, evss, evfunc, fsss, fsfunc);
}
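
/*
 * Note: the event callback is the entry point the hypervisor uses for
 * event-channel upcalls (xenupcall below); the failsafe callback is
 * entered when the hypervisor finds a bad segment selector while
 * returning to the guest. evss/fsss are the code segment selectors
 * for the two entry points.
 */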
int
HYPERVISOR_fpu_taskswitch(void)
{
	return xencall1(__HYPERVISOR_fpu_taskswitch);
}

int
HYPERVISOR_yield(void)
{
	return xencall3(__HYPERVISOR_sched_op, SCHEDOP_yield, 0);
}

int
HYPERVISOR_block(void)
{
	return xencall3(__HYPERVISOR_sched_op, SCHEDOP_block, 0);
}

int
HYPERVISOR_shutdown(int reboot)
{
	sched_shutdown_t arg;

	arg.reason = reboot? SHUTDOWN_reboot : SHUTDOWN_poweroff;
	return xencall3(__HYPERVISOR_sched_op, SCHEDOP_shutdown, (ulong)&arg);
}

int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
	return xencall3(__HYPERVISOR_multicall, (ulong)call_list, nr_calls);
}
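
/*
 * Example (a sketch, not in the original code): batching two page-table
 * updates through HYPERVISOR_multicall. Assumes the multicall_entry_t
 * layout from the Xen headers (op and args[] fields); u points at two
 * updates already expressed as machine addresses. Purely illustrative,
 * since a single mmu_update with count=2 would do the same job.
 */
static void
mcupdate2(mmu_update_t *u)
{
	multicall_entry_t mc[2];
	int i;

	for(i = 0; i < 2; i++){
		mc[i].op = __HYPERVISOR_mmu_update;
		mc[i].args[0] = (ulong)&u[i];
		mc[i].args[1] = 1;		/* one update per entry */
		mc[i].args[2] = 0;		/* no success count wanted */
		mc[i].args[3] = DOMID_SELF;
	}
	if(HYPERVISOR_multicall(mc, 2) < 0)
		panic("mcupdate2");
}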
int
HYPERVISOR_event_channel_op(void *op)
{
	return xencall2(__HYPERVISOR_event_channel_op, (ulong)op);
}

int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return xencall3(__HYPERVISOR_xen_version, cmd, (ulong)arg);
}

int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return xencall4(__HYPERVISOR_console_io, cmd, count, (ulong)str);
}

int
HYPERVISOR_grant_table_op(int cmd, gnttab_setup_table_t *setup, int count)
{
	return xencall4(__HYPERVISOR_grant_table_op, cmd, (ulong)setup, count);
}

int
HYPERVISOR_memory_op(int cmd, struct xen_memory_reservation *arg)
{
	return xencall3(__HYPERVISOR_memory_op, cmd, (ulong)arg);
}
/*
 * XXX this comment is leftover from old code. revisit and update.
 *
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
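 *
 * In this port the "local lock" is vcpu->evtchn_upcall_mask in the shared
 * info page: splhi() sets it and spllo() clears it; see the spl functions
 * near the end of this file.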
 */

/*
 * conversions to machine page numbers, pages and addresses
 */
#define MFN(pa)		(patomfn[(pa)>>PGSHIFT])
#define MFNPG(pa)	((uvlong)MFN(pa)<<PGSHIFT)
#define PA2MA(pa)	(MFNPG(pa) | PGOFF(pa))
#define VA2MA(va)	PA2MA(PADDR(va))
#define VA2MFN(va)	MFN(PADDR(va))
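
/*
 * Example (a sketch, not in the original): the expansion of VA2MA for
 * a kernel virtual address, spelled out step by step.
 */
static uvlong
va2ma_demo(void *va)
{
	ulong pa, mfn;

	pa = PADDR(va);			/* kernel va -> guest-"physical" address */
	mfn = MFN(pa);			/* guest pfn -> machine frame, via patomfn[] */
	return ((uvlong)mfn<<PGSHIFT) | PGOFF(pa);	/* == VA2MA(va) */
}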
ulong hypervisor_virt_start;
start_info_t *xenstart;
shared_info_t *HYPERVISOR_shared_info;
static void
pdptpin(ulong va)
{
	struct mmuext_op op;
	ulong mfn;

	mfn = MFN(PADDR(va));
	LOG(dprint("pdptpin %lux %lux\n", va, mfn);)
	/* mark page readonly first */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID, UVMF_INVLPG|UVMF_LOCAL);

	/* L3 here refers to page directory pointer table (PAE mode) */
	op.cmd = MMUEXT_PIN_L3_TABLE;
	op.arg1.mfn = mfn;
	if (HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF) == 0)
		return;
	/* pin failed: restore write access */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID|PTEWRITE, UVMF_INVLPG|UVMF_LOCAL);
}
static void
pdpin(ulong va)
{
	struct mmuext_op op;
	ulong mfn;

	mfn = MFN(PADDR(va));
	LOG(dprint("pdpin %lux %lux\n", va, mfn);)
	/* mark page readonly first */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID, UVMF_INVLPG|UVMF_LOCAL);

	/* to confuse you, L2 here refers to page directories */
	op.cmd = MMUEXT_PIN_L2_TABLE;
	op.arg1.mfn = mfn;
	if (HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF) == 0)
		return;
	/* pin failed: restore write access */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID|PTEWRITE, UVMF_INVLPG|UVMF_LOCAL);
}
void
xenptpin(ulong va)
{
	struct mmuext_op op;
	ulong mfn;

	mfn = MFN(PADDR(va));
	LOG(dprint("pin %lux %lux\n", va, mfn);)
	/* mark page readonly first */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID, UVMF_INVLPG|UVMF_LOCAL);

	/* to confuse you, L1 here refers to page tables */
	op.cmd = MMUEXT_PIN_L1_TABLE;
	op.arg1.mfn = mfn;
	if (HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF) == 0)
		return;
	/* pin failed: restore write access */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID|PTEWRITE, UVMF_INVLPG|UVMF_LOCAL);
}
void
xenptunpin(ulong va)
{
	struct mmuext_op op;
	ulong mfn;

	mfn = MFN(PADDR(va));
	LOG(dprint("unpin %lux %lux\n", va, mfn);)
	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = mfn;
	if(HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF) < 0)
		panic("xenptunpin va=%lux called from %lux", va, getcallerpc(&va));

	/* mark page read-write */
	HYPERVISOR_update_va_mapping(va, ((uvlong)mfn<<PGSHIFT)|PTEVALID|PTEWRITE, UVMF_INVLPG|UVMF_LOCAL);
}
void
xenptswitch(ulong pa)
{
	struct mmuext_op op;

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = MFN(pa);
	if(HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF) < 0)
		panic("xenptswitch");
}
	/* flush the local TLB */
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	HYPERVISOR_mmuext_op(&op, 1, 0, DOMID_SELF);
/* update a pte using a machine page frame number */
void
xenupdatema(ulong *ptr, uvlong val)
{
	mmu_update_t u;

	u.ptr = VA2MA(ptr);
	u.val = val;
	if(HYPERVISOR_mmu_update(&u, 1, 0, DOMID_SELF) < 0)
		panic("xenupdatema - pte %lux value %llux (was %llux) called from %lux", (ulong)ptr, val, *(uvlong*)ptr, getcallerpc(&ptr));
}
/* update a pte using a guest "physical" page number */
void
xenupdate(ulong *ptr, ulong val)
{
	mmu_update_t u;

	u.ptr = VA2MA(ptr);
	u.val = PA2MA(val);
	if(HYPERVISOR_mmu_update(&u, 1, 0, DOMID_SELF) < 0)
		panic("xenupdate - pte %lux value %lux (%llux) called from %lux", (ulong)ptr, val, PA2MA(val), getcallerpc(&ptr));
}
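
/*
 * Example (a sketch): mapping guest-physical page pa writable at the
 * pte ptr. xenupdate translates pa through PA2MA; xenupdatema would be
 * used if the value were already a machine address (e.g. a frame
 * accepted from another domain):
 *
 *	xenupdate(ptr, pa|PTEVALID|PTEWRITE);
 */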
static void
acceptframe(int ref, void *va)
{
	ulong mfn;

	mfn = xengrantend(ref);
	if(mfn == 0)
		panic("can't accept page frame");
	LOG(dprint("acceptframe ref %d va %lux mfn %lux\n", ref, (ulong)va, mfn);)
	mmumapframe((ulong)va, mfn);
}
static int
donateframe(int domid, void *va)
{
	ulong mfn;
	ulong *pte;
	int ref;
	struct xen_memory_reservation mem;

	mfn = VA2MFN(va);
	ref = xengrant(domid, mfn, GTF_accept_transfer);
	LOG(dprint("grant transfer %lux (%lux) -> %d\n", (ulong)va, mfn, ref);)
	/* unmap the page, then hand the frame back to the hypervisor */
	pte = mmuwalk(m->pdb, (ulong)va, 2, 0);
	xenupdatema(pte, 0);
	set_xen_guest_handle(mem.extent_start, &mfn);
	mem.nr_extents = 1;
	mem.extent_order = 0;
	mem.address_bits = 0;
	mem.domid = DOMID_SELF;
	if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &mem) != 1)
		panic("XENMEM_decrease_reservation");
	return ref;
}
static int
shareframe(int domid, void *va, int write)
{
	ulong mfn;
	int ref, flags;

	mfn = VA2MFN(va);
	flags = GTF_permit_access;
	if(!write)
		flags |= GTF_readonly;
	ref = xengrant(domid, mfn, flags);
	LOG(dprint("grant shared %lux (%lux) -> %d\n", (ulong)va, mfn, ref);)
	return ref;
}
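
/*
 * Note the two grant flavors above: donateframe gives the frame away
 * (GTF_accept_transfer plus XENMEM_decrease_reservation), while
 * shareframe keeps the frame mapped and merely grants the peer access,
 * read-only unless write is set (GTF_permit_access).
 */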
/*
 * Upcall from hypervisor, entered with evtchn_upcall_pending masked.
 */
void
xenupcall(Ureg *ureg)
{
	shared_info_t *s;
	vcpu_info_t *vcpu;
	ulong sel1, sel2, n1, n2, port;

	s = HYPERVISOR_shared_info;
	vcpu = &HYPERVISOR_shared_info->vcpu_info[0];
	for(;;){
		vcpu->evtchn_upcall_pending = 0;
		/* drain the two-level pending bitmap */
		sel1 = xchgl((uint*)&vcpu->evtchn_pending_sel, 0);
		for(n1 = 0; sel1 != 0; n1++, sel1 >>= 1){
			if((sel1&1) == 0)
				continue;
			sel2 = xchgl((uint*)&s->evtchn_pending[n1], 0);
			for(n2 = 0; sel2 != 0; n2++, sel2 >>= 1){
				if((sel2&1) == 0)
					continue;
				port = 32*n1 + n2;
				ureg->trap = 100+port;
				trap(ureg);
			}
		}
		if (vcpu->evtchn_upcall_pending)
			continue;
		/* unmask, then re-check for events that raced in */
		vcpu->evtchn_upcall_mask = 0;
		if (vcpu->evtchn_upcall_pending == 0)
			break;
		vcpu->evtchn_upcall_mask = 1;
	}
}
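
/*
 * The mask dance at the end of xenupcall closes a race: after the
 * pending words are drained, the mask is cleared and pending is checked
 * once more, so an event posted in that window restarts the scan
 * instead of being lost.
 */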
/*
 * tbdf field is abused to distinguish virqs from channels:
 *
 *	tbdf=BUSUNKNOWN -> irq is a virq to be bound to a channel
 *	tbdf=0 -> irq is a channel number
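 *
 * Example usage (a sketch; xenclock and handler are hypothetical):
 *	intrenable(VIRQ_TIMER, xenclock, nil, BUSUNKNOWN, "clock");
 *	intrenable(port, handler, nil, 0, "xenstore");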
 */
int
xenintrenable(Vctl *v)
{
	evtchn_op_t op;
	int port;

	/* a virq must first be bound to a fresh event channel */
	if (v->tbdf == BUSUNKNOWN) {
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = v->irq;
		op.u.bind_virq.vcpu = m->machno;
		if(HYPERVISOR_event_channel_op(&op) != 0)
			panic("xenintrenable: bind %d failed", v->irq);
		port = op.u.bind_virq.port;
	} else
		port = v->irq;

	/* clear the mask bit to unmask the channel */
	HYPERVISOR_shared_info->evtchn_mask[port/32] &= ~(1<<(port%32));
	if(0)print("xenintrenable %s: irq %d port %d mask[%d] = %#lux\n", v->name, v->irq, port, port/32, HYPERVISOR_shared_info->evtchn_mask[port/32]);
	return 100+port;
}
void
xenintrdisable(int irq)
{
	panic("xenintrdisable notyet");
}
int
xenintrvecno(int irq)
{
	return 100+irq;
}

int
islo(void)
{
	vcpu_info_t *cpu;

	cpu = &HYPERVISOR_shared_info->vcpu_info[m->machno]; // XXX m->shared
	return (cpu->evtchn_upcall_mask == 0);
}
/*
 * Note: Portable code expects spllo <= spl* <= spldone for
 * accounting purposes. Let's hope the compiler doesn't reorder
 */
int
spllo(void)
{
	vcpu_info_t *cpu = &HYPERVISOR_shared_info->vcpu_info[m->machno]; // XXX m->shared

	if(cpu->evtchn_upcall_mask == 0)
		return 0;
	cpu->evtchn_upcall_mask = 0;
	/*
	 * If an event arrived while masked off,
	 * use a dummy call to trigger delivery
	 */
	if (cpu->evtchn_upcall_pending)
		HYPERVISOR_xen_version(0, 0);
	return 1;
}
int
splhi(void)
{
	vcpu_info_t *cpu = &HYPERVISOR_shared_info->vcpu_info[m->machno]; // XXX m->shared
	int oldmask, dummy;

	oldmask = xchgb(&cpu->evtchn_upcall_mask, 1);
	if (cpu->evtchn_upcall_mask != 1)
		panic("splhi");
	/* XXX ad-hoc "getcallerpc" because we have no arguments */
	m->splpc = (&dummy)[1];
	return oldmask;
}
/* marker for profiling in portable code */
void
spldone(void)
{
}
/* allocate an event channel */
int
xenchanalloc(int dom)
{
	evtchn_op_t op;

	op.cmd = EVTCHNOP_alloc_unbound;
	op.u.alloc_unbound.dom = DOMID_SELF;
	op.u.alloc_unbound.remote_dom = dom;
	if (HYPERVISOR_event_channel_op(&op) != 0)
		panic("xenchanalloc");
	return op.u.alloc_unbound.port;
}
/* notify over an event channel */
void
xenchannotify(int port)
{
	evtchn_op_t op;

	op.cmd = EVTCHNOP_send;
	op.u.send.port = port;
	HYPERVISOR_event_channel_op(&op);
}
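
/*
 * Example (a sketch, not in the original code): allocating an unbound
 * channel for a backend domain and kicking it; backid and the
 * surrounding setup are hypothetical.
 */
static void
chandemo(int backid)
{
	int port;

	port = xenchanalloc(backid);	/* local port the remote may bind */
	xenchannotify(port);		/* raise the event */
}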