#include "pub_core_basics.h"
#include "pub_core_vki.h"
+
+#if defined(ENABLE_XEN)
+
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"
-#include <stdint.h>
#include <inttypes.h>
-#define __XEN_TOOLS__
-
-#include <xen/xen.h>
-#include <xen/sysctl.h>
-#include <xen/domctl.h>
-#include <xen/memory.h>
-#include <xen/event_channel.h>
-#include <xen/version.h>
-
-#include <xen/hvm/hvm_op.h>
-
#define PRE(name) static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name) static DEFN_POST_TEMPLATE(xen, name)
PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);
switch (ARG1) {
- case XENMEM_set_memory_map: {
- xen_foreign_memory_map_t *arg =(xen_foreign_memory_map_t *)ARG2;
+ case VKI_XENMEM_set_memory_map: {
+ struct vki_xen_foreign_memory_map *arg =
+ (struct vki_xen_foreign_memory_map *)ARG2;
PRE_MEM_READ("XENMEM_set_memory_map domid",
(Addr)&arg->domid, sizeof(arg->domid));
PRE_MEM_READ("XENMEM_set_memory_map map",
(Addr)&arg->map, sizeof(arg->map));
break;
}
- case XENMEM_increase_reservation:
- case XENMEM_decrease_reservation:
- case XENMEM_populate_physmap: {
+ case VKI_XENMEM_increase_reservation:
+ case VKI_XENMEM_decrease_reservation:
+ case VKI_XENMEM_populate_physmap: {
struct xen_memory_reservation *memory_reservation =
(struct xen_memory_reservation *)ARG2;
char *which;
switch (ARG1) {
- case XENMEM_increase_reservation:
+ case VKI_XENMEM_increase_reservation:
which = "XENMEM_increase_reservation";
break;
- case XENMEM_decrease_reservation:
+ case VKI_XENMEM_decrease_reservation:
which = "XENMEM_decrease_reservation";
PRE_MEM_READ(which,
(Addr)memory_reservation->extent_start.p,
- sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+ sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
- case XENMEM_populate_physmap:
+ case VKI_XENMEM_populate_physmap:
which = "XENMEM_populate_physmap";
PRE_MEM_READ(which,
(Addr)memory_reservation->extent_start.p,
- sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+ sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
default:
which = "XENMEM_unknown";
PRE(mmuext_op)
{
- mmuext_op_t *ops = (mmuext_op_t *)ARG1;
+ struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
unsigned int i, nr = ARG2;
-
for (i=0; i<nr; i++) {
- mmuext_op_t *op = ops + i;
+ struct vki_xen_mmuext_op *op = ops + i;
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
(Addr)&op->cmd, sizeof(op->cmd));
switch(op->cmd) {
- case MMUEXT_PIN_L1_TABLE:
- case MMUEXT_PIN_L2_TABLE:
- case MMUEXT_PIN_L3_TABLE:
- case MMUEXT_PIN_L4_TABLE:
- case MMUEXT_UNPIN_TABLE:
- case MMUEXT_NEW_BASEPTR:
- case MMUEXT_CLEAR_PAGE:
- case MMUEXT_COPY_PAGE:
- case MMUEXT_MARK_SUPER:
- case MMUEXT_UNMARK_SUPER:
+ case VKI_XEN_MMUEXT_PIN_L1_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L2_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L3_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L4_TABLE:
+ case VKI_XEN_MMUEXT_UNPIN_TABLE:
+ case VKI_XEN_MMUEXT_NEW_BASEPTR:
+ case VKI_XEN_MMUEXT_CLEAR_PAGE:
+ case VKI_XEN_MMUEXT_COPY_PAGE:
+ case VKI_XEN_MMUEXT_MARK_SUPER:
+ case VKI_XEN_MMUEXT_UNMARK_SUPER:
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
(Addr)&op->arg1.mfn,
sizeof(op->arg1.mfn));
break;
- case MMUEXT_INVLPG_LOCAL:
- case MMUEXT_INVLPG_ALL:
- case MMUEXT_SET_LDT:
+ case VKI_XEN_MMUEXT_INVLPG_LOCAL:
+ case VKI_XEN_MMUEXT_INVLPG_ALL:
+ case VKI_XEN_MMUEXT_SET_LDT:
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
(Addr)&op->arg1.linear_addr,
sizeof(op->arg1.linear_addr));
break;
- case MMUEXT_TLB_FLUSH_LOCAL:
- case MMUEXT_TLB_FLUSH_MULTI:
- case MMUEXT_INVLPG_MULTI:
- case MMUEXT_TLB_FLUSH_ALL:
- case MMUEXT_FLUSH_CACHE:
- case MMUEXT_NEW_USER_BASEPTR:
- case MMUEXT_FLUSH_CACHE_GLOBAL:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
+ case VKI_XEN_MMUEXT_INVLPG_MULTI:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
+ case VKI_XEN_MMUEXT_FLUSH_CACHE:
+ case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
+ case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
/* None */
break;
}
switch(op->cmd) {
- case MMUEXT_SET_LDT:
+ case VKI_XEN_MMUEXT_SET_LDT:
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
(Addr)&op->arg2.nr_ents,
sizeof(op->arg2.nr_ents));
break;
- case MMUEXT_TLB_FLUSH_MULTI:
- case MMUEXT_INVLPG_MULTI:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
+ case VKI_XEN_MMUEXT_INVLPG_MULTI:
/* How many??? */
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
(Addr)&op->arg2.vcpumask,
sizeof(op->arg2.vcpumask));
break;
- case MMUEXT_COPY_PAGE:
+ case VKI_XEN_MMUEXT_COPY_PAGE:
PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
(Addr)&op->arg2.src_mfn,
sizeof(op->arg2.src_mfn));
break;
- case MMUEXT_PIN_L1_TABLE:
- case MMUEXT_PIN_L2_TABLE:
- case MMUEXT_PIN_L3_TABLE:
- case MMUEXT_PIN_L4_TABLE:
- case MMUEXT_UNPIN_TABLE:
- case MMUEXT_NEW_BASEPTR:
- case MMUEXT_TLB_FLUSH_LOCAL:
- case MMUEXT_INVLPG_LOCAL:
- case MMUEXT_TLB_FLUSH_ALL:
- case MMUEXT_INVLPG_ALL:
- case MMUEXT_FLUSH_CACHE:
- case MMUEXT_NEW_USER_BASEPTR:
- case MMUEXT_CLEAR_PAGE:
- case MMUEXT_FLUSH_CACHE_GLOBAL:
- case MMUEXT_MARK_SUPER:
- case MMUEXT_UNMARK_SUPER:
+ case VKI_XEN_MMUEXT_PIN_L1_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L2_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L3_TABLE:
+ case VKI_XEN_MMUEXT_PIN_L4_TABLE:
+ case VKI_XEN_MMUEXT_UNPIN_TABLE:
+ case VKI_XEN_MMUEXT_NEW_BASEPTR:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
+ case VKI_XEN_MMUEXT_INVLPG_LOCAL:
+ case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
+ case VKI_XEN_MMUEXT_INVLPG_ALL:
+ case VKI_XEN_MMUEXT_FLUSH_CACHE:
+ case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
+ case VKI_XEN_MMUEXT_CLEAR_PAGE:
+ case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
+ case VKI_XEN_MMUEXT_MARK_SUPER:
+ case VKI_XEN_MMUEXT_UNMARK_SUPER:
/* None */
break;
}
compat ? "_compat" : "", cmd, arg);
switch (cmd) {
- case EVTCHNOP_alloc_unbound: {
- struct evtchn_alloc_unbound *alloc_unbound = arg;
+ case VKI_XEN_EVTCHNOP_alloc_unbound: {
+ struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
(Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
PRE(evtchn_op_compat)
{
- struct evtchn_op *evtchn = (struct evtchn_op *)ARG1;
+ struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
ARG1, sizeof(*evtchn));
PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);
switch (ARG1) {
- case XENVER_version:
- case XENVER_extraversion:
- case XENVER_compile_info:
- case XENVER_capabilities:
- case XENVER_changeset:
- case XENVER_platform_parameters:
- case XENVER_get_features:
- case XENVER_pagesize:
- case XENVER_guest_handle:
- case XENVER_commandline:
+ case VKI_XENVER_version:
+ case VKI_XENVER_extraversion:
+ case VKI_XENVER_compile_info:
+ case VKI_XENVER_capabilities:
+ case VKI_XENVER_changeset:
+ case VKI_XENVER_platform_parameters:
+ case VKI_XENVER_get_features:
+ case VKI_XENVER_pagesize:
+ case VKI_XENVER_guest_handle:
+ case VKI_XENVER_commandline:
/* No inputs */
break;
{
PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
switch (ARG1) {
- case GNTTABOP_setup_table: {
- struct gnttab_setup_table *gst = (void *)(intptr_t)ARG2;
- PRE_MEM_READ("GNTTABOP_setup_table dom",
+ case VKI_XEN_GNTTABOP_setup_table: {
+ struct vki_xen_gnttab_setup_table *gst =
+ (struct vki_xen_gnttab_setup_table*)ARG2;
+ PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
(Addr)&gst->dom, sizeof(gst->dom));
- PRE_MEM_READ("GNTTABOP_setup_table nr_frames",
+ PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
(Addr)&gst->nr_frames, sizeof(gst->nr_frames));
break;
}
}
PRE(sysctl) {
- struct xen_sysctl *sysctl = (struct xen_sysctl *)ARG1;
+ struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
* uint32_t interface_version;
*/
PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
- sizeof(uint32_t) + sizeof(uint32_t));
+ sizeof(vki_uint32_t) + sizeof(vki_uint32_t));
if (!sysctl)
return;
- if (sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION) {
- VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported, "
- "built for %"PRIx32"\n",
- sysctl->interface_version,
- XEN_SYSCTL_INTERFACE_VERSION);
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ case 0x00000009:
+ break;
+ default:
+ VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
+ sysctl->interface_version);
if (VG_(clo_verbosity) > 1) {
VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
__PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
switch (sysctl->cmd) {
- case XEN_SYSCTL_getdomaininfolist:
- PRE_XEN_SYSCTL_READ(getdomaininfolist, first_domain);
- PRE_XEN_SYSCTL_READ(getdomaininfolist, max_domains);
- PRE_XEN_SYSCTL_READ(getdomaininfolist, buffer);
+ case VKI_XEN_SYSCTL_getdomaininfolist:
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
+ break;
+ case 0x00000009:
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
+ PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
+ break;
+ }
break;
- case XEN_SYSCTL_cpupool_op:
+ case VKI_XEN_SYSCTL_cpupool_op:
PRE_XEN_SYSCTL_READ(cpupool_op, op);
switch(sysctl->u.cpupool_op.op) {
- case XEN_SYSCTL_CPUPOOL_OP_CREATE:
- case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
- case XEN_SYSCTL_CPUPOOL_OP_INFO:
- case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
- case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
- case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
+ case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
}
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE)
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
PRE_XEN_SYSCTL_READ(cpupool_op, domid);
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
- sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_RMCPU)
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
+ sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
break;
- case XEN_SYSCTL_physinfo:
+ case VKI_XEN_SYSCTL_physinfo:
/* No input params */
break;
- case XEN_SYSCTL_topologyinfo:
+ case VKI_XEN_SYSCTL_topologyinfo:
PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
break;
- case XEN_SYSCTL_numainfo:
+ case VKI_XEN_SYSCTL_numainfo:
PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
PRE(domctl)
{
- struct xen_domctl *domctl = (struct xen_domctl *)ARG1;
+ struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);
/*
* Common part of xen_domctl:
- * uint32_t cmd;
- * uint32_t interface_version;
- * domid_t domain;
+ * vki_uint32_t cmd;
+ * vki_uint32_t interface_version;
+ * vki_xen_domid_t domain;
*/
PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
- sizeof(uint32_t) + sizeof(uint32_t) + sizeof(domid_t));
+ sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
+ + sizeof(vki_xen_domid_t));
if (!domctl)
return;
- if (domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION) {
- VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported, "
- "built for %"PRIx32"\n",
- domctl->interface_version,
- XEN_DOMCTL_INTERFACE_VERSION);
+ switch (domctl->interface_version)
+ {
+ case 0x00000007:
+ case 0x00000008:
+ break;
+ default:
+ VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
+ domctl->interface_version);
if (VG_(clo_verbosity) > 1) {
VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
}
__PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
switch (domctl->cmd) {
- case XEN_DOMCTL_destroydomain:
- case XEN_DOMCTL_pausedomain:
- case XEN_DOMCTL_max_vcpus:
- case XEN_DOMCTL_get_address_size:
- case XEN_DOMCTL_gettscinfo:
- case XEN_DOMCTL_getdomaininfo:
- case XEN_DOMCTL_unpausedomain:
+ case VKI_XEN_DOMCTL_destroydomain:
+ case VKI_XEN_DOMCTL_pausedomain:
+ case VKI_XEN_DOMCTL_max_vcpus:
+ case VKI_XEN_DOMCTL_get_address_size:
+ case VKI_XEN_DOMCTL_gettscinfo:
+ case VKI_XEN_DOMCTL_getdomaininfo:
+ case VKI_XEN_DOMCTL_unpausedomain:
/* No input fields. */
break;
- case XEN_DOMCTL_createdomain:
+ case VKI_XEN_DOMCTL_createdomain:
PRE_XEN_DOMCTL_READ(createdomain, ssidref);
PRE_XEN_DOMCTL_READ(createdomain, handle);
PRE_XEN_DOMCTL_READ(createdomain, flags);
break;
- case XEN_DOMCTL_max_mem:
+ case VKI_XEN_DOMCTL_max_mem:
PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
break;
- case XEN_DOMCTL_set_address_size:
+ case VKI_XEN_DOMCTL_set_address_size:
__PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
break;
- case XEN_DOMCTL_settscinfo:
+ case VKI_XEN_DOMCTL_settscinfo:
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
break;
- case XEN_DOMCTL_hypercall_init:
+ case VKI_XEN_DOMCTL_hypercall_init:
PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
break;
- case XEN_DOMCTL_getvcpuinfo:
+ case VKI_XEN_DOMCTL_getvcpuinfo:
PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
break;
- case XEN_DOMCTL_scheduler_op:
+ case VKI_XEN_DOMCTL_scheduler_op:
PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
- if ( domctl->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_putinfo ) {
+ if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
switch(domctl->u.scheduler_op.sched_id) {
- case XEN_SCHEDULER_SEDF:
+ case VKI_XEN_SCHEDULER_SEDF:
PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
break;
- case XEN_SCHEDULER_CREDIT:
+ case VKI_XEN_SCHEDULER_CREDIT:
PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
break;
- case XEN_SCHEDULER_CREDIT2:
+ case VKI_XEN_SCHEDULER_CREDIT2:
PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
break;
- case XEN_SCHEDULER_ARINC653:
+ case VKI_XEN_SCHEDULER_ARINC653:
break;
}
}
break;
- case XEN_DOMCTL_getvcpuaffinity:
+ case VKI_XEN_DOMCTL_getvcpuaffinity:
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
break;
- case XEN_DOMCTL_setvcpuaffinity:
+ case VKI_XEN_DOMCTL_setvcpuaffinity:
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
(Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
break;
- case XEN_DOMCTL_getvcpucontext:
+ case VKI_XEN_DOMCTL_getvcpucontext:
__PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
break;
- case XEN_DOMCTL_setvcpucontext:
+ case VKI_XEN_DOMCTL_setvcpucontext:
__PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
__PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
break;
- case XEN_DOMCTL_set_cpuid:
+ case VKI_XEN_DOMCTL_set_cpuid:
PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
(Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
break;
- case XEN_DOMCTL_getvcpuextstate:
+ case VKI_XEN_DOMCTL_getvcpuextstate:
__PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
__PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
__PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
__PRE_XEN_HVMOP_READ(_hvm_op, "xen_hvm_" # _hvm_op "_t", _field)
switch (op) {
- case HVMOP_set_param:
- __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, domid);
- __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, index);
- __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, value);
+ case VKI_XEN_HVMOP_set_param:
+ __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
+ __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
+ __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
break;
- case HVMOP_get_param:
- __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, domid);
- __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, index);
+ case VKI_XEN_HVMOP_get_param:
+ __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
+ __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
break;
default:
POST(memory_op)
{
switch (ARG1) {
- case XENMEM_set_memory_map:
- case XENMEM_decrease_reservation:
+ case VKI_XENMEM_set_memory_map:
+ case VKI_XENMEM_decrease_reservation:
/* No outputs */
break;
- case XENMEM_increase_reservation:
- case XENMEM_populate_physmap: {
+ case VKI_XENMEM_increase_reservation:
+ case VKI_XENMEM_populate_physmap: {
struct xen_memory_reservation *memory_reservation =
(struct xen_memory_reservation *)ARG2;
POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
- sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+ sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
break;
}
}
static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
{
switch (cmd) {
- case EVTCHNOP_alloc_unbound: {
- struct evtchn_alloc_unbound *alloc_unbound = arg;
+ case VKI_XEN_EVTCHNOP_alloc_unbound: {
+ struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
break;
}
POST(evtchn_op_compat)
{
- struct evtchn_op *evtchn = (struct evtchn_op *)ARG1;
+ struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
}
POST(xen_version)
{
switch (ARG1) {
- case XENVER_version:
+ case VKI_XENVER_version:
/* No outputs */
break;
- case XENVER_extraversion:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_extraversion_t));
+ case VKI_XENVER_extraversion:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
break;
- case XENVER_compile_info:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_compile_info_t));
+ case VKI_XENVER_compile_info:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
break;
- case XENVER_capabilities:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_capabilities_info_t));
+ case VKI_XENVER_capabilities:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
break;
- case XENVER_changeset:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_changeset_info_t));
+ case VKI_XENVER_changeset:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
break;
- case XENVER_platform_parameters:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_platform_parameters_t));
+ case VKI_XENVER_platform_parameters:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
break;
- case XENVER_get_features:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_feature_info_t));
+ case VKI_XENVER_get_features:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
break;
- case XENVER_pagesize:
+ case VKI_XENVER_pagesize:
/* No outputs */
break;
- case XENVER_guest_handle:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_domain_handle_t));
+ case VKI_XENVER_guest_handle:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
break;
- case XENVER_commandline:
- POST_MEM_WRITE((Addr)ARG2, sizeof(xen_commandline_t));
+ case VKI_XENVER_commandline:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
break;
}
}
POST(grant_table_op)
{
switch (ARG1) {
- case GNTTABOP_setup_table: {
- struct gnttab_setup_table *gst = (void *)(uintptr_t)ARG2;
- PRE_MEM_WRITE("GNTTABOP_setup_table",
+ case VKI_XEN_GNTTABOP_setup_table: {
+ struct vki_xen_gnttab_setup_table *gst =
+ (struct vki_xen_gnttab_setup_table*)ARG2;
+ PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
(Addr)&gst->status, sizeof(gst->status));
- PRE_MEM_WRITE("GNTTABOP_setup_table",
+ PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
(Addr)gst->frame_list.p,
-                  sizeof(*gst->frame_list.p) & gst->nr_frames);
+                  sizeof(*gst->frame_list.p) * gst->nr_frames);
break;
POST(sysctl)
{
- struct xen_sysctl *sysctl = (struct xen_sysctl *)ARG1;
+ struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
- if (!sysctl || sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION)
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ case 0x00000009:
+ break;
+ default:
return;
+ }
#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field) \
POST_MEM_WRITE((Addr)&sysctl->u._union._field, \
__POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
switch (sysctl->cmd) {
- case XEN_SYSCTL_getdomaininfolist:
- POST_XEN_SYSCTL_WRITE(getdomaininfolist, num_domains);
- POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist.buffer.p,
- sizeof(xen_domctl_getdomaininfo_t)
- * sysctl->u.getdomaininfolist.num_domains);
+ case VKI_XEN_SYSCTL_getdomaininfolist:
+ switch (sysctl->interface_version)
+ {
+ case 0x00000008:
+ POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
+ POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
+ sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
+ * sysctl->u.getdomaininfolist_00000008.num_domains);
+ break;
+ case 0x00000009:
+ POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
+ POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
+ sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
+ * sysctl->u.getdomaininfolist_00000009.num_domains);
+ break;
+ }
break;
- case XEN_SYSCTL_cpupool_op:
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE ||
- sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO)
+ case VKI_XEN_SYSCTL_cpupool_op:
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
+ sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO) {
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
}
- if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO ||
- sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
+ if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
+ sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
break;
- case XEN_SYSCTL_physinfo:
+ case VKI_XEN_SYSCTL_physinfo:
POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
break;
- case XEN_SYSCTL_topologyinfo:
+ case VKI_XEN_SYSCTL_topologyinfo:
POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
if (sysctl->u.topologyinfo.cpu_to_core.p)
POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
break;
- case XEN_SYSCTL_numainfo:
+ case VKI_XEN_SYSCTL_numainfo:
POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
}
POST(domctl){
- struct xen_domctl *domctl = (struct xen_domctl *)ARG1;
+ struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
- if (!domctl || domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION)
- return;
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ case 0x00000008:
+ break;
+ default:
+ return;
+ }
#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field) \
POST_MEM_WRITE((Addr)&domctl->u._union._field, \
__POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
switch (domctl->cmd) {
- case XEN_DOMCTL_createdomain:
- case XEN_DOMCTL_destroydomain:
- case XEN_DOMCTL_pausedomain:
- case XEN_DOMCTL_max_mem:
- case XEN_DOMCTL_set_address_size:
- case XEN_DOMCTL_settscinfo:
- case XEN_DOMCTL_hypercall_init:
- case XEN_DOMCTL_setvcpuaffinity:
- case XEN_DOMCTL_setvcpucontext:
- case XEN_DOMCTL_set_cpuid:
- case XEN_DOMCTL_unpausedomain:
+ case VKI_XEN_DOMCTL_createdomain:
+ case VKI_XEN_DOMCTL_destroydomain:
+ case VKI_XEN_DOMCTL_pausedomain:
+ case VKI_XEN_DOMCTL_max_mem:
+ case VKI_XEN_DOMCTL_set_address_size:
+ case VKI_XEN_DOMCTL_settscinfo:
+ case VKI_XEN_DOMCTL_hypercall_init:
+ case VKI_XEN_DOMCTL_setvcpuaffinity:
+ case VKI_XEN_DOMCTL_setvcpucontext:
+ case VKI_XEN_DOMCTL_set_cpuid:
+ case VKI_XEN_DOMCTL_unpausedomain:
/* No output fields */
break;
- case XEN_DOMCTL_max_vcpus:
+ case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
+      break;
- case XEN_DOMCTL_get_address_size:
+ case VKI_XEN_DOMCTL_get_address_size:
__POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
break;
- case XEN_DOMCTL_gettscinfo:
+ case VKI_XEN_DOMCTL_gettscinfo:
__POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
__POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
__POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
__POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
break;
- case XEN_DOMCTL_getvcpuinfo:
+ case VKI_XEN_DOMCTL_getvcpuinfo:
POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
break;
- case XEN_DOMCTL_scheduler_op:
- if ( domctl->u.scheduler_op.cmd == XEN_DOMCTL_SCHEDOP_getinfo ) {
+ case VKI_XEN_DOMCTL_scheduler_op:
+ if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
switch(domctl->u.scheduler_op.sched_id) {
- case XEN_SCHEDULER_SEDF:
+ case VKI_XEN_SCHEDULER_SEDF:
POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
break;
- case XEN_SCHEDULER_CREDIT:
+ case VKI_XEN_SCHEDULER_CREDIT:
POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
break;
- case XEN_SCHEDULER_CREDIT2:
+ case VKI_XEN_SCHEDULER_CREDIT2:
POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
break;
- case XEN_SCHEDULER_ARINC653:
+ case VKI_XEN_SCHEDULER_ARINC653:
break;
}
}
break;
- case XEN_DOMCTL_getvcpuaffinity:
+ case VKI_XEN_DOMCTL_getvcpuaffinity:
POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
break;
- case XEN_DOMCTL_getdomaininfo:
- POST_XEN_DOMCTL_WRITE(getdomaininfo, domain);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, flags);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, tot_pages);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, max_pages);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, shr_pages);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, shared_info_frame);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, cpu_time);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, nr_online_vcpus);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, max_vcpu_id);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, ssidref);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, handle);
- POST_XEN_DOMCTL_WRITE(getdomaininfo, cpupool);
+ case VKI_XEN_DOMCTL_getdomaininfo:
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
+      POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
+      break;
+ case 0x00000008:
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
+ POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
break;
-
- case XEN_DOMCTL_getvcpucontext:
+ }
+ break;
+ case VKI_XEN_DOMCTL_getvcpucontext:
__POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
break;
- case XEN_DOMCTL_getvcpuextstate:
+ case VKI_XEN_DOMCTL_getvcpuextstate:
__POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, xfeature_mask);
__POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, size);
POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
__PRE_XEN_HVMOP_READ(_hvm_op, "xen_hvm_" # _hvm_op "_t", _field)
switch (op) {
- case HVMOP_set_param:
+ case VKI_XEN_HVMOP_set_param:
/* No output paramters */
break;
- case HVMOP_get_param:
- __POST_XEN_HVMOP_WRITE(get_param, xen_hvm_param_t, value);
+ case VKI_XEN_HVMOP_get_param:
+ __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
break;
}
#undef __POST_XEN_HVMOP_WRITE
nr_args }
static XenHypercallTableEntry hypercall_table[] = {
- // __HYPERVISOR_set_trap_table // 0
- // __HYPERVISOR_mmu_update // 1
- // __HYPERVISOR_set_gdt // 2
- // __HYPERVISOR_stack_switch // 3
- // __HYPERVISOR_set_callbacks // 4
-
- // __HYPERVISOR_fpu_taskswitch // 5
- // __HYPERVISOR_sched_op_compat // 6
- // __HYPERVISOR_platform_op // 7
- // __HYPERVISOR_set_debugreg // 8
- // __HYPERVISOR_get_debugreg // 9
-
- // __HYPERVISOR_update_descriptor // 10
+ // __VKI_XEN_set_trap_table // 0
+ // __VKI_XEN_mmu_update // 1
+ // __VKI_XEN_set_gdt // 2
+ // __VKI_XEN_stack_switch // 3
+ // __VKI_XEN_set_callbacks // 4
+
+ // __VKI_XEN_fpu_taskswitch // 5
+ // __VKI_XEN_sched_op_compat // 6
+ // __VKI_XEN_platform_op // 7
+ // __VKI_XEN_set_debugreg // 8
+ // __VKI_XEN_get_debugreg // 9
+
+ // __VKI_XEN_update_descriptor // 10
// // 11
- HYPXY(__HYPERVISOR_memory_op, memory_op, 2), // 12
- // __HYPERVISOR_multicall // 13
- // __HYPERVISOR_update_va_mapping // 14
-
- // __HYPERVISOR_set_timer_op // 15
- HYPXY(__HYPERVISOR_event_channel_op_compat, evtchn_op_compat, 1), // 16
- HYPXY(__HYPERVISOR_xen_version, xen_version, 2), // 17
- // __HYPERVISOR_console_io // 18
- // __HYPERVISOR_physdev_op_compat // 19
-
- HYPXY(__HYPERVISOR_grant_table_op, grant_table_op, 3), // 20
- // __HYPERVISOR_vm_assist // 21
- // __HYPERVISOR_update_va_mapping_otherdomain // 22
- // __HYPERVISOR_iret, iret // 23
- // __HYPERVISOR_vcpu_op, vcpu_op // 24
-
- // __HYPERVISOR_set_segment_base // 25
- HYPXY(__HYPERVISOR_mmuext_op, mmuext_op, 2), // 26
- // __HYPERVISOR_xsm_op // 27
- // __HYPERVISOR_nmi_op // 28
- // __HYPERVISOR_sched_op // 29
-
- // __HYPERVISOR_callback_op // 30
- // __HYPERVISOR_xenoprof_op // 31
- HYPXY(__HYPERVISOR_event_channel_op, evtchn_op, 2), // 32
- // __HYPERVISOR_physdev_op // 33
- HYPXY(__HYPERVISOR_hvm_op, hvm_op, 2), // 34
-
- HYPXY(__HYPERVISOR_sysctl, sysctl, 1), // 35
- HYPXY(__HYPERVISOR_domctl, domctl, 1), // 36
- // __HYPERVISOR_kexec_op // 37
- // __HYPERVISOR_tmem_op // 38
+ HYPXY(__VKI_XEN_memory_op, memory_op, 2), // 12
+ // __VKI_XEN_multicall // 13
+ // __VKI_XEN_update_va_mapping // 14
+
+ // __VKI_XEN_set_timer_op // 15
+ HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat, 1), // 16
+ HYPXY(__VKI_XEN_xen_version, xen_version, 2), // 17
+ // __VKI_XEN_console_io // 18
+ // __VKI_XEN_physdev_op_compat // 19
+
+ HYPXY(__VKI_XEN_grant_table_op, grant_table_op, 3), // 20
+ // __VKI_XEN_vm_assist // 21
+ // __VKI_XEN_update_va_mapping_otherdomain // 22
+ // __VKI_XEN_iret, iret // 23
+ // __VKI_XEN_vcpu_op, vcpu_op // 24
+
+ // __VKI_XEN_set_segment_base // 25
+ HYPXY(__VKI_XEN_mmuext_op, mmuext_op, 2), // 26
+ // __VKI_XEN_xsm_op // 27
+ // __VKI_XEN_nmi_op // 28
+ // __VKI_XEN_sched_op // 29
+
+ // __VKI_XEN_callback_op // 30
+ // __VKI_XEN_xenoprof_op // 31
+ HYPXY(__VKI_XEN_event_channel_op, evtchn_op, 2), // 32
+ // __VKI_XEN_physdev_op // 33
+ HYPXY(__VKI_XEN_hvm_op, hvm_op, 2), // 34
+
+ HYPXY(__VKI_XEN_sysctl, sysctl, 1), // 35
+ HYPXY(__VKI_XEN_domctl, domctl, 1), // 36
+ // __VKI_XEN_kexec_op // 37
+ // __VKI_XEN_tmem_op // 38
};
static void bad_before ( ThreadId tid,
if (ent->entry.after)
(ent->entry.after)( tid, arrghs, status );
}
+
+#endif // defined(ENABLE_XEN)
--- /dev/null
+#ifndef __VKI_XEN_DOMCTL_H
+#define __VKI_XEN_DOMCTL_H
+
+/*
+ * The domctl interface is versioned via the interface_version
+ * field. The structures in this header support these domctl interfaces:
+ *
+ * - 00000007: Xen 4.1
+ * - 00000008: Xen 4.2
+ *
+ * When adding a new subop be sure to include the variants used by all
+ * of the above, both here and in syswrap-xen.c
+ *
+ * Structs which are identical in all supported versions have no
+ * version suffix. Structs which do differ are defined multiple times
+ * and use the suffix of the latest version to contain that particular
+ * variant.
+ */
+
+#define VKI_XEN_DOMCTL_createdomain 1
+#define VKI_XEN_DOMCTL_destroydomain 2
+#define VKI_XEN_DOMCTL_pausedomain 3
+#define VKI_XEN_DOMCTL_unpausedomain 4
+#define VKI_XEN_DOMCTL_getdomaininfo 5
+#define VKI_XEN_DOMCTL_getmemlist 6
+#define VKI_XEN_DOMCTL_getpageframeinfo 7
+#define VKI_XEN_DOMCTL_getpageframeinfo2 8
+#define VKI_XEN_DOMCTL_setvcpuaffinity 9
+#define VKI_XEN_DOMCTL_shadow_op 10
+#define VKI_XEN_DOMCTL_max_mem 11
+#define VKI_XEN_DOMCTL_setvcpucontext 12
+#define VKI_XEN_DOMCTL_getvcpucontext 13
+#define VKI_XEN_DOMCTL_getvcpuinfo 14
+#define VKI_XEN_DOMCTL_max_vcpus 15
+#define VKI_XEN_DOMCTL_scheduler_op 16
+#define VKI_XEN_DOMCTL_setdomainhandle 17
+#define VKI_XEN_DOMCTL_setdebugging 18
+#define VKI_XEN_DOMCTL_irq_permission 19
+#define VKI_XEN_DOMCTL_iomem_permission 20
+#define VKI_XEN_DOMCTL_ioport_permission 21
+#define VKI_XEN_DOMCTL_hypercall_init 22
+#define VKI_XEN_DOMCTL_arch_setup 23
+#define VKI_XEN_DOMCTL_settimeoffset 24
+#define VKI_XEN_DOMCTL_getvcpuaffinity 25
+#define VKI_XEN_DOMCTL_real_mode_area 26
+#define VKI_XEN_DOMCTL_resumedomain 27
+#define VKI_XEN_DOMCTL_sendtrigger 28
+#define VKI_XEN_DOMCTL_subscribe 29
+#define VKI_XEN_DOMCTL_gethvmcontext 33
+#define VKI_XEN_DOMCTL_sethvmcontext 34
+#define VKI_XEN_DOMCTL_set_address_size 35
+#define VKI_XEN_DOMCTL_get_address_size 36
+#define VKI_XEN_DOMCTL_assign_device 37
+#define VKI_XEN_DOMCTL_bind_pt_irq 38
+#define VKI_XEN_DOMCTL_memory_mapping 39
+#define VKI_XEN_DOMCTL_ioport_mapping 40
+#define VKI_XEN_DOMCTL_pin_mem_cacheattr 41
+#define VKI_XEN_DOMCTL_set_ext_vcpucontext 42
+#define VKI_XEN_DOMCTL_get_ext_vcpucontext 43
+#define VKI_XEN_DOMCTL_set_opt_feature 44
+#define VKI_XEN_DOMCTL_test_assign_device 45
+#define VKI_XEN_DOMCTL_set_target 46
+#define VKI_XEN_DOMCTL_deassign_device 47
+#define VKI_XEN_DOMCTL_unbind_pt_irq 48
+#define VKI_XEN_DOMCTL_set_cpuid 49
+#define VKI_XEN_DOMCTL_get_device_group 50
+#define VKI_XEN_DOMCTL_set_machine_address_size 51
+#define VKI_XEN_DOMCTL_get_machine_address_size 52
+#define VKI_XEN_DOMCTL_suppress_spurious_page_faults 53
+#define VKI_XEN_DOMCTL_debug_op 54
+#define VKI_XEN_DOMCTL_gethvmcontext_partial 55
+#define VKI_XEN_DOMCTL_mem_event_op 56
+#define VKI_XEN_DOMCTL_mem_sharing_op 57
+#define VKI_XEN_DOMCTL_disable_migrate 58
+#define VKI_XEN_DOMCTL_gettscinfo 59
+#define VKI_XEN_DOMCTL_settscinfo 60
+#define VKI_XEN_DOMCTL_getpageframeinfo3 61
+#define VKI_XEN_DOMCTL_setvcpuextstate 62
+#define VKI_XEN_DOMCTL_getvcpuextstate 63
+#define VKI_XEN_DOMCTL_set_access_required 64
+#define VKI_XEN_DOMCTL_audit_p2m 65
+#define VKI_XEN_DOMCTL_set_virq_handler 66
+#define VKI_XEN_DOMCTL_gdbsx_guestmemio 1000
+#define VKI_XEN_DOMCTL_gdbsx_pausevcpu 1001
+#define VKI_XEN_DOMCTL_gdbsx_unpausevcpu 1002
+#define VKI_XEN_DOMCTL_gdbsx_domstatus 1003
+
+struct vki_xen_domctl_createdomain {
+ /* IN parameters */
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t flags;
+};
+
+struct vki_xen_domctl_getdomaininfo_00000007 {
+ /* OUT variables. */
+ vki_xen_domid_t domain;
+ vki_uint32_t flags;
+ vki_xen_uint64_aligned_t tot_pages;
+ vki_xen_uint64_aligned_t max_pages;
+ vki_xen_uint64_aligned_t shr_pages;
+ /* NB: no paged_pages here -- that field was added with domctl
+  * interface version 00000008 (Xen 4.2). Keeping it out makes the
+  * version-7 struct (and hence sizeof-based PRE/POST checks) match
+  * what a Xen 4.1 hypervisor actually reads/writes. */
+ vki_xen_uint64_aligned_t shared_info_frame;
+ vki_xen_uint64_aligned_t cpu_time;
+ vki_uint32_t nr_online_vcpus;
+ vki_uint32_t max_vcpu_id;
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t cpupool;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000007 vki_xen_domctl_getdomaininfo_00000007_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000007_t);
+
+struct vki_xen_domctl_getdomaininfo_00000008 {
+ /* OUT variables. */
+ vki_xen_domid_t domain;
+ vki_uint32_t flags;
+ vki_xen_uint64_aligned_t tot_pages;
+ vki_xen_uint64_aligned_t max_pages;
+ vki_xen_uint64_aligned_t shr_pages;
+ vki_xen_uint64_aligned_t paged_pages;
+ vki_xen_uint64_aligned_t shared_info_frame;
+ vki_xen_uint64_aligned_t cpu_time;
+ vki_uint32_t nr_online_vcpus;
+ vki_uint32_t max_vcpu_id;
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t cpupool;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000008 vki_xen_domctl_getdomaininfo_00000008_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000008_t);
+
+struct vki_xen_domctl_vcpuaffinity {
+ vki_uint32_t vcpu; /* IN */
+ struct vki_xenctl_cpumap cpumap; /* IN/OUT */
+};
+
+struct vki_xen_domctl_max_mem {
+ /* IN variables. */
+ vki_xen_uint64_aligned_t max_memkb;
+};
+
+struct vki_xen_domctl_vcpucontext {
+ vki_uint32_t vcpu; /* IN */
+ VKI_XEN_GUEST_HANDLE_64(vki_xen_vcpu_guest_context_t) ctxt; /* IN/OUT */
+};
+
+struct vki_xen_domctl_getvcpuinfo {
+ /* IN variables. */
+ vki_uint32_t vcpu;
+ /* OUT variables. */
+ vki_uint8_t online; /* currently online (not hotplugged)? */
+ vki_uint8_t blocked; /* blocked waiting for an event? */
+ vki_uint8_t running; /* currently scheduled on its CPU? */
+ vki_xen_uint64_aligned_t cpu_time;/* total cpu time consumed (ns) */
+ vki_uint32_t cpu; /* current mapping */
+};
+
+struct vki_xen_domctl_scheduler_op {
+ vki_uint32_t sched_id; /* VKI_XEN_SCHEDULER_* */
+#define VKI_XEN_SCHEDULER_SEDF 4
+#define VKI_XEN_SCHEDULER_CREDIT 5
+#define VKI_XEN_SCHEDULER_CREDIT2 6
+#define VKI_XEN_SCHEDULER_ARINC653 7
+ vki_uint32_t cmd; /* VKI_XEN_DOMCTL_SCHEDOP_* */
+#define VKI_XEN_DOMCTL_SCHEDOP_putinfo 0
+#define VKI_XEN_DOMCTL_SCHEDOP_getinfo 1
+ union {
+ /* Inner tags carry the vki_ prefix like every other type in this
+  * header, so they cannot clash with the real <xen/domctl.h> tags
+  * if both ever end up in the same translation unit. */
+ struct vki_xen_domctl_sched_sedf {
+ vki_xen_uint64_aligned_t period;
+ vki_xen_uint64_aligned_t slice;
+ vki_xen_uint64_aligned_t latency;
+ vki_uint32_t extratime;
+ vki_uint32_t weight;
+ } sedf;
+ struct vki_xen_domctl_sched_credit {
+ vki_uint16_t weight;
+ vki_uint16_t cap;
+ } credit;
+ struct vki_xen_domctl_sched_credit2 {
+ vki_uint16_t weight;
+ } credit2;
+ } u;
+};
+
+struct vki_xen_domctl_max_vcpus {
+ vki_uint32_t max; /* maximum number of vcpus */
+};
+
+struct vki_xen_domctl_hypercall_init {
+ vki_xen_uint64_aligned_t gmfn; /* GMFN to be initialised */
+};
+
+struct vki_xen_domctl_cpuid {
+ vki_uint32_t input[2];
+ vki_uint32_t eax;
+ vki_uint32_t ebx;
+ vki_uint32_t ecx;
+ vki_uint32_t edx;
+};
+
+struct vki_xen_guest_tsc_info {
+ vki_uint32_t tsc_mode;
+ vki_uint32_t gtsc_khz;
+ vki_uint32_t incarnation;
+ vki_uint32_t pad;
+ vki_xen_uint64_aligned_t elapsed_nsec;
+};
+typedef struct vki_xen_guest_tsc_info vki_xen_guest_tsc_info_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_guest_tsc_info_t);
+
+struct vki_xen_domctl_tsc_info {
+ VKI_XEN_GUEST_HANDLE_64(vki_xen_guest_tsc_info_t) out_info; /* OUT */
+ vki_xen_guest_tsc_info_t info; /* IN */
+};
+
+struct vki_xen_domctl_vcpuextstate {
+ vki_uint32_t vcpu;
+ vki_xen_uint64_aligned_t xfeature_mask;
+ vki_xen_uint64_aligned_t size;
+ VKI_XEN_GUEST_HANDLE_64(vki_uint64) buffer;
+};
+
+struct vki_xen_domctl_address_size {
+ vki_uint32_t size;
+};
+
+struct vki_xen_domctl {
+ vki_uint32_t cmd;
+ vki_uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
+ vki_xen_domid_t domain;
+ union {
+ struct vki_xen_domctl_createdomain createdomain;
+ struct vki_xen_domctl_getdomaininfo_00000007 getdomaininfo_00000007;
+ struct vki_xen_domctl_getdomaininfo_00000008 getdomaininfo_00000008;
+ //struct vki_xen_domctl_getmemlist getmemlist;
+ //struct vki_xen_domctl_getpageframeinfo getpageframeinfo;
+ //struct vki_xen_domctl_getpageframeinfo2 getpageframeinfo2;
+ //struct vki_xen_domctl_getpageframeinfo3 getpageframeinfo3;
+ struct vki_xen_domctl_vcpuaffinity vcpuaffinity;
+ //struct vki_xen_domctl_shadow_op shadow_op;
+ struct vki_xen_domctl_max_mem max_mem;
+ struct vki_xen_domctl_vcpucontext vcpucontext;
+ struct vki_xen_domctl_getvcpuinfo getvcpuinfo;
+ struct vki_xen_domctl_max_vcpus max_vcpus;
+ struct vki_xen_domctl_scheduler_op scheduler_op;
+ //struct vki_xen_domctl_setdomainhandle setdomainhandle;
+ //struct vki_xen_domctl_setdebugging setdebugging;
+ //struct vki_xen_domctl_irq_permission irq_permission;
+ //struct vki_xen_domctl_iomem_permission iomem_permission;
+ //struct vki_xen_domctl_ioport_permission ioport_permission;
+ struct vki_xen_domctl_hypercall_init hypercall_init;
+ //struct vki_xen_domctl_arch_setup arch_setup;
+ //struct vki_xen_domctl_settimeoffset settimeoffset;
+ //struct vki_xen_domctl_disable_migrate disable_migrate;
+ struct vki_xen_domctl_tsc_info tsc_info;
+ //struct vki_xen_domctl_real_mode_area real_mode_area;
+ //struct vki_xen_domctl_hvmcontext hvmcontext;
+ //struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
+ struct vki_xen_domctl_address_size address_size;
+ //struct vki_xen_domctl_sendtrigger sendtrigger;
+ //struct vki_xen_domctl_get_device_group get_device_group;
+ //struct vki_xen_domctl_assign_device assign_device;
+ //struct vki_xen_domctl_bind_pt_irq bind_pt_irq;
+ //struct vki_xen_domctl_memory_mapping memory_mapping;
+ //struct vki_xen_domctl_ioport_mapping ioport_mapping;
+ //struct vki_xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
+ //struct vki_xen_domctl_ext_vcpucontext ext_vcpucontext;
+ //struct vki_xen_domctl_set_opt_feature set_opt_feature;
+ //struct vki_xen_domctl_set_target set_target;
+ //struct vki_xen_domctl_subscribe subscribe;
+ //struct vki_xen_domctl_debug_op debug_op;
+ //struct vki_xen_domctl_mem_event_op mem_event_op;
+ //struct vki_xen_domctl_mem_sharing_op mem_sharing_op;
+#if defined(__i386__) || defined(__x86_64__)
+ struct vki_xen_domctl_cpuid cpuid;
+ struct vki_xen_domctl_vcpuextstate vcpuextstate;
+#endif
+ //struct vki_xen_domctl_set_access_required access_required;
+ //struct vki_xen_domctl_audit_p2m audit_p2m;
+ //struct vki_xen_domctl_set_virq_handler set_virq_handler;
+ //struct vki_xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ //struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
+ //struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ vki_uint8_t pad[128];
+ } u;
+};
+
+#endif // __VKI_XEN_DOMCTL_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/