diff --git a/core/cpu.c b/core/cpu.c
index 48a264b1..27452903 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -43,7 +43,7 @@ static cpuid_cache_t cache = {
.initialized = 0
};
-static vmx_error_t cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_error_t err);
+static void cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_result_t result);
static int cpu_vmexit_handler(struct vcpu_t *vcpu, exit_reason_t exit_reason,
struct hax_tunnel *htun);
@@ -227,18 +227,18 @@ static void vmread_cr(struct vcpu_t *vcpu)
state->_cr4 = (cr4 & ~cr4_mask) | (state->_cr4 & cr4_mask);
}
-vmx_error_t cpu_vmx_vmptrld(struct per_cpu_data *cpu_data, paddr_t vmcs,
- struct vcpu_t *vcpu)
+vmx_result_t cpu_vmx_vmptrld(struct per_cpu_data *cpu_data, paddr_t vmcs,
+ struct vcpu_t *vcpu)
{
- vmx_error_t r = __vmptrld(vmcs);
+ vmx_result_t r = asm_vmptrld(&vmcs);
return r;
}
bool vcpu_is_panic(struct vcpu_t *vcpu)
{
struct hax_tunnel *htun = vcpu->tunnel;
- if (vcpu->paniced) {
- hax_error("vcpu is paniced, id:%d\n", vcpu->vcpu_id);
+ if (vcpu->panicked) {
+ hax_error("vcpu has panicked, id:%d\n", vcpu->vcpu_id);
hax_panic_log(vcpu);
htun->_exit_status = HAX_EXIT_STATECHANGE;
return 1;
@@ -324,9 +324,9 @@ static int cpu_vmexit_handler(struct vcpu_t *vcpu, exit_reason_t exit_reason,
#ifdef CONFIG_DARWIN
__attribute__ ((__noinline__))
#endif
-vmx_error_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun)
+vmx_result_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun)
{
- uint64 rflags = 0;
+ vmx_result_t result = 0;
mword host_rip;
/* prepare the RIP */
@@ -338,7 +338,7 @@ vmx_error_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun)
* put the vmwrite before is_running, so that the vcpu->cpu_id is set
* when we check vcpu->is_running in vcpu_pause
*/
- host_rip = get_rip();
+ host_rip = vmx_get_rip();
vmwrite(vcpu, HOST_RIP, (mword)host_rip);
vcpu->is_running = 1;
#ifdef DEBUG_HOST_STATE
@@ -349,7 +349,7 @@ vmx_error_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun)
load_guest_msr(vcpu);
- rflags = __vmx_run(vcpu->state, vcpu->launched);
+ result = asm_vmxrun(vcpu->state, vcpu->launched);
vcpu->is_running = 0;
save_guest_msr(vcpu);
@@ -360,13 +360,12 @@ vmx_error_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun)
compare_host_state(vcpu);
#endif
- hax_debug("\ncpu_vmx_run %llx, FAIL %llx\n", rflags, rflags & VMX_FAIL_MASK);
- if (rflags & VMX_FAIL_MASK) {
- cpu_vmentry_failed(vcpu, rflags & VMX_FAIL_MASK);
+ if (result != VMX_SUCCEED) {
+ cpu_vmentry_failed(vcpu, result);
htun->_exit_reason = 0;
htun->_exit_status = HAX_EXIT_UNKNOWN;
}
- return (rflags & VMX_FAIL_MASK);
+ return result;
}
void vcpu_handle_vmcs_pending(struct vcpu_t *vcpu)
@@ -402,7 +401,7 @@ void vcpu_handle_vmcs_pending(struct vcpu_t *vcpu)
/* Return the value same as ioctl value */
int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun)
{
- vmx_error_t err = 0;
+ vmx_result_t res = 0;
int ret;
preempt_flag flags;
struct vcpu_state_t *state = vcpu->state;
@@ -449,9 +448,9 @@ int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun)
vmwrite(vcpu, GUEST_TR_AR, temp);
}
- err = cpu_vmx_run(vcpu, htun);
- if (err) {
- hax_debug("cpu_vmx_run error, code:%lx\n", err);
+ res = cpu_vmx_run(vcpu, htun);
+ if (res) {
+ hax_debug("cpu_vmx_run error, code:%xlx\n", res);
if ((vmcs_err = put_vmcs(vcpu, &flags))) {
hax_panic_vcpu(vcpu, "put_vmcs fail: %x\n", vmcs_err);
hax_panic_log(vcpu);
@@ -520,7 +519,7 @@ void restore_host_cr4_vmxe(struct per_cpu_data *cpu_data);
uint32 log_host_cr4_vmxe = 0;
uint64 log_host_cr4 = 0;
-vmx_error_t log_vmxon_err = 0;
+vmx_result_t log_vmxon_res = 0;
uint64 log_vmxon_addr = 0;
uint32 log_vmxon_err_type1 = 0;
uint32 log_vmxon_err_type2 = 0;
@@ -528,13 +527,13 @@ uint32 log_vmxon_err_type3 = 0;
uint32 log_vmclear_err = 0;
uint32 log_vmptrld_err = 0;
uint32 log_vmxoff_no = 0;
-vmx_error_t log_vmxoff_err = 0;
+vmx_result_t log_vmxoff_res = 0;
void hax_clear_panic_log(struct vcpu_t *vcpu)
{
log_host_cr4_vmxe = 0;
log_host_cr4 = 0;
- log_vmxon_err = 0;
+ log_vmxon_res = 0;
log_vmxon_addr = 0;
log_vmxon_err_type1 = 0;
log_vmxon_err_type2 = 0;
@@ -542,7 +541,7 @@ void hax_clear_panic_log(struct vcpu_t *vcpu)
log_vmclear_err = 0;
log_vmptrld_err = 0;
log_vmxoff_no = 0;
- log_vmxoff_err = 0;
+ log_vmxoff_res = 0;
}
void hax_panic_log(struct vcpu_t *vcpu)
@@ -551,7 +550,7 @@ void hax_panic_log(struct vcpu_t *vcpu)
return;
hax_error("log_host_cr4_vmxe: %x\n", log_host_cr4_vmxe);
hax_error("log_host_cr4 %llx\n", log_host_cr4);
- hax_error("log_vmxon_err %lx\n", log_vmxon_err);
+ hax_error("log_vmxon_res %x\n", log_vmxon_res);
hax_error("log_vmxon_addr %llx\n", log_vmxon_addr);
hax_error("log_vmxon_err_type1 %x\n", log_vmxon_err_type1);
hax_error("log_vmxon_err_type2 %x\n", log_vmxon_err_type2);
@@ -559,7 +558,7 @@ void hax_panic_log(struct vcpu_t *vcpu)
hax_error("log_vmclear_err %x\n", log_vmclear_err);
hax_error("log_vmptrld_err %x\n", log_vmptrld_err);
hax_error("log_vmoff_no %x\n", log_vmxoff_no);
- hax_error("log_vmoff_err %lx\n", log_vmxoff_err);
+ hax_error("log_vmxoff_res %x\n", log_vmxoff_res);
}
uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
@@ -597,7 +596,7 @@ uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
vmcs_phy = hax_page_pa(cpu_data->vmcs_page);
- if (__vmptrld(vmcs_phy) != VMX_SUCCEED) {
+ if (asm_vmptrld(&vmcs_phy) != VMX_SUCCEED) {
hax_error("HAX: vmptrld failed (%08llx)\n", vmcs_phy);
cpu_vmxroot_leave();
log_vmxon_err_type3 = 1;
@@ -631,10 +630,9 @@ void restore_host_cr4_vmxe(struct per_cpu_data *cpu_data)
uint32 put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
{
- int cpu_id = hax_cpuid();
- struct per_cpu_data *cpu_data = hax_cpu_data[cpu_id];
+ struct per_cpu_data *cpu_data = current_cpu_data();
paddr_t vmcs_phy;
- vmx_error_t vmxoff_err = 0;
+ vmx_result_t vmxoff_res = 0;
if (vcpu && cpu_data->nested > 0) {
cpu_data->nested--;
goto out;
@@ -645,20 +643,20 @@ uint32 put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
else
vmcs_phy = hax_page_pa(cpu_data->vmcs_page);
- if (__vmclear(vmcs_phy) != VMX_SUCCEED) {
- hax_error("HAX: vmclear ailed (%llx)\n", vmcs_phy);
+ if (asm_vmclear(&vmcs_phy) != VMX_SUCCEED) {
+ hax_error("HAX: vmclear failed (%llx)\n", vmcs_phy);
log_vmclear_err = 1;
}
cpu_data->current_vcpu = NULL;
- vmxoff_err = cpu_vmxroot_leave();
+ vmxoff_res = cpu_vmxroot_leave();
cpu_data->other_vmcs = VMCS_NONE;
if (vcpu && vcpu->is_vmcs_loaded)
vcpu->is_vmcs_loaded = 0;
out:
hax_enable_preemption(flags);
- return vmxoff_err;
+ return vmxoff_res;
}
void load_vmcs_common(struct vcpu_t *vcpu)
@@ -695,14 +693,14 @@ void load_vmcs_common(struct vcpu_t *vcpu)
}
-static vmx_error_t cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_error_t err)
+static void cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_result_t result)
{
- hax_debug("HAX: VM entry failed: err=%lx RIP=%08lx\n",
- err, (mword)vmread(vcpu, GUEST_RIP));
+ hax_debug("HAX: VM entry failed: result=%x RIP=%08lx\n",
+ result, (mword)vmread(vcpu, GUEST_RIP));
//dump_vmcs();
- if (err == VMX_FAIL_VALID) {
+ if (result == VMX_FAIL_VALID) {
hax_log("HAX: Prev exit: %llx error code: %llx\n",
vmread(vcpu, VM_EXIT_INFO_REASON),
vmread(vcpu, VMX_INSTRUCTION_ERROR_CODE));
@@ -713,17 +711,16 @@ static vmx_error_t cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_error_t err)
hax_log("HAX: VM entry failed\n");
hax_log("end of cpu_vmentry_failed\n");
- return err;
}
-vmx_error_t cpu_vmxroot_leave(void)
+vmx_result_t cpu_vmxroot_leave(void)
{
struct per_cpu_data *cpu_data = current_cpu_data();
- vmx_error_t err = VMX_SUCCEED;
+ vmx_result_t result = VMX_SUCCEED;
if (cpu_data->vmm_flag & VMXON_HAX) {
- err = __vmxoff();
- if (!(err & VMX_FAIL_MASK)) {
+ result = asm_vmxoff();
+ if (result == VMX_SUCCEED) {
cpu_data->vmm_flag &= ~VMXON_HAX;
restore_host_cr4_vmxe(cpu_data);
} else {
@@ -736,20 +733,21 @@ vmx_error_t cpu_vmxroot_leave(void)
" Hypervisor Framework) is running\n");
#else
// It should not go here in Win64/win32
- err = VMX_FAIL_VALID;
+ result = VMX_FAIL_VALID;
hax_error("NO VMXOFF.......\n");
#endif
}
- cpu_data->vmxoff_err = err;
+ cpu_data->vmxoff_res = result;
- return err;
+ return result;
}
-vmx_error_t cpu_vmxroot_enter(void)
+vmx_result_t cpu_vmxroot_enter(void)
{
struct per_cpu_data *cpu_data = current_cpu_data();
uint64 fc_msr;
- vmx_error_t err = VMX_SUCCEED;
+ paddr_t vmxon_addr;
+ vmx_result_t result = VMX_SUCCEED;
cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
if (cpu_data->host_cr4_vmxe) {
@@ -782,18 +780,19 @@ vmx_error_t cpu_vmxroot_enter(void)
ia32_wrmsr(IA32_FEATURE_CONTROL,
fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);
- err = __vmxon(hax_page_pa(cpu_data->vmxon_page));
+ vmxon_addr = hax_page_pa(cpu_data->vmxon_page);
+ result = asm_vmxon(&vmxon_addr);
- log_vmxon_err = err;
- log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);
+ log_vmxon_res = result;
+ log_vmxon_addr = vmxon_addr;
- if (!(err & VMX_FAIL_MASK)) {
+ if (result == VMX_SUCCEED) {
cpu_data->vmm_flag |= VMXON_HAX;
} else {
bool fatal = true;
#ifdef __MACH__
- if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
+ if ((result == VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
// On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
// was already set, it is very likely that another VMM (VirtualBox
// or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
@@ -804,22 +803,23 @@ vmx_error_t cpu_vmxroot_enter(void)
// is not actually in VMX operation, VMPTRST will probably cause a
// host reboot. But we don't have a better choice, and it is worth
// taking the risk.
- __vmptrst();
+ paddr_t vmcs_addr;
+ asm_vmptrst(&vmcs_addr);
// It is still alive - Just assumption is right.
fatal = false;
- err = VMX_SUCCEED;
+ result = VMX_SUCCEED;
// Indicate that it is not necessary to call VMXOFF later
cpu_data->vmm_flag &= ~VMXON_HAX;
}
#endif
if (fatal) {
- hax_error("VMXON failed for region 0x%llx (err=0x%x, vmxe=%x)\n",
- hax_page_pa(cpu_data->vmxon_page), (uint32)err,
+ hax_error("VMXON failed for region 0x%llx (result=0x%x, vmxe=%x)\n",
+ hax_page_pa(cpu_data->vmxon_page), (uint32)result,
(uint32)cpu_data->host_cr4_vmxe);
restore_host_cr4_vmxe(cpu_data);
- if (err & VMX_FAIL_INVALID) {
+ if (result == VMX_FAIL_INVALID) {
log_vmxon_err_type1 = 1;
} else {
// TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
@@ -828,6 +828,6 @@ vmx_error_t cpu_vmxroot_enter(void)
}
}
}
- cpu_data->vmxon_err = err;
- return err;
+ cpu_data->vmxon_res = result;
+ return result;
}
diff --git a/core/cpuid.c b/core/cpuid.c
index c5c53475..a5771056 100644
--- a/core/cpuid.c
+++ b/core/cpuid.c
@@ -48,14 +48,14 @@ typedef union cpuid_feature_t {
void cpuid_query_leaf(cpuid_args_t *args, uint32_t leaf)
{
args->eax = leaf;
- __handle_cpuid(args);
+ asm_cpuid(args);
}
void cpuid_query_subleaf(cpuid_args_t *args, uint32_t leaf, uint32_t subleaf)
{
args->eax = leaf;
args->ecx = subleaf;
- __handle_cpuid(args);
+ asm_cpuid(args);
}
void cpuid_host_init(cpuid_cache_t *cache)
diff --git a/core/ept.c b/core/ept.c
index 7c0d3fcc..ee3737c3 100644
--- a/core/ept.c
+++ b/core/ept.c
@@ -330,12 +330,12 @@ static void invept_smpfunc(struct invept_bundle *bundle)
smp_mb();
cpu_data = current_cpu_data();
- cpu_data->invept_err = VMX_SUCCEED;
+ cpu_data->invept_res = VMX_SUCCEED;
cpu_vmxroot_enter();
- if (!(cpu_data->vmxon_err & VMX_FAIL_MASK)) {
- cpu_data->invept_err = __invept(bundle->type, bundle->desc);
+ if (cpu_data->vmxon_res == VMX_SUCCEED) {
+ cpu_data->invept_res = asm_invept(bundle->type, bundle->desc);
cpu_vmxroot_leave();
}
}
@@ -346,7 +346,7 @@ void invept(hax_vm_t *hax_vm, uint type)
struct invept_desc desc = { eptp_value, 0 };
struct invept_bundle bundle;
int cpu_id;
- uint32 err;
+ uint32 res;
if (!ept_has_cap(ept_cap_invept)) {
hax_warning("INVEPT was not called due to missing host support"
@@ -394,20 +394,20 @@ void invept(hax_vm_t *hax_vm, uint type)
continue;
}
- err = (uint32)cpu_data->vmxon_err;
- if (err & VMX_FAIL_MASK) {
+ res = (uint32)cpu_data->vmxon_res;
+ if (res != VMX_SUCCEED) {
hax_error("[Processor #%d] INVEPT was not called, because VMXON"
- " failed (err=0x%x)\n", cpu_id, err);
+ " failed (err=0x%x)\n", cpu_id, res);
} else {
- err = (uint32)cpu_data->invept_err;
- if (err & VMX_FAIL_MASK) {
+ res = (uint32)cpu_data->invept_res;
+ if (res != VMX_SUCCEED) {
hax_error("[Processor #%d] INVEPT failed (err=0x%x)\n", cpu_id,
- err);
+ res);
}
- err = (uint32)cpu_data->vmxoff_err;
- if (err & VMX_FAIL_MASK) {
+ res = (uint32)cpu_data->vmxoff_res;
+ if (res != VMX_SUCCEED) {
hax_error("[Processor #%d] INVEPT was called, but VMXOFF failed"
- " (err=0x%x)\n", cpu_id, err);
+ " (err=0x%x)\n", cpu_id, res);
}
}
}
diff --git a/core/hax.c b/core/hax.c
index 4661f68c..668cb536 100644
--- a/core/hax.c
+++ b/core/hax.c
@@ -41,16 +41,16 @@
/* deal with module parameter */
struct config_t config = {
- 0, /* memory_pass_through */
- 0, /* disable_ept */
- 1, /* ept_small_pages */
- 1, /* disable_vpid */
- 1, /* disable_unrestricted_guest */
- 1, /* no_cpuid_pass_through */
- 0, /* cpuid_pass_through */
- 0, /* cpuid_no_mwait */
- 0
-}; /* no_msr_pass_through */
+ .memory_pass_through = 0,
+ .disable_ept = 0,
+ .ept_small_pages = 1,
+ .disable_vpid = 1,
+ .disable_unrestricted_guest = 1,
+ .no_cpuid_pass_through = 1,
+ .cpuid_pass_through = 0,
+ .cpuid_no_mwait = 0,
+ .no_msr_pass_through = 0
+};
struct hax_page *io_bitmap_page_a;
struct hax_page *io_bitmap_page_b;
@@ -147,7 +147,7 @@ int hax_em64t_enabled(void)
*/
static int hax_vmx_enable_check(void)
{
- int vts = 0, nxs =0, vte = 0, nxe = 0, em64s = 0, em64e = 0, finished = 0;
+ int vts = 0, nxs = 0, vte = 0, nxe = 0, em64s = 0, em64e = 0, finished = 0;
int cpu, tnum = 0, error = 0;
for (cpu = 0; cpu < max_cpus; cpu++) {
diff --git a/core/haxlib.vcxproj b/core/haxlib.vcxproj
index f65121ce..774451ca 100644
--- a/core/haxlib.vcxproj
+++ b/core/haxlib.vcxproj
@@ -81,9 +81,9 @@
-
- Document
-
+
+
+
@@ -114,6 +114,7 @@
+
diff --git a/core/ia32.c b/core/ia32.c
new file mode 100644
index 00000000..72ae9187
--- /dev/null
+++ b/core/ia32.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../include/hax.h"
+#include "../include/asm.h"
+
+struct qword_val {
+ uint32 low;
+ uint32 high;
+};
+
+extern void ASMCALL asm_enable_irq(void);
+extern void ASMCALL asm_disable_irq(void);
+
+extern mword ASMCALL asm_vmread(uint32 component);
+extern void ASMCALL asm_vmwrite(uint32 component, mword val);
+
+#ifdef _M_IX86
+extern void ASMCALL asm_rdmsr(uint32 reg, struct qword_val *qv);
+extern void ASMCALL asm_wrmsr(uint32 reg, struct qword_val *qv);
+extern void ASMCALL asm_rdtsc(struct qword_val *qv);
+#else // !_M_IX86
+extern uint64 ASMCALL asm_rdmsr(uint32 reg);
+extern void ASMCALL asm_wrmsr(uint32 reg, uint64 val);
+extern uint64 ASMCALL asm_rdtsc(void);
+#endif // _M_IX86
+
+uint64 ia32_rdmsr(uint32 reg)
+{
+#ifdef _M_IX86
+ struct qword_val val = { 0 };
+
+ asm_rdmsr(reg, &val);
+ return ((uint64)(val.low) | (uint64)(val.high) << 32);
+#else
+ return asm_rdmsr(reg);
+#endif
+}
+
+void ia32_wrmsr(uint32 reg, uint64 val)
+{
+#ifdef _M_IX86
+ struct qword_val tmp = { 0 };
+
+ tmp.high = (uint32)(val >> 32);
+ tmp.low = (uint32)val;
+ asm_wrmsr(reg, &tmp);
+#else
+ asm_wrmsr(reg, val);
+#endif
+}
+
+uint64 rdtsc(void)
+{
+#ifdef _M_IX86
+ struct qword_val val = { 0 };
+ asm_rdtsc(&val);
+ return ((uint64)(val.low) | (uint64)(val.high) << 32);
+#else
+ return asm_rdtsc();
+#endif
+}
+
+void fxinit(void)
+{
+ asm_fxinit();
+}
+
+void fxsave(mword *addr)
+{
+ asm_fxsave(addr);
+}
+
+void fxrstor(mword *addr)
+{
+ asm_fxrstor(addr);
+}
+
+void btr(uint8 *addr, uint bit)
+{
+ // asm_btr() may not be able to handle bit offsets greater than 0xff. To be
+ // safe, advance the base address so the bit offset passed down is always
+ // less than 8.
+ uint8 *base = addr + bit / 8;
+ uint offset = bit % 8;
+ asm_btr(base, offset);
+}
+
+void bts(uint8 *addr, uint bit)
+{
+ uint8 *base = addr + bit / 8;
+ uint offset = bit % 8;
+ asm_bts(base, offset);
+}
+
+void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
+ component_index_t component,
+ mword source_val)
+{
+ asm_vmwrite(component, source_val);
+}
+
+void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
+ component_index_t component,
+ uint64 source_val)
+{
+#ifdef _M_IX86
+ asm_vmwrite(component, (uint32)source_val);
+ asm_vmwrite(component + 1, (uint32)(source_val >> 32));
+#else
+ asm_vmwrite(component, source_val);
+#endif
+}
+
+void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
+ component_index_t component,
+ uint64 source_val)
+{
+#ifdef _M_IX86
+ asm_vmwrite(component, (uint32)source_val);
+#else
+ asm_vmwrite(component, source_val);
+#endif
+}
+
+uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component)
+{
+ uint64 val = 0;
+
+ val = asm_vmread(component);
+ return val;
+}
+
+uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component)
+{
+ uint64 val = 0;
+
+ val = asm_vmread(component);
+ return val;
+}
+
+uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component)
+{
+ uint64 val = 0;
+
+ val = asm_vmread(component);
+#ifdef _M_IX86
+ val |= ((uint64)(asm_vmread(component + 1)) << 32);
+#endif
+ return val;
+}
+
+#ifndef __MACH__
+void hax_enable_irq(void)
+{
+ asm_enable_irq();
+}
+
+void hax_disable_irq(void)
+{
+ asm_disable_irq();
+}
+#endif
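The btr()/bts() wrappers above reduce an arbitrary bit index to a byte-aligned base plus a sub-byte offset before reaching the assembly. A worked example of the same arithmetic (values illustrative):

    /* bit = 37: base advances 37 / 8 = 4 bytes, offset becomes 37 % 8 = 5. */
    uint8 bitmap[8] = { 0 };
    bts(bitmap, 37);    /* sets bit 5 of bitmap[4], so bitmap[4] == 0x20 */
    btr(bitmap, 37);    /* clears the same bit again */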
diff --git a/core/ia32_ops.asm b/core/ia32_ops.asm
new file mode 100644
index 00000000..7ca7c83d
--- /dev/null
+++ b/core/ia32_ops.asm
@@ -0,0 +1,355 @@
+;
+; Copyright (c) 2011 Intel Corporation
+; Copyright (c) 2018 Alexandro Sanchez Bach
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+;
+; 3. Neither the name of the copyright holder nor the names of its
+; contributors may be used to endorse or promote products derived from
+; this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+;
+; Detect architecture
+;
+%ifidn __OUTPUT_FORMAT__, elf32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, win32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, macho32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, elf64
+ %define __BITS__ 64
+ %define __CONV__ x64_systemv
+%elifidn __OUTPUT_FORMAT__, win64
+ %define __BITS__ 64
+ %define __CONV__ x64_microsoft
+%elifidn __OUTPUT_FORMAT__, macho64
+ %define __BITS__ 64
+ %define __CONV__ x64_systemv
+%endif
+
+;
+; Describe calling convention
+;
+%ifidn __CONV__, x32_cdecl
+;
+; Although cdecl does not place arguments in registers, we simulate fastcall
+; by reading the first 2 stack arguments into the ecx/edx respectively.
+;
+ %define reg_arg1_16 cx
+ %define reg_arg1_32 ecx
+ %define reg_arg1 reg_arg1_32
+ %define reg_arg2_16 dx
+ %define reg_arg2_32 edx
+ %define reg_arg2 reg_arg2_32
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret reg_ret_32
+%elifidn __CONV__, x64_systemv
+ %define reg_arg1_16 di
+ %define reg_arg1_32 edi
+ %define reg_arg1_64 rdi
+ %define reg_arg1 reg_arg1_64
+ %define reg_arg2_16 si
+ %define reg_arg2_32 esi
+ %define reg_arg2_64 rsi
+ %define reg_arg2 reg_arg2_64
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret_64 rax
+ %define reg_ret reg_ret_64
+%elifidn __CONV__, x64_microsoft
+ %define reg_arg1_16 cx
+ %define reg_arg1_32 ecx
+ %define reg_arg1_64 rcx
+ %define reg_arg1 reg_arg1_64
+ %define reg_arg2_16 dx
+ %define reg_arg2_32 edx
+ %define reg_arg2_64 rdx
+ %define reg_arg2 reg_arg2_64
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret_64 rax
+ %define reg_ret reg_ret_64
+%endif
+
+;
+; Helpers
+;
+
+; Macro: function
+; Declares a function. Arguments:
+; - %1 Name of the function
+; - %2 Number of arguments
+;
+%macro function 2
+ global %1
+ %1:
+%ifidn __CONV__, x32_cdecl
+ %if %2 >= 3
+ %error "Unsupported number of arguments"
+ %else
+ %if %2 >= 1
+ mov reg_arg1, [esp + 0x4]
+ %endif
+ %if %2 >= 2
+ mov reg_arg2, [esp + 0x8]
+ %endif
+ %endif
+%endif
+%endmacro
+
+%macro function_get_reg 1
+ function get_%+%1, 0
+ mov reg_ret, %1
+ ret
+%endmacro
+%macro function_set_reg 1
+ function set_%+%1, 1
+ mov %1, reg_arg1
+ ret
+%endmacro
+%macro function_get_segment 1
+ function get_kernel_%+%1, 0
+ mov reg_ret_16, %1
+ ret
+%endmacro
+%macro function_set_segment 1
+ function set_kernel_%+%1, 1
+ mov %1, reg_arg1_16
+ ret
+%endmacro
+
+section .text
+
+struc qword_struct
+ .lo resd 1
+ .hi resd 1
+endstruc
+
+struc cpuid_args
+ ._eax resd 1
+ ._ecx resd 1
+ ._edx resd 1
+ ._ebx resd 1
+endstruc
+
+function __nmi, 0
+ int 2h
+ ret
+
+function __fls, 1
+ xor reg_ret_32, reg_ret_32
+ bsr reg_ret_32, reg_arg1_32
+ ret
+
+function asm_cpuid, 1
+%ifidn __BITS__, 64
+ push rbx
+ mov r8, reg_arg1
+ mov eax, [r8 + cpuid_args._eax]
+ mov ecx, [r8 + cpuid_args._ecx]
+ cpuid
+ mov [r8 + cpuid_args._eax], eax
+ mov [r8 + cpuid_args._ebx], ebx
+ mov [r8 + cpuid_args._ecx], ecx
+ mov [r8 + cpuid_args._edx], edx
+ pop rbx
+ ret
+%elifidn __BITS__, 32
+ push ebx
+ push esi
+ mov esi, reg_arg1
+ mov eax, [esi + cpuid_args._eax]
+ mov ecx, [esi + cpuid_args._ecx]
+ cpuid
+ mov [esi + cpuid_args._eax], eax
+ mov [esi + cpuid_args._ebx], ebx
+ mov [esi + cpuid_args._ecx], ecx
+ mov [esi + cpuid_args._edx], edx
+ pop esi
+ pop ebx
+ ret
+%else
+ %error "Unimplemented function"
+%endif
+
+function asm_btr, 2
+ lock btr [reg_arg1], reg_arg2
+ ret
+
+function asm_bts, 2
+ lock bts [reg_arg1], reg_arg2
+ ret
+
+function asm_disable_irq, 0
+ cli
+ ret
+
+function asm_enable_irq, 0
+ sti
+ ret
+
+function asm_fxinit, 0
+ finit
+ ret
+
+function asm_fxrstor, 1
+ fxrstor [reg_arg1]
+ ret
+
+function asm_fxsave, 1
+ fxsave [reg_arg1]
+ ret
+
+function asm_rdmsr, 2
+%ifidn __BITS__, 64
+ mov rcx, reg_arg1
+ rdmsr
+ shl rdx, 32
+ or reg_ret, rdx
+ ret
+%elifidn __CONV__, x32_cdecl
+ push ebx
+ mov ebx, reg_arg2
+ rdmsr
+ mov [ebx + qword_struct.lo], eax
+ mov [ebx + qword_struct.hi], edx
+ pop ebx
+ ret
+%else
+ %error "Unimplemented function"
+%endif
+
+function asm_rdtsc, 1
+%ifidn __BITS__, 64
+ rdtsc
+ shl rdx, 32
+ or reg_ret, rdx
+ ret
+%elifidn __BITS__, 32
+ rdtsc
+ mov [reg_arg1 + qword_struct.lo], eax
+ mov [reg_arg1 + qword_struct.hi], edx
+ ret
+%else
+ %error "Unimplemented function"
+%endif
+
+function asm_wrmsr, 2
+%ifidn __BITS__, 64
+ push rbx
+ mov rbx, reg_arg2
+ mov rcx, reg_arg1
+ mov eax, ebx
+ mov rdx, rbx
+ shr rdx, 32
+ wrmsr
+ pop rbx
+ ret
+%elifidn __CONV__, x32_cdecl
+ push edi
+ push esi
+ mov edi, [reg_arg2 + qword_struct.lo]
+ mov esi, [reg_arg2 + qword_struct.hi]
+ mov eax, edi
+ mov edx, esi
+ wrmsr
+ pop esi
+ pop edi
+ ret
+%else
+ %error "Unimplemented function"
+%endif
+
+function get_kernel_tr_selector, 0
+ str reg_ret_16
+ ret
+
+function get_kernel_ldt, 0
+ sldt reg_ret_16
+ ret
+
+function get_kernel_gdt, 1
+ sgdt [reg_arg1]
+ ret
+
+function get_kernel_idt, 1
+ sidt [reg_arg1]
+ ret
+
+function get_kernel_rflags, 0
+ pushfw
+ pop reg_ret_16
+ ret
+
+function set_kernel_ldt, 1
+ lldt reg_arg1_16
+ ret
+
+function set_kernel_gdt, 1
+ lgdt [reg_arg1]
+ ret
+
+function set_kernel_idt, 1
+ lidt [reg_arg1]
+ ret
+
+function_get_reg cr0
+function_get_reg cr2
+function_get_reg cr3
+function_get_reg cr4
+function_get_reg dr0
+function_get_reg dr1
+function_get_reg dr2
+function_get_reg dr3
+function_get_reg dr6
+function_get_reg dr7
+
+function_set_reg cr0
+function_set_reg cr2
+function_set_reg cr3
+function_set_reg cr4
+function_set_reg dr0
+function_set_reg dr1
+function_set_reg dr2
+function_set_reg dr3
+function_set_reg dr6
+function_set_reg dr7
+
+function_get_segment cs
+function_get_segment ds
+function_get_segment es
+function_get_segment ss
+function_get_segment gs
+function_get_segment fs
+
+function_set_segment ds
+function_set_segment es
+function_set_segment ss
+function_set_segment gs
+function_set_segment fs
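The function_get_reg/function_set_reg and segment macros above stamp out uniform ASMCALL accessors, and on 32-bit builds the function macro's stack shim lets one set of C prototypes cover both cdecl and the 64-bit register conventions. A sketch of the C side (prototypes assumed to live in the corresponding core header):

    extern mword ASMCALL get_cr4(void);
    extern void ASMCALL set_cr4(mword val);

    /* Illustrative usage: set CR4.VMXE before VMXON. */
    set_cr4(get_cr4() | CR4_VMXE);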
diff --git a/core/include/cpu.h b/core/include/cpu.h
index 113b8763..05fc1db5 100644
--- a/core/include/cpu.h
+++ b/core/include/cpu.h
@@ -109,9 +109,9 @@ struct per_cpu_data {
* HAXM to implement smp_call_function()) is known to be prone to deadlocks:
* https://lists.apple.com/archives/darwin-kernel/2006/Dec/msg00006.html
*/
- vmx_error_t vmxon_err;
- vmx_error_t vmxoff_err;
- vmx_error_t invept_err;
+ vmx_result_t vmxon_res;
+ vmx_result_t vmxoff_res;
+ vmx_result_t invept_res;
/*
* bit 0: valid
@@ -174,15 +174,15 @@ bool cpu_has_feature(uint32_t feature);
void hax_panic_log(struct vcpu_t *vcpu);
void hax_clear_panic_log(struct vcpu_t *vcpu);
-vmx_error_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun);
+vmx_result_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun);
int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun);
-vmx_error_t vmptrld(paddr_t vmcs, struct vcpu_t *vcpu);
-vmx_error_t resume(paddr_t vmcs, struct vcpu_t *vcpu);
-vmx_error_t launch(paddr_t vmcs, struct vcpu_t *vcpu);
+vmx_result_t vmptrld(paddr_t vmcs, struct vcpu_t *vcpu);
+vmx_result_t resume(paddr_t vmcs, struct vcpu_t *vcpu);
+vmx_result_t launch(paddr_t vmcs, struct vcpu_t *vcpu);
-vmx_error_t cpu_vmxroot_leave(void);
-vmx_error_t cpu_vmxroot_enter(void);
+vmx_result_t cpu_vmxroot_leave(void);
+vmx_result_t cpu_vmxroot_enter(void);
extern struct hax_page *io_bitmap_page_a;
extern struct hax_page *io_bitmap_page_b;
diff --git a/core/include/ia32.h b/core/include/ia32.h
index e5c6d957..a6e9767b 100644
--- a/core/include/ia32.h
+++ b/core/include/ia32.h
@@ -220,26 +220,27 @@ enum {
EFLAGS_SETBITS = (1u << 1)
};
+// Intel SDM Vol. 2A: Table 3-4. Intel 64 and IA-32 General Exceptions
enum {
- EXC_DIVIDE_ERROR = 0,
- EXC_DEBUG = 1,
- EXC_NMI = 2,
- EXC_BREAK_POINT = 3,
- EXC_OVERFLOW = 4,
- EXC_BOUND_RANGE_EXCEEDED = 5,
- EXC_UNDEFINED_OPCODE = 6,
- EXC_NOMATH = 7,
- EXC_DOUBLEFAULT = 8,
- EXC_COPROC_SEG_OVERRUN = 9,
- EXC_INVALID_TSS = 10,
- EXC_SEG_NOT_PRESENT = 11,
- EXC_STACK_SEG_FAULT = 12,
- EXC_GENERAL_PROTECTION = 13,
- EXC_PAGEFAULT = 14,
- EXC_MATHFAULT = 16,
- EXC_ALIGNMENT_CHECK = 17,
- EXC_MACHINE_CHECK = 18,
- EXC_SIMD = 19
+ VECTOR_DE = 0, // Divide Error
+ VECTOR_DB = 1, // Debug
+ VECTOR_NMI = 2, // NMI Interrupt
+ VECTOR_BP = 3, // Breakpoint
+ VECTOR_OF = 4, // Overflow
+ VECTOR_BR = 5, // BOUND Range Exceeded
+ VECTOR_UD = 6, // Undefined Opcode
+ VECTOR_NM = 7, // Device Not Available (No Math Coprocessor)
+ VECTOR_DF = 8, // Double Fault
+ VECTOR_TS = 10, // Invalid TSS
+ VECTOR_NP = 11, // Segment Not Present
+ VECTOR_SS = 12, // Stack Segment Fault
+ VECTOR_GP = 13, // General Protection
+ VECTOR_PF = 14, // Page Fault
+ VECTOR_MF = 16, // Floating-Point Error (Math Error)
+ VECTOR_AC = 17, // Alignment Check
+ VECTOR_MC = 18, // Machine Check
+ VECTOR_XM = 19, // SIMD Floating-Point Numeric Error
+ VECTOR_VE = 20 // Virtualization Exception
};
// For IA32_APIC_BASE MSR (see IASDM Vol. 3A 10.4.4)
diff --git a/core/include/segments.h b/core/include/segments.h
index 6ddfaf53..dc9ee427 100644
--- a/core/include/segments.h
+++ b/core/include/segments.h
@@ -71,12 +71,6 @@ struct PACKED system_desc_t {
typedef struct system_desc_t system_desc_t;
-extern void set_kernel_gdt(system_desc_t *sys_desc);
-extern void set_kernel_idt(system_desc_t *sys_desc);
-extern void get_kernel_gdt(system_desc_t *sys_desc);
-extern void get_kernel_idt(system_desc_t *sys_desc);
-extern void load_kernel_ldt(uint16 sel);
-
/*
* This is to pass to VMCS, it should return uint64 on long or compatible mode
* and return uint32 on pure 32-bit mode.
@@ -170,14 +164,10 @@ static inline uint32 get_kernel_fs_gs_base(uint16 selector)
return desc_base;
}
-extern uint16 get_kernel_tr_selector(void);
-
static inline uint64 get_kernel_tr_base(void)
{
uint16 selector = get_kernel_tr_selector();
return get_tr_desc_base(selector);
}
-extern uint16 get_kernel_ldt(void);
-
#endif // HAX_CORE_SEGMENTS_H_
diff --git a/core/include/vcpu.h b/core/include/vcpu.h
index c096e2fb..fa167789 100644
--- a/core/include/vcpu.h
+++ b/core/include/vcpu.h
@@ -177,7 +177,7 @@ struct vcpu_t {
void *vcpu_host;
struct {
uint64 paused : 1;
- uint64 paniced : 1;
+ uint64 panicked : 1;
uint64 is_running : 1;
uint64 is_fpu_used : 1;
uint64 is_vmcs_loaded : 1;
diff --git a/core/include/vmx.h b/core/include/vmx.h
index 07123fab..2ada4d62 100644
--- a/core/include/vmx.h
+++ b/core/include/vmx.h
@@ -38,72 +38,68 @@
// Size of VMCS structure
#define IA32_VMX_VMCS_SIZE 4096
+// Intel SDM Vol. 3D: Table C-1. Basic Exit Reasons
enum {
- INT_EXCEPTION_NMI = 0, // An SW interrupt, exception or NMI has occurred
- EXT_INTERRUPT = 1, // An external interrupt has occurred
- TRIPLE_FAULT = 2, // Triple fault occurred
- INIT_EVENT = 3,
- SIPI_EVENT = 4,
-
- SMI_IO_EVENT = 5,
- SMI_OTHER_EVENT = 6,
- PENDING_INTERRUPT = 7,
- PENDING_NMI = 8,
- TASK_SWITCH = 9,
-
- CPUID_INSTRUCTION = 10, // Guest executed CPUID instruction
- GETSEC_INSTRUCTION = 11,
- HLT_INSTRUCTION = 12, // Guest executed HLT instruction
- INVD_INSTRUCTION = 13, // Guest executed INVD instruction
- INVLPG_INSTRUCTION = 14, // Guest executed INVLPG instruction
- RDPMC_INSTRUCTION = 15, // Guest executed RDPMC instruction
- RDTSC_INSTRUCTION = 16, // Guest executed RDTSC instruction
- RSM_INSTRUCTION = 17,
-
- // Guest executed VMX instruction
- VMCALL_INSTRUCTION = 18,
- VMCLEAR_INSTRUCTION = 19,
- VMLAUNCH_INSTRUCTION = 20,
- VMPTRLD_INSTRUCTION = 21,
- VMPTRST_INSTRUCTION = 22,
- VMREAD_INSTRUCTION = 23,
- VMRESUME_INSTRUCTION = 24,
- VMWRITE_INSTRUCTION = 25,
- VMXOFF_INSTRUCTION = 26,
- VMXON_INSTRUCTION = 27,
-
- CR_ACCESS = 28, // Guest accessed a control register
- DR_ACCESS = 29, // Guest attempted access to debug register
- IO_INSTRUCTION = 30, // Guest attempted io
- MSR_READ = 31, // Guest attempted to read an MSR
- MSR_WRITE = 32, // Guest attempted to write an MSR
-
- FAILED_VMENTER_GS = 33, // VMENTER failed due to guest state
- FAILED_VMENTER_MSR = 34, // VMENTER failed due to msr loading
-
- MWAIT_INSTRUCTION = 36,
- MTF_EXIT = 37,
-
- MONITOR_INSTRUCTION = 39,
- PAUSE_INSTRUCTION = 40,
- MACHINE_CHECK = 41,
- TPR_BELOW_THRESHOLD = 43,
-
- APIC_ACCESS = 44,
-
- GDT_IDT_ACCESS = 46,
- LDT_TR_ACCESS = 47,
-
- EPT_VIOLATION = 48,
- EPT_MISCONFIG = 49,
- INVEPT_INSTRUCTION = 50,
- RDTSCP_INSTRUCTION = 51,
- VMX_TIMER_EXIT = 52,
- INVVPID_INSTRUCTION = 53,
-
- WBINVD_INSTRUCTION = 54,
- XSETBV_INSTRUCTION = 55,
- APIC_WRITE = 56
+ VMX_EXIT_INT_EXCEPTION_NMI = 0, // An SW interrupt, exception or NMI has occurred
+ VMX_EXIT_EXT_INTERRUPT = 1, // An external interrupt has occurred
+ VMX_EXIT_TRIPLE_FAULT = 2, // Triple fault occurred
+ VMX_EXIT_INIT_EVENT = 3, // INIT signal arrived
+ VMX_EXIT_SIPI_EVENT = 4, // SIPI signal arrived
+ VMX_EXIT_SMI_IO_EVENT = 5,
+ VMX_EXIT_SMI_OTHER_EVENT = 6,
+ VMX_EXIT_PENDING_INTERRUPT = 7,
+ VMX_EXIT_PENDING_NMI = 8,
+ VMX_EXIT_TASK_SWITCH = 9, // Guest attempted a task switch
+ VMX_EXIT_CPUID = 10, // Guest executed CPUID instruction
+ VMX_EXIT_GETSEC = 11, // Guest executed GETSEC instruction
+ VMX_EXIT_HLT = 12, // Guest executed HLT instruction
+ VMX_EXIT_INVD = 13, // Guest executed INVD instruction
+ VMX_EXIT_INVLPG = 14, // Guest executed INVLPG instruction
+ VMX_EXIT_RDPMC = 15, // Guest executed RDPMC instruction
+ VMX_EXIT_RDTSC = 16, // Guest executed RDTSC instruction
+ VMX_EXIT_RSM = 17, // Guest executed RSM instruction in SMM
+ VMX_EXIT_VMCALL = 18,
+ VMX_EXIT_VMCLEAR = 19,
+ VMX_EXIT_VMLAUNCH = 20,
+ VMX_EXIT_VMPTRLD = 21,
+ VMX_EXIT_VMPTRST = 22,
+ VMX_EXIT_VMREAD = 23,
+ VMX_EXIT_VMRESUME = 24,
+ VMX_EXIT_VMWRITE = 25,
+ VMX_EXIT_VMXOFF = 26,
+ VMX_EXIT_VMXON = 27,
+ VMX_EXIT_CR_ACCESS = 28, // Guest accessed a control register
+ VMX_EXIT_DR_ACCESS = 29, // Guest attempted access to debug register
+ VMX_EXIT_IO = 30, // Guest attempted I/O
+ VMX_EXIT_MSR_READ = 31, // Guest attempted to read an MSR
+ VMX_EXIT_MSR_WRITE = 32, // Guest attempted to write an MSR
+ VMX_EXIT_FAILED_VMENTER_GS = 33, // VMENTER failed due to guest state
+ VMX_EXIT_FAILED_VMENTER_MSR = 34, // VMENTER failed due to MSR loading
+ VMX_EXIT_MWAIT = 36,
+ VMX_EXIT_MTF_EXIT = 37,
+ VMX_EXIT_MONITOR = 39,
+ VMX_EXIT_PAUSE = 40,
+ VMX_EXIT_MACHINE_CHECK = 41,
+ VMX_EXIT_TPR_BELOW_THRESHOLD = 43,
+ VMX_EXIT_APIC_ACCESS = 44,
+ VMX_EXIT_GDT_IDT_ACCESS = 46,
+ VMX_EXIT_LDT_TR_ACCESS = 47,
+ VMX_EXIT_EPT_VIOLATION = 48,
+ VMX_EXIT_EPT_MISCONFIG = 49,
+ VMX_EXIT_INVEPT = 50,
+ VMX_EXIT_RDTSCP = 51,
+ VMX_EXIT_VMX_TIMER_EXIT = 52,
+ VMX_EXIT_INVVPID = 53,
+ VMX_EXIT_WBINVD = 54,
+ VMX_EXIT_XSETBV = 55,
+ VMX_EXIT_APIC_WRITE = 56,
+ VMX_EXIT_RDRAND = 57,
+ VMX_EXIT_INVPCID = 58,
+ VMX_EXIT_VMFUNC = 59,
+ VMX_EXIT_ENCLS = 60,
+ VMX_EXIT_RDSEED = 61,
+ VMX_EXIT_XSAVES = 63,
+ VMX_EXIT_XRSTORS = 64
};
// PIN-BASED CONTROLS
@@ -171,42 +167,49 @@ enum {
#define ENTRY_CONTROL_LOAD_EFER 0x00008000
#define ENTRY_CONTROLS_DEFINED 0x0000ee04
-enum {
- VMX_SUCCEED = 0,
- VMX_FAIL_VALID = EFLAGS_ZF,
- VMX_FAIL_INVALID = EFLAGS_CF,
- VMX_FAIL_MASK = (VMX_FAIL_VALID | VMX_FAIL_INVALID)
-};
-
-// VMX error reasons (see Table J-1)
-enum error_id_t {
- VMCALL_IN_VMX_ROOT = 1,
- VMCLEAR_INVLD_ADDR = 2,
- VMCLEAR_VMXON_PTR = 3,
- VMLAUNCH_NON_CLEAR_VMCS = 4,
- VMRESUME_NON_LAUNCHED_VMCS = 5,
- VMRESUME_CORRUPT_VMCS = 6,
- VM_ENTRY_INVLD_CTRL = 7,
- VM_ENTRY_INVLD_HOST_STATE = 8,
- VMPTRLD_INVLD_ADDR = 9,
- VMPTRLD_VMXON_PTR = 10,
- VMPTRLD_INVLD_VMCS_REV = 11,
- VMREAD_VMWRITE_INVLD_FIELD = 12,
- VMWRITE_READONLY_FIELD = 13,
- VMXON_IN_VMX_ROOT = 15,
- VM_ENTRY_INVLD_VMCS = 16,
- VM_ENTRY_NON_LAUNCHED_VMCS = 17,
- VM_ENTRY_NON_VMXON_PTR = 18,
- VMCALL_NON_CLEAR_VMCS = 19,
- VMCALL_INVLD_VM_EXIT_CTRL = 20,
- VMCALL_INVLD_MSEG_REV = 22,
- VMXOFF_IN_SMM = 23,
- VMCALL_INVLD_SMM = 24,
- VM_ENTRY_INVLD_CTRL_SMM = 25,
- VM_ENTRY_MOV_SS = 26
-};
-
-typedef enum error_id_t error_id_t;
+// Intel SDM Vol. 3C: 30.2 Conventions
+typedef enum vmx_result_t {
+ /* VMsucceed
+ * Operation succeeded (OSZPAC flags are 0) */
+ VMX_SUCCEED = 0,
+
+ /* VMfailInvalid
+ * Operation failed and VMCS pointer is invalid (CF=1) */
+ VMX_FAIL_INVALID = 1,
+
+ /* VMfailValid(ErrorNumber)
+ * Operation failed and VMCS pointer is valid (ZF=1) */
+ VMX_FAIL_VALID = 2,
+} vmx_result_t;
+
+// Intel SDM Vol. 3C: 30.4 VM Instruction Error Numbers
+typedef enum vmx_error_t {
+ VMX_ERROR_VMCALL_ROOT = 1, // VMCALL executed in VMX root operation
+ VMX_ERROR_VMCLEAR_PADDR_INVALID = 2, // VMCLEAR with invalid physical address
+ VMX_ERROR_VMCLEAR_VMXON_PTR = 3, // VMCLEAR with VMXON pointer
+ VMX_ERROR_VMLAUNCH_VMCS_UNCLEAR = 4, // VMLAUNCH with non-clear VMCS
+ VMX_ERROR_VMRESUME_VMCS_UNLAUNCHED = 5, // VMRESUME with non-launched VMCS
+ VMX_ERROR_VMRESUME_AFTER_VMXOFF = 6, // VMRESUME after VMXOFF
+ VMX_ERROR_ENTRY_CTRL_FIELDS_INVALID = 7, // VM entry with invalid control field(s)
+ VMX_ERROR_ENTRY_HOST_FIELDS_INVALID = 8, // VM entry with invalid host-state field(s)
+ VMX_ERROR_VMPTRLD_PADDR_INVALID = 9, // VMPTRLD with invalid physical address
+ VMX_ERROR_VMPTRLD_VMXON_PTR = 10, // VMPTRLD with VMXON pointer
+ VMX_ERROR_VMPTRLD_VMCSREV_INVALID = 11, // VMPTRLD with incorrect VMCS revision identifier
+ VMX_ERROR_VMREAD_VMWRITE_INVALID = 12, // VMREAD/VMWRITE from/to unsupported VMCS component
+ VMX_ERROR_VMWRITE_READONLY = 13, // VMWRITE to read-only VMCS component
+ VMX_ERROR_VMXON_ROOT = 15, // VMXON executed in VMX root operation
+ VMX_ERROR_ENTRY_VMCS_INVALID = 16, // VM entry with invalid executive-VMCS pointer
+ VMX_ERROR_ENTRY_VMCS_UNLAUNCHED = 17, // VM entry with non-launched executive VMCS
+ VMX_ERROR_ENTRY_VMCS_NOT_VMXON = 18, // VM entry with executive-VMCS pointer not VMXON pointer
+ VMX_ERROR_VMCALL_VMCS_UNCLEAR = 19, // VMCALL with non-clear VMCS
+ VMX_ERROR_VMCALL_EXIT_INVALID = 20, // VMCALL with invalid VM-exit control fields
+ VMX_ERROR_VMCALL_MSEG_INVALID = 22, // VMCALL with incorrect MSEG revision identifier
+ VMX_ERROR_VMXOFF_SMM_DUALMONITOR = 23, // VMXOFF under dual-monitor treatment of SMIs and SMM
+ VMX_ERROR_VMCALL_SMM_INVALID = 24, // VMCALL with invalid SMM-monitor features
+ VMX_ERROR_ENTRY_EXECCTRL_INVALID = 25, // VM entry with invalid VM-execution control fields in executive VMCS
+ VMX_ERROR_ENTRY_MOV_SS = 26, // VM entry with events blocked by MOV SS
+ VMX_ERROR_INVEPT_INVALID = 28, // Invalid operand to INVEPT/INVVPID
+} vmx_error_t;
// Exit qualification 64-bit OK
union exit_qualification_t {
@@ -484,10 +487,26 @@ typedef enum encode_t encode_t;
#define ENCODE_MASK 0x3
#define ENCODE_SHIFT 13
-extern uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component);
-extern uint64 vmx_vmread_natural(struct vcpu_t *vcpu,
- component_index_t component);
-extern uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component);
+vmx_result_t ASMCALL asm_invept(uint type, struct invept_desc *desc);
+vmx_result_t ASMCALL asm_vmclear(const paddr_t *addr_in);
+vmx_result_t ASMCALL asm_vmptrld(const paddr_t *addr_in);
+vmx_result_t ASMCALL asm_vmxon(const paddr_t *addr_in);
+vmx_result_t ASMCALL asm_vmxoff(void);
+vmx_result_t ASMCALL asm_vmptrst(paddr_t *addr_out);
+vmx_result_t ASMCALL asm_vmxrun(struct vcpu_state_t *state, uint16 launch);
+
+mword ASMCALL vmx_get_rip(void);
+
+uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component);
+uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component);
+uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component);
+
+void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
+ component_index_t component, mword source_val);
+void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
+ component_index_t component, uint64 source_val);
+void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
+ component_index_t component, uint64 source_val);
static inline uint64 __vmread_common(struct vcpu_t *vcpu,
component_index_t component)
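With vmx_result_t modeling the SDM's three outcomes, VMX_FAIL_VALID is the only case in which VMX_INSTRUCTION_ERROR_CODE holds a vmx_error_t number. A minimal sketch of the pattern, mirroring cpu_vmentry_failed() in core/cpu.c (illustrative fragment):

    vmx_result_t r = asm_vmptrld(&vmcs_phy);
    if (r == VMX_FAIL_VALID) {
        /* A current VMCS exists; read the instruction error number. */
        uint64 err = vmx_vmread(vcpu, VMX_INSTRUCTION_ERROR_CODE);
        hax_error("VMPTRLD error %llu\n", err);
    } else if (r == VMX_FAIL_INVALID) {
        /* CF=1: no current VMCS, so there is no error number to read. */
    }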
diff --git a/core/intr_exc.c b/core/intr_exc.c
index e4440639..1ca25fa8 100644
--- a/core/intr_exc.c
+++ b/core/intr_exc.c
@@ -183,7 +183,7 @@ static int is_double_fault(uint8 first_vec, uint8 second_vec)
if (is_extern_interrupt(first_vec))
return 0;
- if ((first_vec == EXC_PAGEFAULT && (exc_bitmap1 & (1u << second_vec))) ||
+ if ((first_vec == VECTOR_PF && (exc_bitmap1 & (1u << second_vec))) ||
((exc_bitmap2 & (1u << first_vec)) && (exc_bitmap2 &
(1u << second_vec))))
return 1;
@@ -207,7 +207,7 @@ void hax_inject_exception(struct vcpu_t *vcpu, uint8 vector, uint32 error_code)
first_vec = (uint8) (vect_info & INTR_INFO_VECTOR_MASK);
if (is_double_fault(first_vec, vector)) {
intr_info = (1 << 31) | (1 << 11) | (EXCEPTION << 8)
- | EXC_DOUBLEFAULT;
+ | VECTOR_DF;
error_code = 0;
} else {
intr_info = (1 << 31) | (EXCEPTION << 8) | vector;
@@ -216,7 +216,7 @@ void hax_inject_exception(struct vcpu_t *vcpu, uint8 vector, uint32 error_code)
intr_info = (1 << 31) | (EXCEPTION << 8) | vector;
if (error_code != NO_ERROR_CODE) {
intr_info |= 1 << 11;
- if (vector == EXC_PAGEFAULT) {
+ if (vector == VECTOR_PF) {
vcpu->vmcs_pending_entry_error_code = 1;
vmx(vcpu, entry_exception_error_code) = error_code;
} else {
@@ -225,7 +225,7 @@ void hax_inject_exception(struct vcpu_t *vcpu, uint8 vector, uint32 error_code)
}
}
- if (vector == EXC_PAGEFAULT) {
+ if (vector == VECTOR_PF) {
vcpu->vmcs_pending_entry_instr_length = 1;
vmx(vcpu, entry_instr_length) = exit_instr_length;
vcpu->vmcs_pending_entry_intr_info = 1;
@@ -242,5 +242,5 @@ void hax_inject_exception(struct vcpu_t *vcpu, uint8 vector, uint32 error_code)
void hax_inject_page_fault(struct vcpu_t *vcpu, mword error_code)
{
- hax_inject_exception(vcpu, EXC_PAGEFAULT, error_code);
+ hax_inject_exception(vcpu, VECTOR_PF, error_code);
}
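The renamed vectors plug into the double-fault promotion in hax_inject_exception() above: when a second contributory exception is injected while one is still pending, the interrupt info is rebuilt as VECTOR_DF with error code 0. A sketch of how that plays out (illustrative; assumes the pending #PF and the new #GP pair per the IA-32 double-fault rules):

    hax_inject_exception(vcpu, VECTOR_PF, pf_error_code);
    /* Second injection before delivery: promoted to a double fault. */
    hax_inject_exception(vcpu, VECTOR_GP, 0);   /* delivered as VECTOR_DF */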
diff --git a/core/vcpu.c b/core/vcpu.c
index 7580636b..c50d0a3f 100644
--- a/core/vcpu.c
+++ b/core/vcpu.c
@@ -367,31 +367,23 @@ static int vcpu_vpid_free(struct vcpu_t *vcpu)
}
static int (*handler_funcs[])(struct vcpu_t *vcpu, struct hax_tunnel *htun) = {
- exit_exc_nmi,
- exit_interrupt,
- exit_triple_fault,
- 0, 0, 0, 0,
- exit_interrupt_window, // Interrupt window
- exit_interrupt_window, // NMI window
- 0,
- exit_cpuid,
- 0,
- exit_hlt,
- 0,
- exit_invlpg,
- 0,
- exit_rdtsc,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 17 ... 27
- exit_cr_access,
- exit_dr_access,
- exit_io_access,
- exit_msr_read,
- exit_msr_write,
- exit_invalid_guest_state,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 34 ... 47
- exit_ept_violation,
- exit_ept_misconfiguration,
- 0, 0, 0, 0, 0, 0 // 50 ... 55
+ [VMX_EXIT_INT_EXCEPTION_NMI] = exit_exc_nmi,
+ [VMX_EXIT_EXT_INTERRUPT] = exit_interrupt,
+ [VMX_EXIT_TRIPLE_FAULT] = exit_triple_fault,
+ [VMX_EXIT_PENDING_INTERRUPT] = exit_interrupt_window,
+ [VMX_EXIT_PENDING_NMI] = exit_interrupt_window,
+ [VMX_EXIT_CPUID] = exit_cpuid,
+ [VMX_EXIT_HLT] = exit_hlt,
+ [VMX_EXIT_INVLPG] = exit_invlpg,
+ [VMX_EXIT_RDTSC] = exit_rdtsc,
+ [VMX_EXIT_CR_ACCESS] = exit_cr_access,
+ [VMX_EXIT_DR_ACCESS] = exit_dr_access,
+ [VMX_EXIT_IO] = exit_io_access,
+ [VMX_EXIT_MSR_READ] = exit_msr_read,
+ [VMX_EXIT_MSR_WRITE] = exit_msr_write,
+ [VMX_EXIT_FAILED_VMENTER_GS] = exit_invalid_guest_state,
+ [VMX_EXIT_EPT_VIOLATION] = exit_ept_violation,
+ [VMX_EXIT_EPT_MISCONFIG] = exit_ept_misconfiguration,
};
static int nr_handlers = ARRAY_ELEMENTS(handler_funcs);
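The table above uses designated array initializers, so exit reasons without a handler are NULL rather than positional zero padding; a dispatcher therefore needs both a bounds check and a NULL check. A sketch of the consuming code (not the exact body of cpu_vmexit_handler; null_handler is a hypothetical fallback):

    static int dispatch_exit(struct vcpu_t *vcpu, exit_reason_t exit_reason,
                             struct hax_tunnel *htun)
    {
        uint32 basic = exit_reason.basic_reason;
        if (basic < nr_handlers && handler_funcs[basic])
            return handler_funcs[basic](vcpu, htun);
        return null_handler(vcpu, htun);
    }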
@@ -1184,7 +1176,7 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
}
}
- exc_bitmap = (1u << EXC_MACHINE_CHECK) | (1u << EXC_NOMATH);
+ exc_bitmap = (1u << VECTOR_MC) | (1u << VECTOR_NM);
#ifdef __x86_64__
exit_ctls = EXIT_CONTROL_HOST_ADDR_SPACE_SIZE | EXIT_CONTROL_LOAD_EFER |
@@ -1323,7 +1315,7 @@ void vcpu_load_host_state(struct vcpu_t *vcpu)
// Should be called when lock is got
vcpu->state->_cr2 = get_cr2();
- load_kernel_ldt(hstate->ldt_selector);
+ set_kernel_ldt(hstate->ldt_selector);
if (hstate->seg_valid & HOST_SEG_VALID_ES) {
set_kernel_es(hstate->es);
}
@@ -1785,7 +1777,7 @@ static void vmwrite_cr(struct vcpu_t *vcpu)
if (vtlb_active(vcpu)) {
hax_debug("vTLB mode, cr0 %llx\n", vcpu->state->_cr0);
vcpu->mmu->mmu_mode = MMU_MODE_VTLB;
- exc_bitmap |= 1u << EXC_PAGEFAULT;
+ exc_bitmap |= 1u << VECTOR_PF;
cr0 |= CR0_WP;
cr0_mask |= CR0_WP;
cr4 |= CR4_PGE | CR4_PAE;
@@ -2162,11 +2154,11 @@ static int exit_exc_nmi(struct vcpu_t *vcpu, struct hax_tunnel *htun)
hax_debug("exception vmexit vector:%x\n", exit_intr_info.vector);
switch (exit_intr_info.vector) {
- case EXC_NMI: {
+ case VECTOR_NMI: {
__nmi();
return HAX_RESUME;
}
- case EXC_PAGEFAULT: {
+ case VECTOR_PF: {
if (vtlb_active(vcpu)) {
if (handle_vtlb(vcpu))
return HAX_RESUME;
@@ -2179,33 +2171,33 @@ static int exit_exc_nmi(struct vcpu_t *vcpu, struct hax_tunnel *htun)
}
break;
}
- case EXC_NOMATH: {
+ case VECTOR_NM: {
cr0 = vcpu_read_cr(state, 0);
if (cr0 & CR0_TS) {
uint32 exc_bitmap = vmx(vcpu, exc_bitmap);
if (!vcpu->is_fpu_used) {
vcpu->is_fpu_used = 1;
}
- exc_bitmap &= ~(1u << EXC_NOMATH);
+ exc_bitmap &= ~(1u << VECTOR_NM);
vmwrite(vcpu, VMX_EXCEPTION_BITMAP,
vmx(vcpu, exc_bitmap) = exc_bitmap);
}
return HAX_RESUME;
}
- case EXC_MACHINE_CHECK: {
+ case VECTOR_MC: {
hax_panic_vcpu(vcpu, "Machine check happens!\n");
dump_vmcs(vcpu);
handle_machine_check(vcpu);
break;
}
- case EXC_DOUBLEFAULT: {
+ case VECTOR_DF: {
hax_panic_vcpu(vcpu, "Double fault!\n");
dump_vmcs(vcpu);
break;
}
}
- if (exit_intr_info.vector == EXC_PAGEFAULT) {
+ if (exit_intr_info.vector == VECTOR_PF) {
state->_cr2 = vmx(vcpu, exit_qualification.address);
}
@@ -2282,7 +2274,7 @@ static int exit_triple_fault(struct vcpu_t *vcpu, struct hax_tunnel *htun)
static int exit_interrupt_window(struct vcpu_t *vcpu, struct hax_tunnel *htun)
{
vmx(vcpu, pcpu_ctls) &=
- vmx(vcpu, exit_reason).basic_reason == PENDING_INTERRUPT
+ vmx(vcpu, exit_reason).basic_reason == VMX_EXIT_PENDING_INTERRUPT
? ~INTERRUPT_WINDOW_EXITING : ~NMI_WINDOW_EXITING;
vmwrite(vcpu, VMX_PRIMARY_PROCESSOR_CONTROLS, vmx(vcpu, pcpu_ctls));
@@ -2306,7 +2298,7 @@ static void handle_cpuid(struct vcpu_t *vcpu, struct hax_tunnel *htun)
args.eax = state->_eax;
args.ecx = state->_ecx;
- __handle_cpuid(&args);
+ asm_cpuid(&args);
state->_eax = args.eax;
state->_ecx = args.ecx;
state->_edx = args.edx;
@@ -2638,13 +2630,13 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun)
" _cr4=0x%llx, _efer=0x%x\n", vcpu->vcpu_id,
state->_cr0, val, state->_cr4, state->_efer);
if ((val & CR0_PG) && !(val & CR0_PE)) {
- hax_inject_exception(vcpu, EXC_GENERAL_PROTECTION, 0);
+ hax_inject_exception(vcpu, VECTOR_GP, 0);
return HAX_RESUME;
}
if (!(state->_cr0 & CR0_PG) && (val & CR0_PG) &&
(state->_efer & IA32_EFER_LME)) {
if (!(state->_cr4 & CR4_PAE)) {
- hax_inject_exception(vcpu, EXC_GENERAL_PROTECTION, 0);
+ hax_inject_exception(vcpu, VECTOR_GP, 0);
return HAX_RESUME;
}
}
@@ -2673,7 +2665,7 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun)
"_cr0=0x%llx, _efer=0x%x\n", vcpu->vcpu_id,
state->_cr4, val, state->_cr0, state->_efer);
if ((state->_efer & IA32_EFER_LMA) && !(val & CR4_PAE)) {
- hax_inject_exception(vcpu, EXC_GENERAL_PROTECTION, 0);
+ hax_inject_exception(vcpu, VECTOR_GP, 0);
return HAX_RESUME;
}
@@ -2801,7 +2793,7 @@ static int exit_dr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun)
state->_dr6 |= DR6_BD;
vmwrite(vcpu, GUEST_DR7, state->_dr7);
// Priority 4 fault
- hax_inject_exception(vcpu, EXC_DEBUG, NO_ERROR_CODE);
+ hax_inject_exception(vcpu, VECTOR_DB, NO_ERROR_CODE);
return HAX_RESUME;
}
@@ -2824,7 +2816,7 @@ static int exit_dr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun)
}
case 4: {
if (state->_cr4 & CR4_DE) {
- hax_inject_exception(vcpu, EXC_UNDEFINED_OPCODE, NO_ERROR_CODE);
+ hax_inject_exception(vcpu, VECTOR_UD, NO_ERROR_CODE);
return HAX_RESUME;
}
// Fall through
@@ -2835,7 +2827,7 @@ static int exit_dr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun)
}
case 5: {
if (state->_cr4 & CR4_DE) {
- hax_inject_exception(vcpu, EXC_UNDEFINED_OPCODE, NO_ERROR_CODE);
+ hax_inject_exception(vcpu, VECTOR_UD, NO_ERROR_CODE);
return HAX_RESUME;
}
// Fall through
@@ -3000,7 +2992,7 @@ static int exit_msr_read(struct vcpu_t *vcpu, struct hax_tunnel *htun)
state->_rax = val & 0xffffffff;
state->_rdx = (val >> 32) & 0xffffffff;
} else {
- hax_inject_exception(vcpu, EXC_GENERAL_PROTECTION, 0);
+ hax_inject_exception(vcpu, VECTOR_GP, 0);
return HAX_RESUME;
}
@@ -3017,7 +3009,7 @@ static int exit_msr_write(struct vcpu_t *vcpu, struct hax_tunnel *htun)
htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason;
if (handle_msr_write(vcpu, msr, val)) {
- hax_inject_exception(vcpu, EXC_GENERAL_PROTECTION, 0);
+ hax_inject_exception(vcpu, VECTOR_GP, 0);
return HAX_RESUME;
}
@@ -3943,7 +3935,7 @@ int vcpu_event_pending(struct vcpu_t *vcpu)
void vcpu_set_panic(struct vcpu_t *vcpu)
{
- vcpu->paniced = 1;
+ vcpu->panicked = 1;
}
static int vcpu_set_apic_base(struct vcpu_t *vcpu, uint64 val)
diff --git a/core/vmx_ops.asm b/core/vmx_ops.asm
new file mode 100644
index 00000000..8d9cfe4a
--- /dev/null
+++ b/core/vmx_ops.asm
@@ -0,0 +1,375 @@
+;
+; Copyright (c) 2011 Intel Corporation
+; Copyright (c) 2018 Alexandro Sanchez Bach
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+;
+; 3. Neither the name of the copyright holder nor the names of its
+; contributors may be used to endorse or promote products derived from
+; this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+;
+; Detect architecture
+;
+%ifidn __OUTPUT_FORMAT__, elf32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, win32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, macho32
+ %define __BITS__ 32
+ %define __CONV__ x32_cdecl
+%elifidn __OUTPUT_FORMAT__, elf64
+ %define __BITS__ 64
+ %define __CONV__ x64_systemv
+%elifidn __OUTPUT_FORMAT__, win64
+ %define __BITS__ 64
+ %define __CONV__ x64_microsoft
+%elifidn __OUTPUT_FORMAT__, macho64
+ %define __BITS__ 64
+ %define __CONV__ x64_systemv
+%endif
+
+;
+; Describe calling convention
+;
+%ifidn __CONV__, x32_cdecl
+;
+; Although cdecl does not place arguments in registers, we simulate fastcall
+; by reading the first 2 stack arguments into the ecx/edx respectively.
+;
+ %define reg_arg1_16 cx
+ %define reg_arg1_32 ecx
+ %define reg_arg1 reg_arg1_32
+ %define reg_arg2_16 dx
+ %define reg_arg2_32 edx
+ %define reg_arg2 reg_arg2_32
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret reg_ret_32
+%elifidn __CONV__, x64_systemv
+ %define reg_arg1_16 di
+ %define reg_arg1_32 edi
+ %define reg_arg1_64 rdi
+ %define reg_arg1 reg_arg1_64
+ %define reg_arg2_16 si
+ %define reg_arg2_32 esi
+ %define reg_arg2_64 rsi
+ %define reg_arg2 reg_arg2_64
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret_64 rax
+ %define reg_ret reg_ret_64
+%elifidn __CONV__, x64_microsoft
+ %define reg_arg1_16 cx
+ %define reg_arg1_32 ecx
+ %define reg_arg1_64 rcx
+ %define reg_arg1 reg_arg1_64
+ %define reg_arg2_16 dx
+ %define reg_arg2_32 edx
+ %define reg_arg2_64 rdx
+ %define reg_arg2 reg_arg2_64
+ %define reg_ret_16 ax
+ %define reg_ret_32 eax
+ %define reg_ret_64 rax
+ %define reg_ret reg_ret_64
+%endif
+
+;
+; Helpers
+;
+
+; Macro: function
+; Declares a function. Arguments:
+; - %1 Name of the function
+; - %2 Number of arguments
+;
+%macro function 2
+ global %1
+ %1:
+%ifidn __CONV__, x32_cdecl
+ %if %2 >= 3
+ %error "Unsupported number of arguments"
+ %else
+ %if %2 >= 1
+ mov reg_arg1, [esp + 0x4]
+ %endif
+ %if %2 >= 2
+ mov reg_arg2, [esp + 0x8]
+ %endif
+ %endif
+%endif
+%endmacro
+
+section .text
+
+struc qword_struct
+ .lo resd 1
+ .hi resd 1
+endstruc
+
+struc vcpu_state
+ ._rax resq 1
+ ._rcx resq 1
+ ._rdx resq 1
+ ._rbx resq 1
+ ._rsp resq 1
+ ._rbp resq 1
+ ._rsi resq 1
+ ._rdi resq 1
+ ._r8 resq 1
+ ._r9 resq 1
+ ._r10 resq 1
+ ._r11 resq 1
+ ._r12 resq 1
+ ._r13 resq 1
+ ._r14 resq 1
+ ._r15 resq 1
+endstruc
+
+; Macro: vmx_check
+; Maps the RFLAGS outcome of the preceding VMX instruction to a vmx_result_t
+; value (Intel SDM Vol. 3C, 30.2): ZF=1 => VMfailValid, CF=1 => VMfailInvalid.
+%macro vmx_check 0
+ pushfw
+ pop ax
+ test ax, 0x40 ; RFLAGS.ZF
+ jnz %%fail_valid
+ test ax, 0x01 ; RFLAGS.CF
+ jnz %%fail_invalid
+ mov reg_ret, 0 ; VMX_SUCCEED
+ jmp %%continue
+%%fail_invalid:
+ mov reg_ret, 1 ; VMX_FAIL_INVALID
+ jmp %%continue
+%%fail_valid:
+ mov reg_ret, 2 ; VMX_FAIL_VALID
+%%continue:
+%endmacro
+
+function asm_invept, 2
+ invept reg_arg1, [reg_arg2]
+ vmx_check
+ ret
+
+function asm_vmxon, 1
+ vmxon [reg_arg1]
+ vmx_check
+ ret
+
+function asm_vmxoff, 0
+ vmxoff
+ vmx_check
+ ret
+
+function asm_vmclear, 1
+ vmclear [reg_arg1]
+ vmx_check
+ ret
+
+function asm_vmptrld, 1
+ vmptrld [reg_arg1]
+ vmx_check
+ ret
+
+function asm_vmptrst, 1
+ vmptrst [reg_arg1]
+ vmx_check
+ ret
+
+function asm_vmread, 1
+ xor reg_ret, reg_ret
+ vmread reg_ret, reg_arg1
+ ret
+
+function asm_vmwrite, 2
+ vmwrite reg_arg1, reg_arg2
+ ret
+
+function asm_vmxrun, 2
+%ifidn __BITS__, 64
+ pushfq
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+ push rcx
+ push rdx
+ push rsi
+ push rdi
+ push rbp
+ push rax
+ push rbx
+ ; write host RSP (VMCS field encoding 6C14h = HOST_RSP)
+ mov ebx, 6C14h
+ mov rax, rsp
+ sub rax, 8h
+ vmwrite rbx, rax
+ pop rbx
+ pop rax
+ push rax
+ push rbx
+ ; push the state
+ push reg_arg1
+ cmp reg_arg2_16, 1h ; test launch flag now; the MOVs below preserve flags
+ mov rax, reg_arg1
+ mov rcx, [rax + vcpu_state._rcx]
+ mov rdx, [rax + vcpu_state._rdx]
+ mov rbx, [rax + vcpu_state._rbx]
+ mov rbp, [rax + vcpu_state._rbp]
+ mov rsi, [rax + vcpu_state._rsi]
+ mov rdi, [rax + vcpu_state._rdi]
+ mov r8, [rax + vcpu_state._r8]
+ mov r9, [rax + vcpu_state._r9]
+ mov r10, [rax + vcpu_state._r10]
+ mov r11, [rax + vcpu_state._r11]
+ mov r12, [rax + vcpu_state._r12]
+ mov r13, [rax + vcpu_state._r13]
+ mov r14, [rax + vcpu_state._r14]
+ mov r15, [rax + vcpu_state._r15]
+ mov rax, [rax + vcpu_state._rax]
+ je .resume
+ vmlaunch
+ jmp .exit_entry_fail
+.resume:
+ vmresume
+ jmp .exit_entry_fail
+.exit_entry:
+ push rdi
+ mov rdi, [rsp+8]
+ mov [rdi + vcpu_state._rax], rax
+ mov [rdi + vcpu_state._rcx], rcx
+ mov [rdi + vcpu_state._rdx], rdx
+ pop rcx
+ mov [rdi + vcpu_state._rbx], rbx
+ mov [rdi + vcpu_state._rbp], rbp
+ mov [rdi + vcpu_state._rsi], rsi
+ mov [rdi + vcpu_state._rdi], rcx
+ mov [rdi + vcpu_state._r8], r8
+ mov [rdi + vcpu_state._r9], r9
+ mov [rdi + vcpu_state._r10], r10
+ mov [rdi + vcpu_state._r11], r11
+ mov [rdi + vcpu_state._r12], r12
+ mov [rdi + vcpu_state._r13], r13
+ mov [rdi + vcpu_state._r14], r14
+ mov [rdi + vcpu_state._r15], r15
+.exit_entry_fail:
+ ; pop the state
+ pop rbx
+ pop rbx
+ pop rax
+ pop rbp
+ pop rdi
+ pop rsi
+ pop rdx
+ pop rcx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ vmx_check
+ popfq
+ ret
+%elifidn __BITS__, 32
+ pushfd
+ push ecx
+ push edx
+ push esi
+ push edi
+ push ebp
+ push eax
+ push ebx
+ ; write host RSP (VMCS field encoding 6C14h = HOST_RSP)
+ mov ebx, 6C14h
+ mov eax, esp
+ sub eax, 4h
+ vmwrite ebx, eax
+ pop ebx
+ pop eax
+ push eax
+ push ebx
+ ; push the state
+ mov eax, reg_arg1
+ push eax
+ cmp reg_arg2_16, 1h ; test launch flag now; the MOVs below preserve flags
+ mov ecx, [eax + vcpu_state._rcx]
+ mov edx, [eax + vcpu_state._rdx]
+ mov ebx, [eax + vcpu_state._rbx]
+ mov ebp, [eax + vcpu_state._rbp]
+ mov esi, [eax + vcpu_state._rsi]
+ mov edi, [eax + vcpu_state._rdi]
+ mov eax, [eax + vcpu_state._rax]
+ je .resume
+ vmlaunch
+ jmp .exit_entry_fail
+.resume:
+ vmresume
+ jmp .exit_entry_fail
+.exit_entry:
+ push edi
+ mov edi, [esp+4]
+ mov [edi + vcpu_state._rax], eax
+ mov [edi + vcpu_state._rcx], ecx
+ mov [edi + vcpu_state._rdx], edx
+ pop ecx
+ mov [edi + vcpu_state._rbx], ebx
+ mov [edi + vcpu_state._rbp], ebp
+ mov [edi + vcpu_state._rsi], esi
+ mov [edi + vcpu_state._rdi], ecx
+.exit_entry_fail:
+ ; drop the state pointer, then restore the host registers
+ pop eax
+ pop ebx
+ pop eax
+ pop ebp
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ vmx_check
+ popfd
+ ret
+%else
+ %error "Unimplemented function"
+%endif
+
+function vmx_get_rip, 0
+%ifidn __BITS__, 64
+ ; Notes:
+ ; 1. For some reason, on macOS (macho64), MOV loads the wrong address
+ ; into reg_ret, i.e. not the intended VM-exit host RIP.
+ ; 2. macho64 requires the REL specifier to be used in conjunction with LEA
+ ; to derive the host RIP using RIP-relative addressing.
+ lea reg_ret, [rel asm_vmxrun.exit_entry]
+%elifidn __BITS__, 32
+ mov reg_ret, asm_vmxrun.exit_entry
+%else
+ %error "Unimplemented function"
+%endif
+ ret
diff --git a/darwin/hax_driver/com_intel_hax/asm/ia32.c b/darwin/hax_driver/com_intel_hax/asm/ia32.c
deleted file mode 100644
index 1539f86d..00000000
--- a/darwin/hax_driver/com_intel_hax/asm/ia32.c
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * Copyright (c) 2009 Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "../../../../core/include/types.h"
-#include "../../../../core/include/segments.h"
-#include "../../../../core/include/ia32.h"
-#include "../../../../core/include/vcpu.h"
-#include "../../../../core/include/cpuid.h"
-#include "../../../../include/hax.h"
-
-mword get_cr0(void)
-{
- mword val;
- asm volatile (
- "mov %%cr0, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_cr2(void)
-{
- mword val;
- asm volatile (
- "mov %%cr2, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_cr4(void)
-{
- mword val;
- asm volatile (
- "mov %%cr4, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr0(void)
-{
- mword val;
- asm volatile (
- "mov %%dr0, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr1(void)
-{
- mword val;
- asm volatile (
- "mov %%dr1, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr2(void)
-{
- mword val;
- asm volatile (
- "mov %%dr2, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr3(void)
-{
- mword val;
- asm volatile (
- "mov %%dr3, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr6(void)
-{
- mword val;
- asm volatile (
- "mov %%dr6, %0"
- : "=r" (val)
- );
- return val;
-}
-
-mword get_dr7(void)
-{
- mword val;
- asm volatile (
- "mov %%dr7, %0"
- : "=r" (val)
- );
- return val;
-}
-
-void set_cr0(mword val)
-{
- asm volatile (
- "mov %0, %%cr0"
- :
- : "r" (val)
- );
-}
-
-void set_cr2(mword val)
-{
- asm volatile (
- "mov %0, %%cr2"
- :
- : "r" (val)
- );
-}
-
-void set_cr3(mword val)
-{
- asm volatile (
- "mov %0, %%cr3"
- :
- : "r" (val)
- );
-}
-
-void set_cr4(mword val)
-{
- asm volatile (
- "mov %0, %%cr4"
- :
- : "r" (val)
- );
-}
-
-void set_dr0(mword val)
-{
- asm volatile (
- "mov %0, %%dr0"
- :
- : "r" (val)
- );
-}
-
-void set_dr1(mword val)
-{
- asm volatile (
- "mov %0, %%dr1"
- :
- : "r" (val)
- );
-}
-
-void set_dr2(mword val)
-{
- asm volatile (
- "mov %0, %%dr2"
- :
- : "r" (val)
- );
-}
-
-void set_dr3(mword val)
-{
- asm volatile (
- "mov %0, %%dr3"
- :
- : "r" (val)
- );
-}
-
-void set_dr6(mword val)
-{
- asm volatile (
- "mov %0, %%dr6"
- :
- : "r" (val)
- );
-}
-
-void set_dr7(mword val)
-{
- asm volatile (
- "mov %0, %%dr7"
- :
- : "r" (val)
- );
-}
-
-uint16 get_kernel_cs(void)
-{
- mword cs;
- asm volatile (
- "mov %%cs, %0"
- : "=r" (cs)
- );
- return cs;
-}
-
-uint16 get_kernel_ds(void)
-{
- mword ds;
- asm volatile (
- "mov %%ds, %0"
- : "=r" (ds)
- );
- return ds;
-}
-
-uint16 get_kernel_es(void)
-{
- mword es;
- asm volatile (
- "mov %%es, %0"
- : "=r" (es)
- );
- return es;
-}
-
-uint16 get_kernel_ss(void)
-{
- mword ss;
- asm volatile (
- "mov %%ss, %0"
- : "=r" (ss)
- );
- return ss;
-}
-
-uint16 get_kernel_gs(void)
-{
- mword gs;
- asm volatile (
- "mov %%gs, %0"
- : "=r" (gs)
- );
- return gs;
-}
-
-void set_kernel_gs(uint16 gs)
-{
- asm volatile (
- "mov %0, %%gs"
- :
- : "r" (gs)
- );
-}
-
-void set_kernel_ds(uint16 ds)
-{
- asm volatile (
- "mov %0, %%ds"
- :
- : "r" (ds)
- );
-}
-
-void set_kernel_es(uint16 es)
-{
- asm volatile (
- "mov %0, %%es"
- :
- : "r" (es)
- );
-}
-
-void set_kernel_fs(uint16 fs)
-{
- asm volatile (
- "mov %0, %%fs"
- :
- : "r" (fs)
- );
-}
-
-uint16 get_kernel_fs(void)
-{
- mword fs;
- asm volatile (
- "mov %%fs, %0"
- : "=r" (fs)
- );
- return fs;
-}
-
-void ia32_wrmsr(uint32 reg, uint64 val)
-{
- asm volatile (
- "wrmsr"
- :
- : "c" (reg),
- "d" ((uint32)(val >> 32)),
- "a" ((uint32)val)
- );
-}
-
-uint64 ia32_rdmsr(uint32 reg)
-{
- uint32 a, d;
- asm volatile (
- "rdmsr"
- : "=a" (a),
- "=d" (d)
- : "c" (reg)
- );
- return ((uint64)d << 32) | (uint64)a;
-}
-
-uint64 rdtsc(void)
-{
- mword a, d;
- asm volatile (
- "rdtsc"
- : "=a" (a),
- "=d" (d)
- );
- return ((uint64)d << 32) | (uint64)a;
-}
-
-void fxsave(unsigned long *addr)
-{
- asm volatile (
- "fxsave %0"
- :
- : "m" (*addr)
- );
-}
-
-void fxrstor(unsigned long *addr)
-{
- asm volatile (
- "fxrstor %0"
- :
- : "m" (*addr)
- );
-}
-
-void btr(uint8 *addr, uint bit)
-{
- // bitrl may be able to handle large bit offsets. Nevertheless, use a small
- // offset (i.e. less than 8) as the Windows wrappers do, just to be on the
- // safe side.
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
-
- // C.f. the first code sample in section 6.45.2.3 (Output Operands) of
- // https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
- asm volatile (
- "lock; btrl %1, %0"
- : "+m" (*base)
- : "Ir" (offset)
- : "cc"
- );
-}
-
-void bts(uint8 *addr, uint bit)
-{
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
-
- asm volatile (
- "lock; btsl %1, %0"
- : "+m" (*base)
- : "Ir" (offset)
- : "cc"
- );
-}
-
-void __handle_cpuid(union cpuid_args_t *args)
-{
- uint32 a = args->eax, c = args->ecx;
-
- asm ("cpuid"
- : "=a" (args->eax),
- "=c" (args->ecx),
- "=b" (args->ebx),
- "=d" (args->edx)
- : "0" (a),
- "1" (c)
- );
-}
-
-uint64 get_kernel_rflags(void)
-{
- mword flags;
-#ifdef __x86_64__
- asm volatile (
- "pushfq \n\t"
- "popq %0 \n\t"
- : "=r" (flags)
- );
-#else
- asm volatile (
- "pushfd \n\t"
- "pop %0 \n\t"
- : "=r" (flags)
- );
-#endif
- return flags;
-}
-
-void __nmi(void)
-{
- asm ("int $2");
-}
-
-uint32 __fls(uint32 bit32)
-{
- asm ("bsr %1, %0"
- : "=r" (bit32)
- : "rm" (bit32)
- );
- return bit32;
-}
diff --git a/darwin/hax_driver/com_intel_hax/asm/segments.c b/darwin/hax_driver/com_intel_hax/asm/segments.c
deleted file mode 100644
index fb3b77da..00000000
--- a/darwin/hax_driver/com_intel_hax/asm/segments.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2009 Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "../../../../core/include/types.h"
-#include "../../../../core/include/compiler.h"
-#include "../../../../core/include/ia32.h"
-#include "../../../../include/hax.h"
-
-void set_kernel_gdt(system_desc_t *sys_desc)
-{
-#ifdef __x86_64__
- asm ("lgdt %0"
- : "=m" (*sys_desc)
- );
-#else
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "lgdt %0 \n\t"
- "ljmp *(%%rip) \n\t"
- "4: \n\t"
- ".long 5f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "5:"
- : "=m" (*sys_desc)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS)
- : "memory",
- "cc"
- );
- } else {
- asm ("lgdt %0"
- : "=m" (*sys_desc)
- );
- }
-#endif
-}
-
-void set_kernel_idt(system_desc_t *sys_desc)
-{
-#ifdef __x86_64__
- asm ("lidt %0"
- : "=m" (*sys_desc)
- );
-#else
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "lidt %0 \n\t"
- "ljmp *(%%rip) \n\t"
- "4: \n\t"
- ".long 5f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "5:"
- : "=m" (*sys_desc)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS)
- : "memory",
- "cc"
- );
- } else {
- asm ("lidt %0"
- : "=m" (*sys_desc)
- );
- }
-#endif
-}
-
-void get_kernel_gdt(system_desc_t *sys_desc)
-{
-#ifdef __x86_64__
- asm ("sgdt %0"
- : "=m" (*sys_desc)
- );
-#else
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "sgdt %0 \n\t"
- "ljmp *(%%rip) \n\t"
- "4: \n\t"
- ".long 5f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "5:"
- : "=m" (*sys_desc)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS)
- : "memory",
- "cc"
- );
- } else {
- asm ("sgdt %0"
- : "=m" (*sys_desc)
- );
- }
-#endif
-}
-
-void get_kernel_idt(system_desc_t *sys_desc)
-{
-#ifdef __x86_64__
- asm ("sidt %0"
- : "=m" (*sys_desc)
- );
-#else
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "sidt %0 \n\t"
- "ljmp *(%%rip) \n\t"
- "4: \n\t"
- ".long 5f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "5:"
- : "=m" (*sys_desc)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS)
- : "memory",
- "cc"
- );
- } else {
- asm ("sidt %0"
- : "=m" (*sys_desc)
- );
- }
-#endif
-}
-
-void load_kernel_ldt(uint16 sel)
-{
-#ifdef __x86_64__
- asm ("lldt %0"
- :
- : "m" (sel)
- );
-#else
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P0 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "lldt %2 \n\t"
- "ljmp *(%%rip) \n\t"
- "4: \n\t"
- ".long 5f \n\t"
- ".word %P1 \n\t"
- ".code32 \n\t"
- "5:"
- :
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS),
- "m" (sel)
- : "memory",
- "cc"
- );
- } else {
- asm ("lldt %0"
- :
- : "m" (sel)
- );
- }
-#endif
-}
-
-uint16 get_kernel_tr_selector(void)
-{
- uint16 selector, *sel;
- sel = &selector;
-
- asm ("str %0"
- : "=m" (*sel)
- );
- return selector;
-}
-
-uint16 get_kernel_ldt(void)
-{
- uint16 selector, *sel;
- sel = &selector;
-
- asm ("sldt %0"
- : "=m" (*sel)
- );
- return selector;
-}
diff --git a/darwin/hax_driver/com_intel_hax/asm/vmcs.c b/darwin/hax_driver/com_intel_hax/asm/vmcs.c
deleted file mode 100644
index 58be5caf..00000000
--- a/darwin/hax_driver/com_intel_hax/asm/vmcs.c
+++ /dev/null
@@ -1,942 +0,0 @@
-/*
- * Copyright (c) 2009 Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "../../../../core/include/vmx.h"
-#include "../../../../core/include/types.h"
-#include "../../../../core/include/vcpu.h"
-
-/* Don't call these two functions as NOT INLINE manner */
-#ifdef __i386__
-static inline void switch_to_64bit_mode(void)
-{
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P0 \n\t"
- ".code64 \n\t"
- "1:"
- :
- : "i" (HAX_KERNEL64_CS)
- );
-}
-
-static inline void switch_to_compat_mode(void)
-{
- __asm__ __volatile__ (
- ".code64 \n\t"
- "ljmp *(%%rip) \n\t"
- ".long 2f \n\t"
- ".word %P0 \n\t"
- ".code32 \n\t"
- "2:"
- :
- : "i" (HAX_KERNEL32_CS)
- );
-}
-#endif
-
-static uint64 _get_cr3(void)
-{
- uint64 val = 0;
- asm volatile (
- "mov %%cr3, %0"
- : "=r" (val)
- );
- return val;
-}
-
-uint64 get_cr3(void)
-{
-#ifdef __x86_64__
- return _get_cr3();
-#else
- uint64 val = 0;
- uint64 up = 0;
- uint32 low = 0;
-
- if (is_compatible()) {
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P2 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "mov %%cr3, %%rax \n\t"
- "mov %%rax, %%rdx \n\t"
- "shr $32, %%rdx \n\t"
- "mov %%edx, %0 \n\t"
- "mov %%eax, %1 \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word %P3 \n\t"
- ".code32 \n\t"
- "3:"
- : "=m" (up),
- "=m" (low)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS)
- : "memory",
- "cc",
- "rdx"
- );
- val = (uint64)up << 32 | low;
- return val;
- } else
- return _get_cr3();
-#endif
-}
-
-#ifdef __x86_64__
-static inline vmx_error_t vmx_vmxon_64(paddr_t addr)
-{
- vmx_error_t eflags = 0;
- asm volatile (
- "vmxon %1 \n\t"
- "pushf \n\t"
- "pop %0"
- : "=r" (eflags)
- : "m" (addr)
- : "memory"
- );
-
- return eflags & VMX_FAIL_MASK;
-}
-#else
-static inline vmx_error_t vmx_vmxon_32(paddr_t addr)
-{
- vmx_error_t eflags = 0;
-
- asm volatile (
- "vmxon %4 \n\t"
- "cmovcl %2, %0 \n\t"
- "cmovzl %3, %0"
- : "=&r" (eflags)
- : "0" (VMX_SUCCEED),
- "r" (VMX_FAIL_INVALID),
- "r" (VMX_FAIL_VALID),
- "m" (addr)
- : "memory",
- "cc"
- );
- return eflags;
-}
-#endif
-
-vmx_error_t __vmxon(uint64 addr)
-{
-#ifdef __x86_64__
- return (vmx_vmxon_64(addr));
-#else
- if (is_compatible()) {
- vmx_error_t result = 0;
- switch_to_64bit_mode();
- result = vmx_vmxon_32(addr);
- switch_to_compat_mode();
- return result;
- } else {
- return (vmx_vmxon_32(addr));
- }
-#endif
-}
-
-#ifdef __x86_64__
-static inline vmx_error_t vmx_vmxoff_64(void)
-{
- vmx_error_t eflags = 0;
- asm volatile (
- "vmxoff \n\t"
- "pushf \n\t"
- "pop %0"
- : "=r" (eflags)
- :
- : "memory",
- "cc"
- );
-
- return eflags & VMX_FAIL_MASK;
-}
-#else
-static inline vmx_error_t vmx_vmxoff_32(void)
-{
- vmx_error_t eflags = 0;
-
- asm volatile (
- "vmxoff \n\t"
- "cmovcl %2, %0 \n\t"
- "cmovzl %3, %0"
- : "=&r" (eflags)
- : "0" (VMX_SUCCEED),
- "r" (VMX_FAIL_INVALID),
- "r" (VMX_FAIL_VALID)
- : "memory",
- "cc"
- );
- return eflags;
-}
-#endif
-
-vmx_error_t __vmxoff(void)
-{
-#ifdef __x86_64__
- return (vmx_vmxoff_64());
-#else
- if (is_compatible()) {
- vmx_error_t result = 0;
- switch_to_64bit_mode();
- result = vmx_vmxoff_32();
- switch_to_compat_mode();
- return result;
- } else {
- return (vmx_vmxoff_32());
- }
-#endif
-}
-
-#ifdef __x86_64__
-static inline vmx_error_t vmx_vmclear_64(paddr_t address)
-{
- vmx_error_t eflags = 0;
- asm volatile (
- "vmclear %1 \n\t"
- "pushf \n\t"
- "pop %0"
- : "=r" (eflags)
- : "m" (address)
- : "memory"
- );
- return eflags & VMX_FAIL_MASK;
-}
-#else
-static inline vmx_error_t vmx_vmclear_32(paddr_t addr)
-{
- vmx_error_t eflags = 0;
-
- asm volatile (
- "vmclear %4 \n\t"
- "cmovcl %2, %0 \n\t"
- "cmovzl %3, %0"
- : "=&r" (eflags)
- : "0" (VMX_SUCCEED),
- "r" (VMX_FAIL_INVALID),
- "r" (VMX_FAIL_VALID),
- "m" (addr)
- : "memory",
- "cc"
- );
- return eflags;
-}
-#endif
-
-vmx_error_t __vmclear(uint64 addr)
-{
-#ifdef __x86_64__
- return (vmx_vmclear_64(addr));
-#else
- if (is_compatible()) {
- vmx_error_t result = 0;
- /* Don't put anything between these lines! */
- switch_to_64bit_mode();
- result = vmx_vmclear_32(addr);
- switch_to_compat_mode();
- return result;
- } else {
- return (vmx_vmclear_32(addr));
- }
-#endif
-}
-
-#ifdef __x86_64__
-static inline vmx_error_t vmx_vmptrld_64(paddr_t addr)
-{
- vmx_error_t eflags = 0;
- asm volatile (
- "vmptrld %1 \n\t"
- "pushf \n\t"
- "pop %0"
- : "=r" (eflags)
- : "m" (addr)
- : "memory"
- );
- return eflags & VMX_FAIL_MASK;
-}
-#else
-static inline vmx_error_t vmx_vmptrld_32(paddr_t addr)
-{
- vmx_error_t eflags = 0;
-
- asm volatile (
- "vmptrld %4 \n\t"
- "cmovcl %2, %0 \n\t"
- "cmovzl %3, %0"
- : "=&r" (eflags)
- : "0" (VMX_SUCCEED),
- "r" (VMX_FAIL_INVALID),
- "r" (VMX_FAIL_VALID),
- "m" (addr)
- : "memory",
- "cc"
- );
- return eflags;
-}
-#endif
-
-vmx_error_t __vmptrld(paddr_t addr)
-{
-#ifdef __x86_64__
- return (vmx_vmptrld_64(addr));
-#else
- if (is_compatible()) {
- vmx_error_t result = 0;
- /* Don't put anything between these lines! */
- switch_to_64bit_mode();
- result = vmx_vmptrld_32(addr);
- switch_to_compat_mode();
- return result;
- } else {
- return (vmx_vmptrld_32(addr));
- }
-#endif
-}
-
-#ifdef __x86_64__
-static inline paddr_t vmx_vmptrst_64(void)
-{
- paddr_t address;
-
- asm volatile (
- "vmptrst %0"
- : "=m" (address)
- :
- : "memory"
- );
- return address;
-}
-#else
-static inline paddr_t vmx_vmptrst_32(void)
-{
- paddr_t address;
-
- asm volatile (
- "vmptrst %0"
- : "=m" (address)
- :
- : "memory",
- "cc"
- );
- return address;
-}
-#endif
-
-paddr_t __vmptrst(void)
-{
-#ifdef __x86_64__
- return (vmx_vmptrst_64());
-#else
- if (is_compatible()) {
- /* Don't put anything between these lines! */
- paddr_t address;
- switch_to_64bit_mode();
- address = vmx_vmptrst_32();
- switch_to_compat_mode();
- return address;
- } else {
- return (vmx_vmptrst_32());
- }
-#endif
-}
-
-static inline uint64 ___vmx_vmread(component_index_t component)
-{
- mword result;
- asm volatile (
- "vmread %1, %0"
- : "=rm" (result)
- : "r" ((mword)(component))
- );
- return result;
-}
-
-#ifdef __i386__
-static inline uint64 ___vmx_vmread_64_compatible(component_index_t component)
-{
- uint64 result = 0;
-
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "pushq %%rbx \n\t"
- "movq $0, %%rbx \n\t"
- "mov %3, %%ebx \n\t"
- "vmread %%rbx, %0 \n\t"
- "popq %%rbx \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "3:"
- : "=m" (result)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS),
- "m" ((mword)(component))
- : "memory",
- "cc",
- "rbx"
- );
- return result;
-}
-#endif
-
-uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
-#ifdef __x86_64__
- val = ___vmx_vmread(component);
-#else
- if (is_compatible()) {
- uint64 value = ___vmx_vmread_64_compatible(component);
- val = value & 0xffffffffULL;
- } else {
- val = ___vmx_vmread(component);
- }
-#endif
- return val;
-}
-
-uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val;
-#ifdef __x86_64__
- val = ___vmx_vmread(component);
-#else
- if (is_compatible()) {
- val = ___vmx_vmread_64_compatible(component);
- } else {
- val = ___vmx_vmread(component);
- }
-#endif
- return val;
-}
-
-uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val;
-#ifdef __x86_64__
- val = ___vmx_vmread(component);
-#else
- if (is_compatible()) {
- val = ___vmx_vmread_64_compatible(component);
- } else {
- val = ___vmx_vmread(component + 1);
- val <<= 32;
- val |= ___vmx_vmread(component);
- }
-#endif
- return val;
-}
-
-static inline void ___vmx_vmwrite(const char *name, component_index_t component,
- mword val)
-{
- asm volatile (
- "vmwrite %0, %1"
- :
- : "rm" (val),
- "r" ((mword)(component))
- );
-}
-
-#ifdef __i386__
-static inline uint64 ___vmx_vmwrite_64_compatible(
- const char *name, component_index_t component, uint64 val)
-{
- uint64 eflags = 0;
-
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P1 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "pushq %%rbx \n\t"
- "xorq %%rbx, %%rbx \n\t"
- "mov %4, %%ebx \n\t"
- "vmwrite %3, %%rbx \n\t"
- "pushf \n\t"
- "pop %0 \n\t"
- "popq %%rbx \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word %P2 \n\t"
- ".code32 \n\t"
- "3:"
- : "=m" (eflags)
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS),
- "m" (val),
- "m" ((mword)(component))
- : "memory",
- "cc",
- "rbx"
- );
-
- return (eflags & VMX_FAIL_MASK);
-}
-#endif
-
-void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val)
-{
-#ifdef __x86_64__
- ___vmx_vmwrite(name, component, source_val);
-#endif
-
-#ifdef __i386__
- if (is_compatible()) {
- uint64 result = 0;
- result = ___vmx_vmwrite_64_compatible(name, component, source_val);
- if (result) {
- printf("vmwrite_natural: com %x, val %llx, result %llx\n",
- component, source_val, result);
- panic();
- }
- } else {
- ___vmx_vmwrite(name, component, source_val);
- }
-#endif
-}
-
-void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val)
-{
-#ifdef __x86_64__
- ___vmx_vmwrite(name, component, source_val);
-#endif
-
-#ifdef __i386__
- if (is_compatible()) {
- uint64 result = 0;
- result = ___vmx_vmwrite_64_compatible(name, component, source_val);
- if (result) {
- printf("vmwrite_64: com %x, val %llx, result %llx\n", component,
- source_val, result);
- panic();
- }
- } else {
- ___vmx_vmwrite(name, component, source_val);
- ___vmx_vmwrite(name, component + 1, source_val >> 32);
- }
-#endif
-}
-
-void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
- component_index_t component, mword source_val)
-{
-#ifdef __x86_64__
- ___vmx_vmwrite(name, component, source_val);
-#else
- if (is_compatible()) {
- uint64 result = 0, val = 0;
- val |= source_val;
- result = ___vmx_vmwrite_64_compatible(name, component, val);
- if (result) {
- printf("vmwrite com %x, val %lx\n", component, source_val);
- panic();
- }
- } else {
- ___vmx_vmwrite(name, component, source_val);
- }
-#endif
-}
-
-static inline void vmcall(void)
-{
- asm volatile (
- "vmcall"
- :
- :
- : "memory"
- );
-}
-
-void __vmcall(void)
-{
-#ifdef __x86_64__
- vmcall();
-#else
- if (is_compatible()) {
- /* Don't put anything between these lines! */
- switch_to_64bit_mode();
- vmcall();
- switch_to_compat_mode();
- } else {
- return vmcall();
- }
-#endif
-}
-
-static inline vmx_error_t ___invept(uint type, struct invept_desc *desc)
-{
- vmx_error_t eflags = 0;
-#if 1
- // Hard-code the instruction because INVEPT is not recognized by Xcode.
- // 0x08 is the ModR/M byte, specifying *CX as the register operand and *AX
- // as the memory operand (see IASDM Vol. 2A 2.1.5, Table 2-2)
-#define IA32_INVEPT_OPCODE ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
- asm volatile (
- IA32_INVEPT_OPCODE "\n\t"
- "pushf \n\t"
- "pop %0"
- : "=d" (eflags)
- : "c" (type),
- "a" (desc)
- : "memory"
- );
-#else
- asm volatile (
- "invept %1, %2 \n\t"
- "pushf \n\t"
- "pop %0"
- : "=r" (eflags)
- : "r" (type),
- "m" (desc)
- : "memory"
- );
-#endif
- return eflags & VMX_FAIL_MASK;
-}
-
-#ifdef __i386__ // Obsolete, because 32-bit Mac is no longer supported
-static inline void ___invept_compatible(uint type, struct invept_desc *desc)
-{
-#if 1
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P0 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "mov %2, %%rcx \n\t"
- "mov %3, %%rax \n\t"
- ".byte 0x66, 0x0f, 0x38, 0x80, 0x08 \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word %P1 \n\t"
- ".code32 \n\t"
- "3:"
- :
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS),
- "m" (type),
- "m" (desc)
- );
-#else
- __asm__ __volatile__ (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word %P0 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "mov %P2, %%rax \n\t"
- "invept %P2, %P3 \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word %P1 \n\t"
- ".code32 \n\t"
- "3:"
- :
- : "i" (HAX_KERNEL64_CS),
- "i" (HAX_KERNEL32_CS),
- "m" (type),
- "m" (desc)
- : "memory"
- );
-#endif
-}
-#endif
-
-vmx_error_t __invept(uint type, struct invept_desc *desc)
-{
-#ifdef __x86_64__
- return ___invept(type, desc);
-#else // obsolete, because 32-bit Mac is no longer supported
- if (is_compatible()) {
- ___invept_compatible(type, desc);
- // Just return a fake value (this code path is never taken anyway)
- return (vmx_error_t) -1;
- } else {
- return ___invept(type, desc);
- }
-#endif
-}
-
-mword get_rip(void)
-{
- mword host_rip;
-#ifdef __x86_64__
- asm volatile (
- "leaq EXIT_ENTRY(%%rip), %0"
- : "=r" (host_rip)
- );
-#else
- asm volatile (
- "movl $EXIT_ENTRY, %0"
- : "=r" (host_rip)
- );
-#endif
- return host_rip;
-}
-
-uint64 __vmx_run(struct vcpu_state_t *state, uint16 launched)
-{
- uint64 rflags = 0;
-
-#ifdef __x86_64__
- asm volatile (
- "pushfq \n\t"
- "pushq %%r8 \n\t"
- "pushq %%r9 \n\t"
- "pushq %%r10 \n\t"
- "pushq %%r11 \n\t"
- "pushq %%r12 \n\t"
- "pushq %%r13 \n\t"
- "pushq %%r14 \n\t"
- "pushq %%r15 \n\t"
- "pushq %%rcx \n\t"
- "pushq %%rdx \n\t"
- "pushq %%rsi \n\t"
- "pushq %%rdi \n\t"
- "pushq %%rbp \n\t"
- "pushq %%rax \n\t"
- "pushq %%rbx \n\t"
- "movq $0x6c14, %%rbx \n\t"
- "movq %%rsp, %%rax \n\t"
- "subq $8, %%rax \n\t"
- "vmwrite %%rax, %%rbx \n\t"
- "popq %%rbx \n\t"
- "popq %%rax \n\t"
- "pushq %%rax \n\t"
- "pushq %%rbx \n\t"
- "pushq %3 \n\t"
- "cmpl $1, %2 \n\t"
- "movq 0x8(%%rax), %%rcx \n\t"
- "movq 0x10(%%rax), %%rdx \n\t"
- "movq 0x18(%%rax), %%rbx \n\t"
- "movq 0x28(%%rax), %%rbp \n\t"
- "movq 0x30(%%rax), %%rsi \n\t"
- "movq 0x38(%%rax), %%rdi \n\t"
- "movq 0x40(%%rax), %%r8 \n\t"
- "movq 0x48(%%rax), %%r9 \n\t"
- "movq 0x50(%%rax), %%r10 \n\t"
- "movq 0x58(%%rax), %%r11 \n\t"
- "movq 0x60(%%rax), %%r12 \n\t"
- "movq 0x68(%%rax), %%r13 \n\t"
- "movq 0x70(%%rax), %%r14 \n\t"
- "movq 0x78(%%rax), %%r15 \n\t"
- "movq 0x00(%%rax), %%rax \n\t"
- "je RESUME \n\t"
- "vmlaunch \n\t"
- "jmp EXIT_ENTRY_FAIL \n\t"
- "RESUME: \n\t"
- "vmresume \n\t"
- "jmp EXIT_ENTRY_FAIL \n\t"
- "EXIT_ENTRY: \n\t"
- "push %%rdi \n\t"
- "movq 0x8(%%rsp), %%rdi \n\t"
- "movq %%rax, 0x00(%%rdi) \n\t"
- "movq %%rcx, 0x08(%%rdi) \n\t"
- "movq %%rdx, 0x10(%%rdi) \n\t"
- "popq %%rcx \n\t"
- "movq %%rbx, 0x18(%%rdi) \n\t"
- "movq %%rbp, 0x28(%%rdi) \n\t"
- "movq %%rsi, 0x30(%%rdi) \n\t"
- "movq %%rcx, 0x38(%%rdi) \n\t"
- "movq %%r8, 0x40(%%rdi) \n\t"
- "movq %%r9, 0x48(%%rdi) \n\t"
- "movq %%r10, 0x50(%%rdi) \n\t"
- "movq %%r11, 0x58(%%rdi) \n\t"
- "movq %%r12, 0x60(%%rdi) \n\t"
- "movq %%r13, 0x68(%%rdi) \n\t"
- "movq %%r14, 0x70(%%rdi) \n\t"
- "movq %%r15, 0x78(%%rdi) \n\t"
- "EXIT_ENTRY_FAIL: \n\t"
- "popq %%rbx \n\t"
- "popq %%rbx \n\t"
- "popq %%rax \n\t"
- "popq %%rbp \n\t"
- "popq %%rdi \n\t"
- "popq %%rsi \n\t"
- "popq %%rdx \n\t"
- "popq %%rcx \n\t"
- "popq %%r15 \n\t"
- "popq %%r14 \n\t"
- "popq %%r13 \n\t"
- "popq %%r12 \n\t"
- "popq %%r11 \n\t"
- "popq %%r10 \n\t"
- "popq %%r9 \n\t"
- "popq %%r8 \n\t"
- "pushf \n\t"
- "pop %0 \n\t"
- "popfq"
- : "=m" (rflags)
- : "a" (state),
- "b" ((uint32)launched),
- "m" (state)
- );
-#else
-#define HAX_KERNEL32_CS 0x08
-#define HAX_KERNEL64_CS 0x80
- asm volatile (
- ".code32 \n\t"
- ".byte 0xea \n\t"
- ".long 1f \n\t"
- ".word 0x80 \n\t"
- ".code64 \n\t"
- "1: \n\t"
- "pushfq \n\t"
- "pushq %%r8 \n\t"
- "pushq %%r9 \n\t"
- "pushq %%r10 \n\t"
- "pushq %%r11 \n\t"
- "pushq %%r12 \n\t"
- "pushq %%r13 \n\t"
- "pushq %%r14 \n\t"
- "pushq %%r15 \n\t"
- "pushq %%rcx \n\t"
- "pushq %%rdx \n\t"
- "pushq %%rsi \n\t"
- "pushq %%rdi \n\t"
- "pushq %%rbp \n\t"
- "pushq %%rax \n\t"
- "pushq %%rbx \n\t"
- "movq $0x6c14, %%rbx \n\t"
- "movq %%rsp, %%rax \n\t"
- "subq $8, %%rax \n\t"
- "vmwrite %%rax, %%rbx \n\t"
- "popq %%rbx \n\t"
- "popq %%rax \n\t"
- "pushq %%rax \n\t"
- "pushq %%rbx \n\t"
- "xorq %%rbx, %%rbx \n\t"
- "movq $0x00000000ffffffff, %%rbx \n\t"
- "andq %%rbx, %%rax \n\t"
- "popq %%rbx \n\t"
- "cmpl $1, %2 \n\t"
- "pushq %%rbx \n\t"
- "pushq %3 \n\t"
- "movq 0x8(%%rax), %%rcx \n\t"
- "movq 0x10(%%rax), %%rdx \n\t"
- "movq 0x18(%%rax), %%rbx \n\t"
- "movq 0x28(%%rax), %%rbp \n\t"
- "movq 0x30(%%rax), %%rsi \n\t"
- "movq 0x38(%%rax), %%rdi \n\t"
- "movq 0x40(%%rax), %%r8 \n\t"
- "movq 0x48(%%rax), %%r9 \n\t"
- "movq 0x50(%%rax), %%r10 \n\t"
- "movq 0x58(%%rax), %%r11 \n\t"
- "movq 0x60(%%rax), %%r12 \n\t"
- "movq 0x68(%%rax), %%r13 \n\t"
- "movq 0x70(%%rax), %%r14 \n\t"
- "movq 0x78(%%rax), %%r15 \n\t"
- "movq 0x00(%%rax), %%rax \n\t"
- "je RESUME \n\t"
- "vmlaunch \n\t"
- "jmp EXIT_ENTRY_FAIL \n\t"
- "RESUME: \n\t"
- "vmresume \n\t"
- "jmp EXIT_ENTRY_FAIL \n\t"
- "EXIT_ENTRY: \n\t"
- "push %%rdi \n\t"
- "movq $0, %%rdi \n\t"
- "movl 0x8(%%rsp), %%edi \n\t"
- "movq %%rax, 0x00(%%rdi) \n\t"
- "movq %%rcx, 0x08(%%rdi) \n\t"
- "movq %%rdx, 0x10(%%rdi) \n\t"
- "pop %%rcx \n\t"
- "movq %%rbx, 0x18(%%rdi) \n\t"
- "movq %%rbp, 0x28(%%rdi) \n\t"
- "movq %%rsi, 0x30(%%rdi) \n\t"
- "movq %%rcx, 0x38(%%rdi) \n\t"
- "movq %%r8, 0x40(%%rdi) \n\t"
- "movq %%r9, 0x48(%%rdi) \n\t"
- "movq %%r10, 0x50(%%rdi) \n\t"
- "movq %%r11, 0x58(%%rdi) \n\t"
- "movq %%r12, 0x60(%%rdi) \n\t"
- "movq %%r13, 0x68(%%rdi) \n\t"
- "movq %%r14, 0x70(%%rdi) \n\t"
- "movq %%r15, 0x78(%%rdi) \n\t"
- "EXIT_ENTRY_FAIL: \n\t"
- "popq %%rbx \n\t"
- "popq %%rbx \n\t"
- "popq %%rax \n\t"
- "popq %%rbp \n\t"
- "popq %%rdi \n\t"
- "popq %%rsi \n\t"
- "popq %%rdx \n\t"
- "popq %%rcx \n\t"
- "popq %%r15 \n\t"
- "popq %%r14 \n\t"
- "popq %%r13 \n\t"
- "popq %%r12 \n\t"
- "popq %%r11 \n\t"
- "popq %%r10 \n\t"
- "popq %%r9 \n\t"
- "popq %%r8 \n\t"
- "pushf \n\t"
- "pop %0 \n\t"
- "popfq \n\t"
- "ljmp *(%%rip) \n\t"
- "2: \n\t"
- ".long 3f \n\t"
- ".word 0x08 \n\t"
- ".code32 \n\t"
- "3:"
- : "=m" (rflags)
- : "a" (state),
- "b" ((uint32)launched),
- "m" (state));
-#endif
- return rflags;
-}
diff --git a/darwin/hax_driver/com_intel_hax/hax_wrapper.cpp b/darwin/hax_driver/com_intel_hax/hax_wrapper.cpp
index a827c752..11b3bb61 100644
--- a/darwin/hax_driver/com_intel_hax/hax_wrapper.cpp
+++ b/darwin/hax_driver/com_intel_hax/hax_wrapper.cpp
@@ -105,16 +105,6 @@ extern "C" uint32_t hax_cpuid()
return cpu_number();
}
-extern "C" void hax_enable_irq(void)
-{
- ml_set_interrupts_enabled(true);
-}
-
-extern "C" void hax_disable_irq(void)
-{
- ml_set_interrupts_enabled(false);
-}
-
extern "C" void hax_disable_preemption(preempt_flag *eflags)
{
mword flags;
@@ -135,6 +125,16 @@ extern "C" void hax_disable_preemption(preempt_flag *eflags)
hax_disable_irq();
}
+extern "C" void hax_enable_irq(void)
+{
+ ml_set_interrupts_enabled(true);
+}
+
+extern "C" void hax_disable_irq(void)
+{
+ ml_set_interrupts_enabled(false);
+}
+
extern "C" void hax_enable_preemption(preempt_flag *eflags)
{
if (*eflags & EFLAGS_IF)
diff --git a/darwin/hax_driver/com_intel_hax/intelhaxm.xcodeproj/project.pbxproj b/darwin/hax_driver/com_intel_hax/intelhaxm.xcodeproj/project.pbxproj
index 2f75a502..43b2fd7d 100644
--- a/darwin/hax_driver/com_intel_hax/intelhaxm.xcodeproj/project.pbxproj
+++ b/darwin/hax_driver/com_intel_hax/intelhaxm.xcodeproj/project.pbxproj
@@ -35,9 +35,6 @@
43C9A9E7138DDA93000A1071 /* hax_host.h in Headers */ = {isa = PBXBuildFile; fileRef = 43C9A9E6138DDA93000A1071 /* hax_host.h */; };
43F857E013931E75008A93D6 /* com_intel_hax_mem.h in Headers */ = {isa = PBXBuildFile; fileRef = 43F857DE13931E75008A93D6 /* com_intel_hax_mem.h */; };
43F857E113931E75008A93D6 /* com_intel_hax_mem.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 43F857DF13931E75008A93D6 /* com_intel_hax_mem.cpp */; };
- 4BCC4E0513FB6729005E4BE4 /* ia32.c in Sources */ = {isa = PBXBuildFile; fileRef = 4BCC4E0213FB6729005E4BE4 /* ia32.c */; };
- 4BCC4E0613FB6729005E4BE4 /* segments.c in Sources */ = {isa = PBXBuildFile; fileRef = 4BCC4E0313FB6729005E4BE4 /* segments.c */; };
- 4BCC4E0713FB6729005E4BE4 /* vmcs.c in Sources */ = {isa = PBXBuildFile; fileRef = 4BCC4E0413FB6729005E4BE4 /* vmcs.c */; };
642FD41B20D9F74D00C197FF /* cpuid.h in Headers */ = {isa = PBXBuildFile; fileRef = 642FD41A20D9F74D00C197FF /* cpuid.h */; };
642FD41E20D9F79100C197FF /* emulate_ops.h in Headers */ = {isa = PBXBuildFile; fileRef = 642FD41C20D9F79100C197FF /* emulate_ops.h */; };
642FD41F20D9F79100C197FF /* emulate.h in Headers */ = {isa = PBXBuildFile; fileRef = 642FD41D20D9F79100C197FF /* emulate.h */; };
@@ -47,8 +44,11 @@
6496936F20D8AE0000C9BBAF /* cpuid.c in Sources */ = {isa = PBXBuildFile; fileRef = 6496936E20D8AE0000C9BBAF /* cpuid.c */; };
64B72B851EDFFF7E00A8C202 /* hax_host_mem.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 64B72B841EDFFF7E00A8C202 /* hax_host_mem.cpp */; };
64B85BE91EF4D34D00223ABD /* ept2.c in Sources */ = {isa = PBXBuildFile; fileRef = 64B85BE81EF4D34D00223ABD /* ept2.c */; };
+ 64BB0CD220F36C470064593A /* vmx_ops.asm in Sources */ = {isa = PBXBuildFile; fileRef = 64BB0CD020F36C470064593A /* vmx_ops.asm */; };
+ 64BB0CD320F36C470064593A /* ia32_ops.asm in Sources */ = {isa = PBXBuildFile; fileRef = 64BB0CD120F36C470064593A /* ia32_ops.asm */; };
6E2DBBCC18EB6125003B66C9 /* page_walker.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E2DBBCB18EB6125003B66C9 /* page_walker.c */; };
6E2DBBCE18EB6155003B66C9 /* page_walker.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E2DBBCD18EB6155003B66C9 /* page_walker.h */; };
+ A669096B20F9985300739075 /* ia32.c in Sources */ = {isa = PBXBuildFile; fileRef = A669096A20F9985300739075 /* ia32.c */; };
B98ECFB613A059BB00485DDB /* cpu.c in Sources */ = {isa = PBXBuildFile; fileRef = B98ECF9C13A059BB00485DDB /* cpu.c */; };
B98ECFB713A059BB00485DDB /* dump_vmcs.c in Sources */ = {isa = PBXBuildFile; fileRef = B98ECF9D13A059BB00485DDB /* dump_vmcs.c */; };
B98ECFB813A059BB00485DDB /* hax.c in Sources */ = {isa = PBXBuildFile; fileRef = B98ECF9E13A059BB00485DDB /* hax.c */; };
@@ -124,9 +124,6 @@
43C9A9E6138DDA93000A1071 /* hax_host.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hax_host.h; sourceTree = "<group>"; };
43F857DE13931E75008A93D6 /* com_intel_hax_mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = com_intel_hax_mem.h; sourceTree = "<group>"; };
43F857DF13931E75008A93D6 /* com_intel_hax_mem.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = com_intel_hax_mem.cpp; sourceTree = "<group>"; };
- 4BCC4E0213FB6729005E4BE4 /* ia32.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ia32.c; sourceTree = "<group>"; };
- 4BCC4E0313FB6729005E4BE4 /* segments.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = segments.c; sourceTree = "<group>"; };
- 4BCC4E0413FB6729005E4BE4 /* vmcs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = vmcs.c; sourceTree = "<group>"; };
642FD41A20D9F74D00C197FF /* cpuid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpuid.h; sourceTree = "<group>"; };
642FD41C20D9F79100C197FF /* emulate_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = emulate_ops.h; sourceTree = "<group>"; };
642FD41D20D9F79100C197FF /* emulate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = emulate.h; sourceTree = "<group>"; };
@@ -136,8 +133,11 @@
6496936E20D8AE0000C9BBAF /* cpuid.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = cpuid.c; path = ../../../core/cpuid.c; sourceTree = "<group>"; };
64B72B841EDFFF7E00A8C202 /* hax_host_mem.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = hax_host_mem.cpp; sourceTree = "<group>"; };
64B85BE81EF4D34D00223ABD /* ept2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ept2.c; path = ../../../core/ept2.c; sourceTree = "<group>"; };
+ 64BB0CD020F36C470064593A /* vmx_ops.asm */ = {isa = PBXFileReference; explicitFileType = sourcecode.nasm; name = vmx_ops.asm; path = ../../../core/vmx_ops.asm; sourceTree = "<group>"; };
+ 64BB0CD120F36C470064593A /* ia32_ops.asm */ = {isa = PBXFileReference; explicitFileType = sourcecode.nasm; name = ia32_ops.asm; path = ../../../core/ia32_ops.asm; sourceTree = "<group>"; };
6E2DBBCB18EB6125003B66C9 /* page_walker.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = page_walker.c; path = ../../../core/page_walker.c; sourceTree = "<group>"; };
6E2DBBCD18EB6155003B66C9 /* page_walker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = page_walker.h; sourceTree = "<group>"; };
+ A669096A20F9985300739075 /* ia32.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ia32.c; path = ../../../core/ia32.c; sourceTree = "<group>"; };
B98ECF9C13A059BB00485DDB /* cpu.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = cpu.c; path = ../../../core/cpu.c; sourceTree = SOURCE_ROOT; };
B98ECF9D13A059BB00485DDB /* dump_vmcs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = dump_vmcs.c; path = ../../../core/dump_vmcs.c; sourceTree = SOURCE_ROOT; };
B98ECF9E13A059BB00485DDB /* hax.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = hax.c; path = ../../../core/hax.c; sourceTree = SOURCE_ROOT; };
@@ -213,10 +213,12 @@
247142CAFF3F8F9811CA285C /* Source */ = {
isa = PBXGroup;
children = (
+ 64BB0CD120F36C470064593A /* ia32_ops.asm */,
+ 64BB0CD020F36C470064593A /* vmx_ops.asm */,
+ A669096A20F9985300739075 /* ia32.c */,
6496936E20D8AE0000C9BBAF /* cpuid.c */,
6E2DBBCB18EB6125003B66C9 /* page_walker.c */,
43038AD9145F94190014BEE6 /* memory.c */,
- 4BCC4E0113FB6729005E4BE4 /* asm */,
22BFCFD513A59A8200AD9F0F /* vtlb.c */,
CFD697461ED2DC9700F10631 /* gpa_space.c */,
CF0539AC1EE536CB00FAD569 /* chunk.c */,
@@ -271,16 +273,6 @@
path = ../../../include;
sourceTree = SOURCE_ROOT;
};
- 4BCC4E0113FB6729005E4BE4 /* asm */ = {
- isa = PBXGroup;
- children = (
- 4BCC4E0213FB6729005E4BE4 /* ia32.c */,
- 4BCC4E0313FB6729005E4BE4 /* segments.c */,
- 4BCC4E0413FB6729005E4BE4 /* vmcs.c */,
- );
- path = asm;
- sourceTree = "<group>";
- };
B98ECF9F13A059BB00485DDB /* include */ = {
isa = PBXGroup;
children = (
@@ -443,6 +435,7 @@
43C9A9E4138DD459000A1071 /* com_intel_hax_component.c in Sources */,
43F857E113931E75008A93D6 /* com_intel_hax_mem.cpp in Sources */,
B98ECFB613A059BB00485DDB /* cpu.c in Sources */,
+ 64BB0CD220F36C470064593A /* vmx_ops.asm in Sources */,
CF0539AD1EE536CB00FAD569 /* chunk.c in Sources */,
64B85BE91EF4D34D00223ABD /* ept2.c in Sources */,
B98ECFB713A059BB00485DDB /* dump_vmcs.c in Sources */,
@@ -462,12 +455,11 @@
22BFCFCE13A59A4300AD9F0F /* ept.c in Sources */,
22BFCFD213A59A6500AD9F0F /* intr_exc.c in Sources */,
22BFCFD613A59A8200AD9F0F /* vtlb.c in Sources */,
- 4BCC4E0513FB6729005E4BE4 /* ia32.c in Sources */,
- 4BCC4E0613FB6729005E4BE4 /* segments.c in Sources */,
64B72B851EDFFF7E00A8C202 /* hax_host_mem.cpp in Sources */,
- 4BCC4E0713FB6729005E4BE4 /* vmcs.c in Sources */,
+ A669096B20F9985300739075 /* ia32.c in Sources */,
43038ADA145F94190014BEE6 /* memory.c in Sources */,
645626211EEFF720005280EF /* ept_tree.c in Sources */,
+ 64BB0CD320F36C470064593A /* ia32_ops.asm in Sources */,
6496936F20D8AE0000C9BBAF /* cpuid.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
diff --git a/include/asm.h b/include/asm.h
index 60c22607..74131d2e 100644
--- a/include/asm.h
+++ b/include/asm.h
@@ -33,46 +33,60 @@
#include "hax_types.h"
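+// The routines declared below are implemented in NASM; on 32-bit Windows
+// they are assembled as cdecl, so the C prototypes must say so explicitly.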
+#ifdef _M_IX86
+#define ASMCALL __cdecl
+#else // !_M_IX86
+#define ASMCALL
+#endif // _M_IX86
+
union cpuid_args_t;
struct vcpu_t;
struct vcpu_state_t;
struct invept_desc;
-mword get_cr0(void);
-mword get_cr2(void);
-uint64 get_cr3(void);
-mword get_cr4(void);
-mword get_dr0(void);
-mword get_dr1(void);
-mword get_dr2(void);
-mword get_dr3(void);
-mword get_dr6(void);
-mword get_dr7(void);
-
-void set_cr0(mword val);
-void set_cr2(mword val);
-void set_cr3(mword val);
-void set_cr4(mword val);
-void set_dr0(mword val);
-void set_dr1(mword val);
-void set_dr2(mword val);
-void set_dr3(mword val);
-void set_dr6(mword val);
-void set_dr7(mword val);
-
-uint16 get_kernel_cs(void);
-uint16 get_kernel_ds(void);
-uint16 get_kernel_es(void);
-uint16 get_kernel_ss(void);
-uint16 get_kernel_gs(void);
-uint16 get_kernel_fs(void);
-
-void set_kernel_ds(uint16 val);
-void set_kernel_es(uint16 val);
-void set_kernel_gs(uint16 val);
-void set_kernel_fs(uint16 val);
-
-mword get_rip(void);
+mword ASMCALL get_cr0(void);
+mword ASMCALL get_cr2(void);
+uint64 ASMCALL get_cr3(void);
+mword ASMCALL get_cr4(void);
+mword ASMCALL get_dr0(void);
+mword ASMCALL get_dr1(void);
+mword ASMCALL get_dr2(void);
+mword ASMCALL get_dr3(void);
+mword ASMCALL get_dr6(void);
+mword ASMCALL get_dr7(void);
+
+void ASMCALL set_cr0(mword val);
+void ASMCALL set_cr2(mword val);
+void ASMCALL set_cr3(mword val);
+void ASMCALL set_cr4(mword val);
+void ASMCALL set_dr0(mword val);
+void ASMCALL set_dr1(mword val);
+void ASMCALL set_dr2(mword val);
+void ASMCALL set_dr3(mword val);
+void ASMCALL set_dr6(mword val);
+void ASMCALL set_dr7(mword val);
+
+uint16 ASMCALL get_kernel_cs(void);
+uint16 ASMCALL get_kernel_ds(void);
+uint16 ASMCALL get_kernel_es(void);
+uint16 ASMCALL get_kernel_ss(void);
+uint16 ASMCALL get_kernel_gs(void);
+uint16 ASMCALL get_kernel_fs(void);
+
+void ASMCALL set_kernel_ds(uint16 val);
+void ASMCALL set_kernel_es(uint16 val);
+void ASMCALL set_kernel_gs(uint16 val);
+void ASMCALL set_kernel_fs(uint16 val);
+
+void ASMCALL asm_btr(uint8 *addr, uint bit);
+void ASMCALL asm_bts(uint8 *addr, uint bit);
+void ASMCALL asm_fxinit(void);
+void ASMCALL asm_fxsave(mword *addr);
+void ASMCALL asm_fxrstor(mword *addr);
+void ASMCALL asm_cpuid(union cpuid_args_t *state);
+
+void ASMCALL __nmi(void);
+uint32 ASMCALL __fls(uint32 bit32);
uint64 ia32_rdmsr(uint32 reg);
void ia32_wrmsr(uint32 reg, uint64 val);
@@ -86,39 +100,14 @@ void fxrstor(mword *addr);
void btr(uint8 *addr, uint bit);
void bts(uint8 *addr, uint bit);
-uint64 get_kernel_rflags(void);
-void __nmi(void);
-uint32 __fls(uint32 bit32);
-
-void load_kernel_ldt(uint16 sel);
-uint16 get_kernel_tr_selector(void);
-uint16 get_kernel_ldt(void);
-
-void set_kernel_gdt(struct system_desc_t *sys_desc);
-void set_kernel_idt(struct system_desc_t *sys_desc);
-void get_kernel_gdt(struct system_desc_t *sys_desc);
-void get_kernel_idt(struct system_desc_t *sys_desc);
-void __handle_cpuid(union cpuid_args_t *state);
-
-vmx_error_t __vmxon(paddr_t addr);
-vmx_error_t __vmxoff(void);
-
-vmx_error_t __vmclear(paddr_t addr);
-vmx_error_t __vmptrld(paddr_t addr);
-paddr_t __vmptrst(void);
-
-uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component);
-uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component);
-uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component);
-void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
- component_index_t component, mword source_val);
-void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val);
-void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val);
-
-uint64 __vmx_run(struct vcpu_state_t *state, uint16 launch);
+uint64 ASMCALL get_kernel_rflags(void);
+uint16 ASMCALL get_kernel_tr_selector(void);
-vmx_error_t __invept(uint type, struct invept_desc *desc);
+void ASMCALL set_kernel_gdt(struct system_desc_t *sys_desc);
+void ASMCALL set_kernel_idt(struct system_desc_t *sys_desc);
+void ASMCALL set_kernel_ldt(uint16 sel);
+void ASMCALL get_kernel_gdt(struct system_desc_t *sys_desc);
+void ASMCALL get_kernel_idt(struct system_desc_t *sys_desc);
+uint16 ASMCALL get_kernel_ldt(void);
#endif // HAX_ASM_H_
diff --git a/include/darwin/hax_mac.h b/include/darwin/hax_mac.h
index b0f32409..881ac53e 100644
--- a/include/darwin/hax_mac.h
+++ b/include/darwin/hax_mac.h
@@ -197,7 +197,7 @@ extern int default_hax_log_level;
#define hax_panic_vcpu(v, x...) { \
printf("haxm_panic: " x); \
- v->paniced = 1; \
+ v->panicked = 1; \
}
#define ASSERT(condition) assert(condition)
diff --git a/include/hax_types.h b/include/hax_types.h
index 631ab4e8..dff7736d 100644
--- a/include/hax_types.h
+++ b/include/hax_types.h
@@ -202,6 +202,4 @@ typedef uint64 vaddr_t;
extern int32 hax_page_size;
-typedef mword vmx_error_t;
-
#endif // HAX_TYPES_H_
diff --git a/windows/IntelHaxm.vcxproj b/windows/IntelHaxm.vcxproj
index 9a2cefcc..f1e2b273 100644
--- a/windows/IntelHaxm.vcxproj
+++ b/windows/IntelHaxm.vcxproj
@@ -123,7 +123,6 @@
-
diff --git a/windows/amd64/ia32.asm b/windows/amd64/ia32.asm
deleted file mode 100644
index 45322b7a..00000000
--- a/windows/amd64/ia32.asm
+++ /dev/null
@@ -1,342 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-option casemap:none
-
-VCPU_STATE STRUCT
- _rax QWORD ?
- _rcx QWORD ?
- _rdx QWORD ?
- _rbx QWORD ?
- _rsp QWORD ?
- _rbp QWORD ?
- _rsi QWORD ?
- _rdi QWORD ?
- _r8 QWORD ?
- _r9 QWORD ?
- _r10 QWORD ?
- _r11 QWORD ?
- _r12 QWORD ?
- _r13 QWORD ?
- _r14 QWORD ?
- _r15 QWORD ?
-VCPU_STATE ENDS
-
-CPUID_ARGS STRUCT
- _eax DWORD ?
- _ecx DWORD ?
- _edx DWORD ?
- _ebx DWORD ?
-CPUID_ARGS ENDS
-
-.data
-;
-
-.code
-
-asm_disable_irq PROC public
- cli
- ret
-asm_disable_irq ENDP
-
-asm_enable_irq PROC public
- sti
- ret
-asm_enable_irq ENDP
-
-get_cr0 PROC public
- xor rax, rax
- mov rax, cr0
- ret
-get_cr0 ENDP
-
-get_cr2 PROC public
- xor rax, rax
- mov rax, cr2
- ret
-get_cr2 ENDP
-
-get_cr3 PROC public
- xor rax, rax
- mov rax, cr3
- ret
-get_cr3 ENDP
-
-get_cr4 PROC public
- xor rax, rax
- mov rax, cr4
- ret
-get_cr4 ENDP
-
-get_dr0 PROC public
- xor rax, rax
- mov rax, dr0
- ret
-get_dr0 ENDP
-
-get_dr1 PROC public
- xor rax, rax
- mov rax, dr1
- ret
-get_dr1 ENDP
-
-get_dr2 PROC public
- xor rax, rax
- mov rax, dr2
- ret
-get_dr2 ENDP
-
-get_dr3 PROC public
- xor rax, rax
- mov rax, dr3
- ret
-get_dr3 ENDP
-
-get_dr6 PROC public
- xor rax, rax
- mov rax, dr6
- ret
-get_dr6 ENDP
-
-get_dr7 PROC public
- xor rax, rax
- mov rax, dr7
- ret
-get_dr7 ENDP
-
-set_cr0 PROC public
- mov cr0, rcx
- ret
-set_cr0 ENDP
-
-set_cr2 PROC public
- mov cr2, rcx
- ret
-set_cr2 ENDP
-
-set_cr3 PROC public
- mov cr3, rcx
- ret
-set_cr3 ENDP
-
-set_cr4 PROC public
- mov cr4, rcx
- ret
-set_cr4 ENDP
-
-set_dr0 PROC public
- mov dr0, rcx
- ret
-set_dr0 ENDP
-
-set_dr1 PROC public
- mov dr1, rcx
- ret
-set_dr1 ENDP
-
-set_dr2 PROC public
- mov dr2, rcx
- ret
-set_dr2 ENDP
-
-set_dr3 PROC public
- mov dr3, rcx
- ret
-set_dr3 ENDP
-
-set_dr6 PROC public
- mov dr6, rcx
- ret
-set_dr6 ENDP
-
-set_dr7 PROC public
- mov dr7, rcx
- ret
-set_dr7 ENDP
-
-get_kernel_cs PROC public
- xor rax, rax
- mov ax, cs
- ret
-get_kernel_cs ENDP
-
-get_kernel_ds PROC public
- xor rax, rax
- mov ax, ds
- ret
-get_kernel_ds ENDP
-
-get_kernel_es PROC public
- xor rax, rax
- mov ax, es
- ret
-get_kernel_es ENDP
-
-get_kernel_ss PROC public
- xor rax, rax
- mov ax, ss
- ret
-get_kernel_ss ENDP
-
-get_kernel_gs PROC public
- xor rax, rax
- mov ax, gs
- ret
-get_kernel_gs ENDP
-
-get_kernel_fs PROC public
- xor rax, rax
- mov ax, fs
- ret
-get_kernel_fs ENDP
-
-set_kernel_ds PROC public
- mov ds, cx
- ret
-set_kernel_ds ENDP
-
-set_kernel_es PROC public
- mov es, cx
- ret
-set_kernel_es ENDP
-
-set_kernel_gs PROC public
- mov gs, cx
- ret
-set_kernel_gs ENDP
-
-set_kernel_fs PROC public
- mov fs, cx
- ret
-set_kernel_fs ENDP
-
-asm_rdmsr PROC public
- xor rax, rax
- xor rdx, rdx
- rdmsr
- mov cl, 32
- shl rdx, cl
- or rax, rdx
- ret
-asm_rdmsr ENDP
-
-asm_wrmsr PROC public
- push rbx
- xor rax, rax
- xor rbx, rbx
- ;mov 1st para to rbx
- mov rbx, rcx
- mov cl, 32
- ;mov 2nd para to rax
- ;got rax
- mov rax, rdx
- shl rax, cl
- shr rax, cl
- ;got rdx
- shr rdx, cl
- mov rcx, rbx
- wrmsr
- pop rbx
- ret
-asm_wrmsr ENDP
-
-asm_rdtsc PROC public
- xor rax, rax
- xor rdx, rdx
- xor rcx, rcx
- rdtsc
- mov cl, 32
- shl rdx, cl
- or rax, rdx
- ret
-asm_rdtsc ENDP
-
-asm_fxinit PROC public
- finit
- ret
-asm_fxinit ENDP
-
-asm_fxsave PROC public
- fxsave [rcx]
- ret
-asm_fxsave ENDP
-
-asm_fxrstor PROC public
- fxrstor [rcx]
- ret
-asm_fxrstor ENDP
-
-asm_btr PROC public
- lock btr [rcx], rdx
- ret
-asm_btr ENDP
-
-asm_bts PROC public
- lock bts [rcx], rdx
- ret
-asm_bts ENDP
-
-get_kernel_rflags PROC public
- xor rax, rax
- pushf
- pop ax
- ret
-get_kernel_rflags ENDP
-
-__nmi PROC public
- int 2h
- ret
-__nmi ENDP
-
-__fls PROC public
- xor eax, eax
- bsr eax, ecx
- ret
-__fls ENDP
-
-__handle_cpuid PROC public
- push rbx
- xor rax, rax
- xor rbx, rbx
- xor rdx, rdx
- xor r8, r8
- mov r8, rcx
- mov eax, [r8].CPUID_ARGS._eax
- mov ecx, [r8].CPUID_ARGS._ecx
- cpuid
- mov [r8].CPUID_ARGS._eax, eax
- mov [r8].CPUID_ARGS._ebx, ebx
- mov [r8].CPUID_ARGS._ecx, ecx
- mov [r8].CPUID_ARGS._edx, edx
- pop rbx
- ret
-__handle_cpuid ENDP
-
-end
diff --git a/windows/amd64/segments.asm b/windows/amd64/segments.asm
deleted file mode 100644
index 91d8b5c8..00000000
--- a/windows/amd64/segments.asm
+++ /dev/null
@@ -1,79 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-option casemap:none
-
-SYSDESC_T STRUCT
- __limit WORD ?
- __base QWORD ?
-SYSDESC_T ENDS
-
-.data
-;
-
-.code
-
-load_kernel_ldt PROC public
- lldt cx
- ret
-load_kernel_ldt ENDP
-
-get_kernel_tr_selector PROC public
- xor rax, rax
- str ax
- ret
-get_kernel_tr_selector ENDP
-
-get_kernel_ldt PROC public
- xor rax, rax
- sldt ax
- ret
-get_kernel_ldt ENDP
-
-get_kernel_gdt PROC public
- sgdt [rcx]
- ret
-get_kernel_gdt ENDP
-
-get_kernel_idt PROC public
- sidt [rcx]
- ret
-get_kernel_idt ENDP
-
-set_kernel_gdt PROC public
- lgdt fword ptr [rcx]
- ret
-set_kernel_gdt ENDP
-
-set_kernel_idt PROC public
- lidt fword ptr [rcx]
- ret
-set_kernel_idt ENDP
-end
diff --git a/windows/amd64/vmcs.asm b/windows/amd64/vmcs.asm
deleted file mode 100644
index 4f4a5918..00000000
--- a/windows/amd64/vmcs.asm
+++ /dev/null
@@ -1,238 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-option casemap:none
-
-VCPU_STATE STRUCT
- _rax QWORD ?
- _rcx QWORD ?
- _rdx QWORD ?
- _rbx QWORD ?
- _rsp QWORD ?
- _rbp QWORD ?
- _rsi QWORD ?
- _rdi QWORD ?
- _r8 QWORD ?
- _r9 QWORD ?
- _r10 QWORD ?
- _r11 QWORD ?
- _r12 QWORD ?
- _r13 QWORD ?
- _r14 QWORD ?
- _r15 QWORD ?
-VCPU_STATE ENDS
-
-INVEPT_DESC STRUCT
- eptp QWORD ?
- rsvd QWORD 0
-INVEPT_DESC ENDS
-
-.data
-vmx_fail_mask dword 0041h
-
-.code
-
-__vmxon PROC public
- local x:qword
- xor rax, rax
- mov x, rcx
- vmxon x
- pushfq
- pop rax
- and eax, vmx_fail_mask
- ret
-__vmxon ENDP
-
-__vmxoff PROC public
- xor rax, rax
- vmxoff
- pushfq
- pop rax
- and eax, vmx_fail_mask
- ret
-__vmxoff ENDP
-
-__vmclear PROC public
- local x:qword
- xor rax, rax
- mov x, rcx
- vmclear x
- pushfq
- pop rax
- and eax, vmx_fail_mask
- ret
-__vmclear ENDP
-
-__vmptrld PROC public
- local x:qword
- xor rax, rax
- mov x, rcx
- vmptrld x
- pushfq
- pop rax
- and eax, vmx_fail_mask
- ret
-__vmptrld ENDP
-
-asm_vmptrst PROC public
- local x:qword
- vmptrst x
- mov rax, x
- mov [rcx], rax
- pushfq
- pop rax
- ret
-asm_vmptrst ENDP
-
-__vmread PROC public
- xor rax, rax
- vmread rax, rcx
- ret
-__vmread ENDP
-
-__vmwrite PROC public
- vmwrite rcx, rdx
- ret
-__vmwrite ENDP
-
-__vmx_run PROC public
- pushfq
- push r8
- push r9
- push r10
- push r11
- push r12
- push r13
- push r14
- push r15
- push rcx
- push rdx
- push rsi
- push rdi
- push rbp
- push rax
- push rbx
- ;write host rsp
- xor rbx, rbx
- mov ebx, 6c14h
- mov rax, rsp
- sub rax, 8h
- vmwrite rbx, rax
- pop rbx
- pop rax
- push rax
- push rbx
- ;push the state
- push rcx
- cmp dx, 1h
- mov rax, rcx
- mov rcx, [rax].VCPU_STATE._rcx
- mov rdx, [rax].VCPU_STATE._rdx
- mov rbx, [rax].VCPU_STATE._rbx
- mov rbp, [rax].VCPU_STATE._rbp
- mov rsi, [rax].VCPU_STATE._rsi
- mov rdi, [rax].VCPU_STATE._rdi
- mov r8, [rax].VCPU_STATE._r8
- mov r9, [rax].VCPU_STATE._r9
- mov r10, [rax].VCPU_STATE._r10
- mov r11, [rax].VCPU_STATE._r11
- mov r12, [rax].VCPU_STATE._r12
- mov r13, [rax].VCPU_STATE._r13
- mov r14, [rax].VCPU_STATE._r14
- mov r15, [rax].VCPU_STATE._r15
- mov rax, [rax].VCPU_STATE._rax
- je RESUME
- vmlaunch
- jmp EXIT_ENTRY_FAIL
- RESUME:
- vmresume
- jmp EXIT_ENTRY_FAIL
- EXIT_ENTRY::
- push rdi
- mov rdi, [rsp+8]
- mov [rdi].VCPU_STATE._rax, rax
- mov [rdi].VCPU_STATE._rcx, rcx
- mov [rdi].VCPU_STATE._rdx, rdx
- pop rcx
- mov [rdi].VCPU_STATE._rbx, rbx
- mov [rdi].VCPU_STATE._rbp, rbp
- mov [rdi].VCPU_STATE._rsi, rsi
- mov [rdi].VCPU_STATE._rdi, rcx
- mov [rdi].VCPU_STATE._r8, r8
- mov [rdi].VCPU_STATE._r9, r9
- mov [rdi].VCPU_STATE._r10, r10
- mov [rdi].VCPU_STATE._r11, r11
- mov [rdi].VCPU_STATE._r12, r12
- mov [rdi].VCPU_STATE._r13, r13
- mov [rdi].VCPU_STATE._r14, r14
- mov [rdi].VCPU_STATE._r15, r15
- EXIT_ENTRY_FAIL:
- ; pop the state
- pop rbx
- pop rbx
- pop rax
- pop rbp
- pop rdi
- pop rsi
- pop rdx
- pop rcx
- pop r15
- pop r14
- pop r13
- pop r12
- pop r11
- pop r10
- pop r9
- pop r8
- pushfq
- pop rax
- popfq
- ret
-__vmx_run ENDP
-
-get_rip PROC public
- xor rax, rax
- ;XXX is it right?
- lea rax, EXIT_ENTRY
- ret
-get_rip ENDP
-
-; 1st parameter (RCX): INVEPT type
-; 2nd parameter (RDX): HVA of INVEPT descriptor
-__invept PROC public
- xor rax, rax
- invept rcx, OWORD PTR [rdx]
- pushfq
- pop rax
- and eax, vmx_fail_mask
- ret
-__invept ENDP
-
-end
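
A note on the 0041h mask used throughout the file above: it selects CF
(RFLAGS bit 0) and ZF (bit 6), which is exactly how VMX instructions report
failure. CF=1 means VMfailInvalid (no current VMCS), ZF=1 means VMfailValid
(an error number is available in the VM-instruction error field, encoding
4400h), and both bits clear means the instruction succeeded. The
vmx_result_t convention that replaces these wrappers encodes the same
three-way outcome as a return value instead of raw flags; a hedged sketch of
the decoding (only VMX_SUCCEED is confirmed by the new code, the other member
names are assumptions):

    #include <stdint.h>

    #define RFLAGS_CF (1ULL << 0)   /* set => VMfailInvalid */
    #define RFLAGS_ZF (1ULL << 6)   /* set => VMfailValid */

    typedef enum vmx_result {
        VMX_SUCCEED      = 0,
        VMX_FAIL_VALID   = 1,   /* error number in VM-instruction error field */
        VMX_FAIL_INVALID = 2    /* no current VMCS, so no error number */
    } vmx_result_t;

    static inline vmx_result_t vmx_result_from_rflags(uint64_t rflags)
    {
        if (rflags & RFLAGS_CF)
            return VMX_FAIL_INVALID;
        if (rflags & RFLAGS_ZF)
            return VMX_FAIL_VALID;
        return VMX_SUCCEED;
    }
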
diff --git a/windows/amd64/wrapper.c b/windows/amd64/wrapper.c
deleted file mode 100644
index c6240cdc..00000000
--- a/windows/amd64/wrapper.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "..\hax_win.h"
-
-struct qword_val {
- uint32 low;
- uint32 high;
-};
-
-extern void asm_enable_irq(void);
-extern void asm_disable_irq(void);
-extern uint64 asm_rdmsr(uint32 reg);
-extern void asm_wrmsr(uint32 reg, uint64 val);
-
-extern uint64 asm_rdtsc();
-
-extern void asm_fxinit(void);
-extern void asm_fxsave(mword *addr);
-extern void asm_fxrstor(mword *addr);
-
-extern void asm_btr(uint8 *addr, uint bit);
-extern void asm_bts(uint8 *addr, uint bit);
-
-extern void __vmwrite(component_index_t component, mword val);
-extern uint64 __vmread(component_index_t component);
-
-extern uint64 asm_rdmsr(uint32 reg);
-extern void asm_wrmsr(uint32 reg, uint64 val);
-
-extern void asm_vmptrst(paddr_t *address);
-
-paddr_t __vmptrst(void)
-{
- paddr_t address = 0;
- asm_vmptrst(&address);
- return address;
-}
-
-void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
- component_index_t component, mword source_val)
-{
- __vmwrite(component, source_val);
-}
-
-void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val)
-{
- __vmwrite(component, source_val);
-}
-
-void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
- component_index_t component, uint64 source_val)
-{
- __vmwrite(component, source_val);
-}
-
-uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = __vmread(component);
- return val;
-}
-
-uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = __vmread(component);
- return val;
-}
-
-uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = __vmread(component);
- return val;
-}
-
-uint64 rdtsc(void)
-{
- return asm_rdtsc();
-}
-
-void fxinit(void)
-{
- asm_fxinit();
-}
-
-void fxsave(mword *addr)
-{
- asm_fxsave(addr);
-}
-
-void fxrstor(mword *addr)
-{
- asm_fxrstor(addr);
-}
-
-void btr(uint8 *addr, uint bit)
-{
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
- asm_btr(base, offset);
-}
-
-void bts(uint8 *addr, uint bit)
-{
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
- asm_bts(base, offset);
-}
-
-uint64 ia32_rdmsr(uint32 reg)
-{
- return asm_rdmsr(reg);
-}
-void ia32_wrmsr(uint32 reg, uint64 val)
-{
- asm_wrmsr(reg, val);
-}
-
-void hax_enable_irq(void)
-{
- asm_enable_irq();
-}
-
-void hax_disable_irq(void)
-{
- asm_disable_irq();
-}
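
The btr()/bts() wrappers above are more subtle than they look: with a memory
operand and a register bit index, BTR/BTS treat the index as an offset from
the operand's address and may touch bytes well outside it, so the wrapper
first reduces the index to a byte base plus an offset below 8. A portable C
sketch of the same reduction (unlike the lock-prefixed assembly stubs, this
version is not atomic):

    #include <stdint.h>

    static inline void bit_clear(uint8_t *addr, unsigned int bit)
    {
        uint8_t *base = addr + bit / 8;   /* byte containing the bit */
        unsigned int off = bit % 8;       /* position within that byte */
        *base &= (uint8_t)~(1u << off);   /* what `lock btr` clears */
    }

    static inline void bit_set(uint8_t *addr, unsigned int bit)
    {
        uint8_t *base = addr + bit / 8;
        unsigned int off = bit % 8;
        *base |= (uint8_t)(1u << off);    /* what `lock bts` sets */
    }
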
diff --git a/windows/hax_entry.c b/windows/hax_entry.c
index 58f2fb64..601ac19a 100644
--- a/windows/hax_entry.c
+++ b/windows/hax_entry.c
@@ -60,7 +60,7 @@ static int hax_host_init(void)
/* we get the max_cpus from real_cpus in darwin, so add 1 here */
max_cpus++;
- ret =smpc_dpc_init();
+ ret = smpc_dpc_init();
if (ret < 0) {
return ret;
}
diff --git a/windows/hax_mm.c b/windows/hax_mm.c
index b9a7c2b3..1988ab17 100644
--- a/windows/hax_mm.c
+++ b/windows/hax_mm.c
@@ -195,7 +195,7 @@ uint64_t get_hpfn_from_pmem(struct hax_vcpu_mem *pmem, uint64_t va)
else
return kphys.QuadPart >> page_shift;
} else {
- unsigned long long index =0;
+ unsigned long long index = 0;
PMDL pmdl = NULL;
PPFN_NUMBER ppfnnum;
diff --git a/windows/i386/ia32.asm b/windows/i386/ia32.asm
deleted file mode 100644
index faca7ad0..00000000
--- a/windows/i386/ia32.asm
+++ /dev/null
@@ -1,387 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-.686p
-.mmx
-.xmm
-.model flat, stdcall
-option casemap:none
-
-QWORD_STRUCT STRUCT
- _low DWORD ?
- _high DWORD ?
-QWORD_STRUCT ENDS
-
-VCPU_STATE STRUCT
- _eax DWORD ?
- _pad0 DWORD ?
- _ecx DWORD ?
- _pad1 DWORD ?
- _edx DWORD ?
- _pad2 DWORD ?
- _ebx DWORD ?
- _pad3 DWORD ?
- _esp DWORD ?
- _pad4 DWORD ?
- _ebp DWORD ?
- _pad5 DWORD ?
- _esi DWORD ?
- _pad6 DWORD ?
- _edi DWORD ?
- _pad7 DWORD ?
-VCPU_STATE ENDS
-
-CPUID_ARGS STRUCT
- _eax DWORD ?
- _ecx DWORD ?
- _edx DWORD ?
- _ebx DWORD ?
-CPUID_ARGS ENDS
-
-.data
-;
-
-.code
-start:
-
-get_cr0 PROC PUBLIC
- xor eax, eax
- mov eax, cr0
- ret
-get_cr0 ENDP
-
-get_cr2 PROC PUBLIC
- xor eax, eax
- mov eax, cr2
- ret
-get_cr2 ENDP
-
-get_cr3 PROC PUBLIC
- xor eax, eax
- mov eax, cr3
- ret
-get_cr3 ENDP
-
-get_cr4 PROC PUBLIC
- xor eax, eax
- mov eax, cr4
- ret
-get_cr4 ENDP
-
-asm_disable_irq PROC C public
- cli
- ret
-asm_disable_irq ENDP
-
-asm_enable_irq PROC C public
- sti
- ret
-asm_enable_irq ENDP
-
-get_dr0 PROC PUBLIC
- xor eax, eax
- mov eax, dr0
- ret
-get_dr0 ENDP
-
-get_dr1 PROC PUBLIC
- xor eax, eax
- mov eax, dr1
- ret
-get_dr1 ENDP
-
-get_dr2 PROC PUBLIC
- xor eax, eax
- mov eax, dr2
- ret
-get_dr2 ENDP
-
-get_dr3 PROC PUBLIC
- xor eax, eax
- mov eax, dr3
- ret
-get_dr3 ENDP
-
-get_dr6 PROC PUBLIC
- xor eax, eax
- mov eax, dr6
- ret
-get_dr6 ENDP
-
-get_dr7 PROC PUBLIC
- xor eax, eax
- mov eax, dr7
- ret
-get_dr7 ENDP
-
-set_cr0 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov cr0, eax
- ret
-set_cr0 ENDP
-
-set_cr2 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov cr2, eax
- ret
-set_cr2 ENDP
-
-set_cr3 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov cr3, eax
- ret
-set_cr3 ENDP
-
-set_cr4 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov cr4, eax
- ret
-set_cr4 ENDP
-
-set_dr0 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr0, eax
- ret
-set_dr0 ENDP
-
-set_dr1 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr1, eax
- ret
-set_dr1 ENDP
-
-set_dr2 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr2, eax
- ret
-set_dr2 ENDP
-
-set_dr3 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr3, eax
- ret
-set_dr3 ENDP
-
-set_dr6 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr6, eax
- ret
-set_dr6 ENDP
-
-set_dr7 PROC PUBLIC USES EAX x:dword
- xor eax, eax
- mov eax, x
- mov dr7, eax
- ret
-set_dr7 ENDP
-
-get_kernel_cs PROC PUBLIC
- xor eax, eax
- mov ax, cs
- ret
-get_kernel_cs ENDP
-
-get_kernel_ds PROC PUBLIC
- xor eax, eax
- mov ax, ds
- ret
-get_kernel_ds ENDP
-
-get_kernel_es PROC PUBLIC
- xor eax, eax
- mov ax, es
- ret
-get_kernel_es ENDP
-
-get_kernel_ss PROC PUBLIC
- xor eax, eax
- mov ax, ss
- ret
-get_kernel_ss ENDP
-
-get_kernel_gs PROC PUBLIC
- xor eax, eax
- mov ax, gs
- ret
-get_kernel_gs ENDP
-
-get_kernel_fs PROC PUBLIC
- xor eax, eax
- mov ax, fs
- ret
-get_kernel_fs ENDP
-
-set_kernel_ds PROC PUBLIC USES EAX x:word
- xor eax, eax
- mov ax, x
- mov ds, ax
- ret
-set_kernel_ds ENDP
-
-set_kernel_es PROC PUBLIC USES EAX x:word
- xor eax, eax
- mov ax, x
- mov es, ax
- ret
-set_kernel_es ENDP
-
-set_kernel_gs PROC PUBLIC USES EAX x:word
- xor eax, eax
- mov ax, x
- mov gs, ax
- ret
-set_kernel_gs ENDP
-
-set_kernel_fs PROC PUBLIC USES EAX x:word
- xor eax, eax
- mov ax, x
- mov fs, ax
- ret
-set_kernel_fs ENDP
-
-asm_rdmsr PROC PUBLIC USES EAX EDX ECX x:dword, y:ptr QWORD_STRUCT
- xor eax, eax
- xor edx, edx
- xor ecx, ecx
- mov ecx, x
- rdmsr
- xor ecx, ecx
- mov ecx, y
- mov [ecx].QWORD_STRUCT._low, eax
- mov [ecx].QWORD_STRUCT._high, edx
- ret
-asm_rdmsr ENDP
-
-asm_wrmsr PROC PUBLIC USES EAX EDX ECX x:dword, y:ptr QWORD_STRUCT
- xor eax, eax
- xor edx, edx
- xor ecx, ecx
- mov ecx, y
- mov eax, [ecx].QWORD_STRUCT._low
- mov edx, [ecx].QWORD_STRUCT._high
- xor ecx, ecx
- mov ecx, x
- wrmsr
- ret
-asm_wrmsr ENDP
-
-asm_rdtsc PROC PUBLIC USES EAX EDX ECX x:ptr QWORD_STRUCT
- xor eax, eax
- xor edx, edx
- xor ecx, ecx
- mov ecx, x
- rdtsc
- mov [ecx].QWORD_STRUCT._low, eax
- mov [ecx].QWORD_STRUCT._high, edx
- ret
-asm_rdtsc ENDP
-
-asm_fxinit PROC PUBLIC
- finit
- ret
-asm_fxinit ENDP
-
-asm_fxsave PROC PUBLIC USES EAX x:ptr byte
- xor eax, eax
- mov eax, x
- fxsave byte ptr [eax]
- ret
-asm_fxsave ENDP
-
-asm_fxrstor PROC PUBLIC USES EAX x:ptr byte
- xor eax, eax
- mov eax, x
- fxrstor byte ptr [eax]
- ret
-asm_fxrstor ENDP
-
-; TODO: Does declaring |x| (bit base address) as "ptr byte" limit the range of
-; |y| (bit offset)? For safety, never call this routine with |y| >= 8
-asm_btr PROC PUBLIC USES EAX x:ptr byte, y:dword
- xor eax, eax
- mov eax, y
- lock btr x, eax
- ret
-asm_btr ENDP
-
-asm_bts PROC PUBLIC USES EAX x:ptr byte, y:dword
- xor eax, eax
- mov eax, y
- lock bts x, eax
- ret
-asm_bts ENDP
-
-get_kernel_rflags PROC PUBLIC
- xor eax, eax
- pushf
- pop ax
- ret
-get_kernel_rflags ENDP
-
-__nmi PROC PUBLIC
- int 2h
- ret
-__nmi ENDP
-
-__fls PROC PUBLIC USES EBX x:dword
- xor eax, eax
- xor ebx, ebx
- mov ebx, x
- bsr eax, ebx
- ret
-__fls ENDP
-
-__handle_cpuid PROC PUBLIC USES EAX EBX EDX ECX ESI x:ptr CPUID_ARGS
- xor eax, eax
- xor ebx, ebx
- xor edx, edx
- xor ecx, ecx
- xor esi, esi
- mov esi, x
- mov eax, [esi].CPUID_ARGS._eax
- mov ecx, [esi].CPUID_ARGS._ecx
- cpuid
- mov [esi].CPUID_ARGS._eax, eax
- mov [esi].CPUID_ARGS._ebx, ebx
- mov [esi].CPUID_ARGS._ecx, ecx
- mov [esi].CPUID_ARGS._edx, edx
- ret
-__handle_cpuid ENDP
-
-end
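
One stub in the file above passes data in both directions: __handle_cpuid
takes a CPUID_ARGS block in which the caller seeds EAX (the leaf) and ECX
(the sub-leaf), and the stub writes all four result registers back into the
same block. A C-side mirror, as a sketch (the field order must match
CPUID_ARGS above; calling-convention qualifiers are omitted here):

    #include <stdint.h>

    typedef struct cpuid_args {
        uint32_t eax;   /* in: leaf;     out: EAX result */
        uint32_t ecx;   /* in: sub-leaf; out: ECX result */
        uint32_t edx;   /* out: EDX result */
        uint32_t ebx;   /* out: EBX result */
    } cpuid_args_t;

    extern void __handle_cpuid(cpuid_args_t *args);

    /* Illustrative use: CPUID.1:ECX bit 5 is the VMX feature flag. */
    static int cpu_has_vmx(void)
    {
        cpuid_args_t a = { 1, 0, 0, 0 };
        __handle_cpuid(&a);
        return (int)((a.ecx >> 5) & 1);
    }
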
diff --git a/windows/i386/segments.asm b/windows/i386/segments.asm
deleted file mode 100644
index 09b9c071..00000000
--- a/windows/i386/segments.asm
+++ /dev/null
@@ -1,94 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-.686p
-.mmx
-.xmm
-.model flat, stdcall
-option casemap:none
-
-SYSDESC_T STRUCT
- __limit WORD ?
- __base DWORD ?
-SYSDESC_T ENDS
-
-.data
-;
-
-.code
-start:
-load_kernel_ldt PROC public USES EAX x:word
- xor eax, eax
- mov ax, x
- lldt ax
- ret
-load_kernel_ldt ENDP
-
-get_kernel_tr_selector PROC public
- xor eax, eax
- str ax
- ret
-get_kernel_tr_selector ENDP
-
-get_kernel_ldt PROC public
- xor eax, eax
- sldt ax
- ret
-get_kernel_ldt ENDP
-
-get_kernel_gdt PROC public USES ECX x:dword
- xor ecx, ecx
- mov ecx, x
- sgdt [ecx]
- ret
-get_kernel_gdt ENDP
-
-get_kernel_idt PROC public USES ECX x:dword
- xor ecx, ecx
- mov ecx, x
- sidt [ecx]
- ret
-get_kernel_idt ENDP
-
-set_kernel_gdt PROC public USES ECX x:dword
- xor ecx, ecx
- mov ecx, x
- lgdt fword ptr [ecx]
- ret
-set_kernel_gdt ENDP
-
-set_kernel_idt PROC public USES ECX x:dword
- xor ecx, ecx
- mov ecx, x
- lidt fword ptr [ecx]
- ret
-set_kernel_idt ENDP
-
-end
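
Same helpers as the 64-bit file, with one size difference worth recording: on
IA-32 the GDTR/IDTR image is only 6 bytes, a 16-bit limit followed by a
32-bit base, which is exactly what the fword ptr operands above address. A
packed C mirror, as a sketch:

    #include <assert.h>
    #include <stdint.h>

    #pragma pack(push, 1)
    typedef struct sys_desc32 {
        uint16_t limit;   /* table size in bytes, minus one */
        uint32_t base;    /* linear address of the table */
    } sys_desc32_t;
    #pragma pack(pop)

    /* "fword" = 6 bytes; without packing the struct would round up to 8. */
    static_assert(sizeof(sys_desc32_t) == 6, "pseudo-descriptor must be packed");
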
diff --git a/windows/i386/vmcs.asm b/windows/i386/vmcs.asm
deleted file mode 100644
index b612b91d..00000000
--- a/windows/i386/vmcs.asm
+++ /dev/null
@@ -1,213 +0,0 @@
-;
-; Copyright (c) 2011 Intel Corporation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions are met:
-;
-; 1. Redistributions of source code must retain the above copyright notice,
-; this list of conditions and the following disclaimer.
-;
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; 3. Neither the name of the copyright holder nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-; POSSIBILITY OF SUCH DAMAGE.
-;
-
-.686p
-.mmx
-.xmm
-.model flat, stdcall
-option casemap:none
-
-QWORD_STRUCT STRUCT
- _low DWORD ?
- _high DWORD ?
-QWORD_STRUCT ENDS
-
-VCPU_STATE_32 STRUCT
- _eax DWORD ?
- _pad0 DWORD ?
- _ecx DWORD ?
- _pad1 DWORD ?
- _edx DWORD ?
- _pad2 DWORD ?
- _ebx DWORD ?
- _pad3 DWORD ?
- _esp DWORD ?
- _pad4 DWORD ?
- _ebp DWORD ?
- _pad5 DWORD ?
- _esi DWORD ?
- _pad6 DWORD ?
- _edi DWORD ?
- _pad7 DWORD ?
-VCPU_STATE_32 ENDS
-
-INVEPT_DESC_32 STRUCT
- eptp DWORD ?
- pad1 DWORD 0
- rsvd DWORD 0
- pad2 DWORD 0
-INVEPT_DESC_32 ENDS
-
-.data
-vmx_fail_mask word 41h
-;
-
-.code
-start:
-
-__vmxon PROC public x:qword
- xor eax, eax
- vmxon x
- pushf
- pop ax
- and ax, vmx_fail_mask
- ret
-__vmxon ENDP
-
-__vmxoff PROC public
- xor eax, eax
- vmxoff
- pushf
- pop ax
- and ax, vmx_fail_mask
- ret
-__vmxoff ENDP
-
-__vmclear PROC public x:qword
- xor eax, eax
- vmclear x
- pushf
- pop ax
- and ax, vmx_fail_mask
- ret
-__vmclear ENDP
-
-__vmptrld PROC public x:qword
- xor eax, eax
- vmptrld x
- pushf
- pop ax
- and ax, vmx_fail_mask
- ret
-__vmptrld ENDP
-
-asm_vmptrst PROC public USES EAX x:ptr qword
- xor eax, eax
- mov eax, x
- vmptrst qword ptr [eax]
- pushf
- pop ax
- ret
-asm_vmptrst ENDP
-
-ia32_asm_vmread PROC public USES EBX x:dword
- xor eax, eax
- xor ebx, ebx
- mov ebx, x
- vmread eax, ebx
- ret
-ia32_asm_vmread ENDP
-
-ia32_asm_vmwrite PROC public USES EAX EBX x:dword, y:dword
- xor eax, eax
- xor ebx, ebx
- mov eax, x
- mov ebx, y
- vmwrite eax, ebx
- ret
-ia32_asm_vmwrite ENDP
-
-__vmx_run PROC public x:ptr VCPU_STATE_32, y:word
- pushfd
- push ecx
- push edx
- push esi
- push edi
- push ebp
- push eax
- push ebx
- ; write host rsp
- mov ebx, 6c14h
- mov eax, esp
- sub eax, 4h
- vmwrite ebx, eax
- pop ebx
- pop eax
- push eax
- push ebx
- ; push the state
- mov eax, x
- mov dx, y
- push eax
- cmp dx, 1h
- mov ecx, [eax].VCPU_STATE_32._ecx
- mov edx, [eax].VCPU_STATE_32._edx
- mov ebx, [eax].VCPU_STATE_32._ebx
- mov ebp, [eax].VCPU_STATE_32._ebp
- mov esi, [eax].VCPU_STATE_32._esi
- mov edi, [eax].VCPU_STATE_32._edi
- mov eax, [eax].VCPU_STATE_32._eax
- je RESUME
- vmlaunch
- jmp EXIT_ENTRY_FAIL
- RESUME:
- vmresume
- jmp EXIT_ENTRY_FAIL
- EXIT_ENTRY::
- push edi
- mov edi, [esp+4]
- mov [edi].VCPU_STATE_32._eax, eax
- mov [edi].VCPU_STATE_32._ecx, ecx
- mov [edi].VCPU_STATE_32._edx, edx
- pop ecx
- mov [edi].VCPU_STATE_32._ebx, ebx
- mov [edi].VCPU_STATE_32._ebp, ebp
- mov [edi].VCPU_STATE_32._esi, esi
- mov [edi].VCPU_STATE_32._edi, ecx
- EXIT_ENTRY_FAIL:
- ; pop the state
- pop eax
- pop ebx
- pop eax
- pop ebp
- pop edi
- pop esi
- pop edx
- pop ecx
- pushfd
- pop eax
- popfd
- ret
-__vmx_run ENDP
-
-get_rip PROC public
- xor eax, eax
- lea eax, EXIT_ENTRY
- ret
-get_rip ENDP
-
-; Unimplemented
-__invept PROC PUBLIC x:dword, y:ptr INVEPT_DESC_32
- ; Just return an error
- or ax, vmx_fail_mask
- ret
-__invept ENDP
-
-end
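
The stack arithmetic in __vmx_run above is the heart of the exit path.
HOST_RSP (VMCS encoding 6C14h) is written as the current ESP minus 4 because
exactly one more dword -- the pointer to the guest register area -- is pushed
before vmlaunch/vmresume, and on VM exit the CPU reloads ESP from HOST_RSP.
EXIT_ENTRY therefore finds that pointer at the top of the stack (at [esp+4]
once it has pushed edi) and spills the guest GPRs into it; get_rip exists
only so the caller can plant EXIT_ENTRY's address in HOST_RIP (encoding
6C16h). The register area pads each 32-bit register to an 8-byte slot so one
layout serves both host widths; a C mirror of VCPU_STATE_32, as a sketch:

    #include <stdint.h>

    struct vcpu_state_32 {
        uint32_t eax, pad0;
        uint32_t ecx, pad1;
        uint32_t edx, pad2;
        uint32_t ebx, pad3;
        uint32_t esp, pad4;   /* not spilled by EXIT_ENTRY; the guest ESP
                               * comes from the VMCS GUEST_RSP field */
        uint32_t ebp, pad5;
        uint32_t esi, pad6;
        uint32_t edi, pad7;
    };
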
diff --git a/windows/i386/wrapper.c b/windows/i386/wrapper.c
deleted file mode 100644
index 1af80b43..00000000
--- a/windows/i386/wrapper.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2011 Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "..\hax_win.h"
-
-struct qword_val {
- uint32 low;
- uint32 high;
-};
-
-extern uint32 ia32_asm_vmread(component_index_t component);
-extern void _cdecl asm_enable_irq(void);
-extern void _cdecl asm_disable_irq(void);
-extern void ia32_asm_vmwrite(component_index_t component, uint32 val);
-
-extern uint64 asm_rdmsr(uint32 reg, struct qword_val *qv);
-extern void asm_wrmsr(uint32 reg, struct qword_val *qv);
-
-extern uint64 asm_rdtsc(struct qword_val *qv);
-
-extern void asm_fxinit(void);
-extern void asm_fxsave(mword *addr);
-extern void asm_fxrstor(mword *addr);
-
-extern void asm_btr(uint8 *addr, uint bit);
-extern void asm_bts(uint8 *addr, uint bit);
-
-extern void asm_vmptrst(paddr_t *addr);
-
-paddr_t __vmptrst(void)
-{
- paddr_t address = 0;
- asm_vmptrst(&address);
- return address;
-}
-
-uint64 ia32_rdmsr(uint32 reg)
-{
- struct qword_val val = {0};
- asm_rdmsr(reg, &val);
-
- return ((uint64)(val.low) | (uint64)(val.high) << 32);
-}
-
-void ia32_wrmsr(uint32 reg, uint64 val)
-{
- struct qword_val tmp = {0};
- uint64 old = val;
-
- tmp.high = (uint32)(val >> 32);
- tmp.low = (uint32)val;
-
- asm_wrmsr(reg, &tmp);
-}
-
-uint64 rdtsc(void)
-{
- struct qword_val val = {0};
- asm_rdtsc(&val);
- return ((uint64)(val.low) | (uint64)(val.high) << 32);
-}
-
-void fxinit(void)
-{
- asm_fxinit();
-}
-
-void fxsave(mword *addr)
-{
- asm_fxsave(addr);
-}
-
-void fxrstor(mword *addr)
-{
- asm_fxrstor(addr);
-}
-
-void btr(uint8 *addr, uint bit)
-{
- // asm_btr() may not be able to handle bit offsets greater than 0xff. For
- // absolute safety, ensure that the bit offset is less than 8.
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
- asm_btr(base, offset);
-}
-
-void bts(uint8 *addr, uint bit)
-{
- uint8 *base = addr + bit / 8;
- uint offset = bit % 8;
- asm_bts(base, offset);
-}
-
-uint32 ia32_vmread(component_index_t component)
-{
- return (ia32_asm_vmread(component));
-}
-
-void ia32_vmwrite(component_index_t component, uint32 val)
-{
- ia32_asm_vmwrite(component, val);
-}
-
-void _vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
- component_index_t component,
- mword source_val)
-{
- ia32_vmwrite(component, source_val);
-}
-
-void _vmx_vmwrite_64(struct vcpu_t *vcpu, const char *name,
- component_index_t component,
- uint64 source_val)
-{
- ia32_vmwrite(component, (uint32)source_val);
- ia32_vmwrite(component + 1, (uint32)(source_val >> 32));
-}
-
-void _vmx_vmwrite_natural(struct vcpu_t *vcpu, const char *name,
- component_index_t component,
- uint64 source_val)
-{
- ia32_vmwrite(component, (uint32)source_val);
-}
-
-uint64 vmx_vmread(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = ia32_vmread(component);
- return val;
-}
-
-uint64 vmx_vmread_natural(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = ia32_vmread(component);
- return val;
-}
-
-uint64 vmx_vmread_64(struct vcpu_t *vcpu, component_index_t component)
-{
- uint64 val = 0;
-
- val = ia32_vmread(component);
- val |= ((uint64)(ia32_vmread(component + 1)) << 32);
- return val;
-}
-
-void hax_enable_irq(void)
-{
- asm_enable_irq();
-}
-
-void hax_disable_irq(void)
-{
- asm_disable_irq();
-}
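
vmx_vmread_64() and _vmx_vmwrite_64() above capture how a 32-bit host reaches
64-bit VMCS fields: VMREAD/VMWRITE move at most the operand width, so the
field is accessed in two halves, with the high half living at the field's
encoding plus one (natural-width fields, by contrast, are simply 32 bits wide
on a 32-bit host, which is why _vmx_vmwrite_natural() writes only the low
half). A condensed sketch of the pattern:

    #include <stdint.h>

    extern uint32_t ia32_vmread(uint32_t component);
    extern void ia32_vmwrite(uint32_t component, uint32_t val);

    static inline uint64_t vmread64(uint32_t component)
    {
        uint64_t lo = ia32_vmread(component);
        uint64_t hi = ia32_vmread(component + 1);   /* <field>_HIGH encoding */
        return lo | (hi << 32);
    }

    static inline void vmwrite64(uint32_t component, uint64_t val)
    {
        ia32_vmwrite(component, (uint32_t)val);
        ia32_vmwrite(component + 1, (uint32_t)(val >> 32));
    }
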
diff --git a/windows/sources b/windows/sources
index f1e175a8..60dab210 100644
--- a/windows/sources
+++ b/windows/sources
@@ -19,15 +19,5 @@ SOURCES= hax_entry.c \
version.rc \
hax_host_mem.c
-I386_SOURCES=i386\ia32.asm \
- i386\segments.asm \
- i386\vmcs.asm \
- i386\wrapper.c
-
-AMD64_SOURCES=amd64\ia32.asm \
- amd64\segments.asm \
- amd64\vmcs.asm \
- amd64\wrapper.c
-
NTTARGETFILE0 = hax_event_win.h
diff --git a/windows/sources.props b/windows/sources.props
index 605310c5..a9f06d4c 100644
--- a/windows/sources.props
+++ b/windows/sources.props
@@ -49,18 +49,6 @@
hax_wrapper.c
version.rc
-
- i386\ia32.asm
- i386\segments.asm
- i386\vmcs.asm
- i386\wrapper.c
-
-
- amd64\ia32.asm
- amd64\segments.asm
- amd64\vmcs.asm
- amd64\wrapper.c
-
hax_event_win.h