Mirror of https://github.com/projectacrn/acrn-hypervisor.git
HV: treewide: Add exec_vmread32 and exec_vmwrite32 functions
In the hypervisor, VMCS fields include 16-bit, 32-bit, 64-bit, and natural-width fields. The current implementation has no exec_vmread32/exec_vmwrite32 for accessing the 32-bit fields, so call sites carry many type casts on the return values and parameters of vmread/vmwrite operations.

Create exec_vmread32 and exec_vmwrite32 to access 32-bit fields in the VMCS, update the types of the variables involved in vmread/vmwrite operations, and update the related callers according to the VMCS field sizes.

V1-->V2: New part of this patch series; only updates the 32-bit vmread/vmwrite operations and their callers.
V2-->V3: Update the types of the related structure fields for exec_vmread32/exec_vmwrite32; rename the temporary variable 'low' to 'value' in exec_vmread32.
V3-->V4: Remove useless type conversions.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
commit 612cdceaca
parent 65437960a9
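
To make the motivation concrete, here is the call-site pattern the patch removes next to the one it introduces (a minimal illustrative sketch using a field from this series, not a verbatim excerpt):

        /* Before: reads of 32-bit fields returned uint64_t and had to be cast,
         * and writes silently widened their 32-bit arguments. */
        desc->limit = (uint32_t)exec_vmread(VMX_GUEST_GDTR_LIMIT);
        exec_vmwrite(VMX_GUEST_GDTR_LIMIT, desc->limit);

        /* After: the accessor width matches the VMCS field width, no casts. */
        desc->limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);
        exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, desc->limit);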
@@ -110,8 +110,8 @@ int vm_set_seg_desc(struct vcpu *vcpu, enum cpu_reg_name seg,
         }

         exec_vmwrite(base, ret_desc->base);
-        exec_vmwrite(limit, ret_desc->limit);
-        exec_vmwrite(access, ret_desc->access);
+        exec_vmwrite32(limit, ret_desc->limit);
+        exec_vmwrite32(access, ret_desc->access);

         return 0;
 }

@@ -136,8 +136,8 @@ int vm_get_seg_desc(struct vcpu *vcpu, enum cpu_reg_name seg,
         }

         desc->base = exec_vmread(base);
-        desc->limit = (uint32_t)exec_vmread(limit);
-        desc->access = (uint32_t)exec_vmread(access);
+        desc->limit = exec_vmread32(limit);
+        desc->access = exec_vmread32(access);

         return 0;
 }

@@ -351,7 +351,7 @@ int decode_instruction(struct vcpu *vcpu)
                 return retval;
         }

-        csar = (uint32_t)exec_vmread(VMX_GUEST_CS_ATTR);
+        csar = exec_vmread32(VMX_GUEST_CS_ATTR);
         get_guest_paging_info(vcpu, emul_ctxt, csar);
         cpu_mode = get_vcpu_mode(vcpu);

@@ -205,24 +205,24 @@ int start_vcpu(struct vcpu *vcpu)

         /* Save guest IA32_EFER register */
         cur_context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
-        set_vcpu_mode(vcpu, exec_vmread(VMX_GUEST_CS_ATTR));
+        set_vcpu_mode(vcpu, exec_vmread32(VMX_GUEST_CS_ATTR));

         /* Obtain current VCPU instruction pointer and length */
         cur_context->rip = exec_vmread(VMX_GUEST_RIP);
-        vcpu->arch_vcpu.inst_len = exec_vmread(VMX_EXIT_INSTR_LEN);
+        vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);

         cur_context->rsp = exec_vmread(VMX_GUEST_RSP);
         cur_context->rflags = exec_vmread(VMX_GUEST_RFLAGS);

         /* Obtain VM exit reason */
-        vcpu->arch_vcpu.exit_reason = exec_vmread(VMX_EXIT_REASON);
+        vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);

         if (status != 0) {
                 /* refer to 64-ia32 spec section 24.9.1 volume#3 */
                 if (vcpu->arch_vcpu.exit_reason & VMX_VMENTRY_FAIL)
                         pr_fatal("vmentry fail reason=%lx", vcpu->arch_vcpu.exit_reason);
                 else
-                        pr_fatal("vmexit fail err_inst=%lx", exec_vmread(VMX_INSTR_ERROR));
+                        pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));

                 ASSERT(status == 0, "vm fail");
         }
@@ -198,7 +198,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
         /* following MSR not emulated now just left for future */
         case MSR_IA32_SYSENTER_CS:
         {
-                v = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
+                v = (uint64_t)exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
                 break;
         }
         case MSR_IA32_SYSENTER_ESP:

@@ -331,7 +331,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
         /* following MSR not emulated now just left for future */
         case MSR_IA32_SYSENTER_CS:
         {
-                exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, v);
+                exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, (uint32_t)v);
                 break;
         }
         case MSR_IA32_SYSENTER_ESP:
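
The two MSR hunks above show the one place casts remain by design: rdmsr/wrmsr emulation traffics in a 64-bit MSR value while IA32_SYSENTER_CS is a 32-bit VMCS field, so the conversion is now explicit at that boundary:

        v = (uint64_t)exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);   /* widen on read  */
        exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, (uint32_t)v);   /* narrow on write */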
@@ -38,16 +38,16 @@ static struct key_info g_key_info = {
         { \
                 seg.selector = exec_vmread16(VMX_GUEST_##SEG_NAME##_SEL); \
                 seg.base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \
-                seg.limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \
-                seg.attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR); \
+                seg.limit = exec_vmread32(VMX_GUEST_##SEG_NAME##_LIMIT); \
+                seg.attr = exec_vmread32(VMX_GUEST_##SEG_NAME##_ATTR); \
         }

 #define load_segment(seg, SEG_NAME) \
         { \
                 exec_vmwrite16(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \
                 exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \
-                exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
-                exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
+                exec_vmwrite32(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
+                exec_vmwrite32(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
         }

 #ifndef WORKAROUND_FOR_TRUSTY_4G_MEM

@@ -234,9 +234,9 @@ static void save_world_ctx(struct run_context *context)
          */
         context->vmx_ia32_pat = exec_vmread(VMX_GUEST_IA32_PAT_FULL);
         context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
-        context->ia32_sysenter_cs = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
         context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
         context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
+        context->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
         save_segment(context->cs, CS);
         save_segment(context->ss, SS);
         save_segment(context->ds, DS);

@@ -247,9 +247,9 @@ static void save_world_ctx(struct run_context *context)
         save_segment(context->ldtr, LDTR);
         /* Only base and limit for IDTR and GDTR */
         context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
-        context->idtr.limit = exec_vmread(VMX_GUEST_IDTR_LIMIT);
         context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
-        context->gdtr.limit = exec_vmread(VMX_GUEST_GDTR_LIMIT);
+        context->idtr.limit = exec_vmread32(VMX_GUEST_IDTR_LIMIT);
+        context->gdtr.limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);

         /* MSRs which not in the VMCS */
         context->ia32_star = msr_read(MSR_IA32_STAR);

@@ -280,7 +280,7 @@ static void load_world_ctx(struct run_context *context)
         exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
         exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->vmx_ia32_pat);
         exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
-        exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
+        exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
         exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
         exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
         load_segment(context->cs, CS);

@@ -293,9 +293,9 @@ static void load_world_ctx(struct run_context *context)
         load_segment(context->ldtr, LDTR);
         /* Only base and limit for IDTR and GDTR */
         exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
-        exec_vmwrite(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
         exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);
-        exec_vmwrite(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);
+        exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
+        exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);

         /* MSRs which not in the VMCS */
         msr_write(MSR_IA32_STAR, context->ia32_star);
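
For reference, with the macro arguments substituted, save_segment(context->cs, CS) now expands to width-matched reads (a sketch of the preprocessor expansion):

        context->cs.selector = exec_vmread16(VMX_GUEST_CS_SEL);
        context->cs.base = exec_vmread(VMX_GUEST_CS_BASE);
        context->cs.limit = exec_vmread32(VMX_GUEST_CS_LIMIT);
        context->cs.attr = exec_vmread32(VMX_GUEST_CS_ATTR);

load_segment mirrors this with the corresponding exec_vmwrite* calls, which is why the struct segment field types change later in this patch.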
@@ -63,7 +63,7 @@ static int is_guest_irq_enabled(struct vcpu *vcpu)
         if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0UL) {
                 /* Interrupts are allowed */
                 /* Check for temporarily disabled interrupts */
-                guest_state = exec_vmread(VMX_GUEST_INTERRUPTIBILITY_INFO);
+                guest_state = exec_vmread32(VMX_GUEST_INTERRUPTIBILITY_INFO);

                 if ((guest_state & (HV_ARCH_VCPU_BLOCKED_BY_STI |
                         HV_ARCH_VCPU_BLOCKED_BY_MOVSS)) == 0UL) {

@@ -139,7 +139,7 @@ static int vcpu_do_pending_event(struct vcpu *vcpu)
                 return -1;
         }

-        exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
+        exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
                 (vector & 0xFFU));

         vlapic_intr_accepted(vlapic, vector);

@@ -163,7 +163,7 @@ static int vcpu_do_pending_extint(struct vcpu *vcpu)
                 if (vector <= NR_MAX_VECTOR) {
                         dev_dbg(ACRN_DBG_INTR, "VPIC: to inject PIC vector %d\n",
                                         vector & 0xFFU);
-                        exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+                        exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
                                         VMX_INT_INFO_VALID |
                                         (vector & 0xFFU));
                         vpic_intr_accepted(vcpu->vm, vector);

@@ -245,12 +245,12 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
 static void _vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
 {
         if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
-                exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
+                exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
                                 vcpu->arch_vcpu.exception_info.error);
         }

-        exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
-                        (exception_type[vector] << 8) | (vector & 0xFFU));
+        exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
+                        (exception_type[vector] << 8U) | (vector & 0xFFU));

         vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
 }
@@ -324,9 +324,9 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
                  * Disable the interrupt window exiting
                  */
                 vcpu->arch_vcpu.irq_window_enabled = 0U;
-                value32 = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS);
+                value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
                 value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
-                exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, value32);
+                exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
         }

         vcpu_retain_rip(vcpu);

@@ -338,7 +338,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
         uint32_t intr_info;
         struct intr_excp_ctx ctx;

-        intr_info = exec_vmread(VMX_EXIT_INT_INFO);
+        intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
         if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
                 (((intr_info & VMX_INT_TYPE_MASK) >> 8)
                 != VMX_INT_TYPE_EXT_INT)) {

@@ -383,10 +383,10 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
         if (vcpu->arch_vcpu.inject_event_pending) {
                 if ((vcpu->arch_vcpu.inject_info.intr_info &
                         (EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
-                        exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
+                        exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
                                 vcpu->arch_vcpu.inject_info.error_code);

-                exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+                exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
                         vcpu->arch_vcpu.inject_info.intr_info);

                 vcpu->arch_vcpu.inject_event_pending = false;

@@ -401,8 +401,8 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
         /* inject NMI before maskable hardware interrupt */
         if (bitmap_test_and_clear(ACRN_REQUEST_NMI, pending_req_bits)) {
                 /* Inject NMI vector = 2 */
-                exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
-                        VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8) | IDT_NMI);
+                exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
+                        VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);

                 goto INTR_WIN;
         }

@@ -415,7 +415,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
          * at next vm exit?
          */
         if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
-                exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+                exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
                         vcpu->arch_vcpu.idt_vectoring_info);
                 goto INTR_WIN;
         }
@@ -450,9 +450,9 @@ INTR_WIN:
         /* Enable interrupt window exiting if pending */
         if (intr_pending && vcpu->arch_vcpu.irq_window_enabled == 0U) {
                 vcpu->arch_vcpu.irq_window_enabled = 1U;
-                tmp = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS);
+                tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
                 tmp |= (VMX_PROCBASED_CTLS_IRQ_WIN);
-                exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, tmp);
+                exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
         }

         return ret;

@@ -462,7 +462,7 @@ void cancel_event_injection(struct vcpu *vcpu)
 {
         uint32_t intinfo;

-        intinfo = exec_vmread(VMX_ENTRY_INT_INFO_FIELD);
+        intinfo = exec_vmread32(VMX_ENTRY_INT_INFO_FIELD);

         /*
          * If event is injected, we clear VMX_ENTRY_INT_INFO_FIELD,

@@ -475,10 +475,10 @@ void cancel_event_injection(struct vcpu *vcpu)

                 if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
                         vcpu->arch_vcpu.inject_info.error_code =
-                                exec_vmread(VMX_ENTRY_EXCEPTION_ERROR_CODE);
+                                exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);

                 vcpu->arch_vcpu.inject_info.intr_info = intinfo;
-                exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, 0UL);
+                exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0UL);
         }
 }

@@ -500,7 +500,7 @@ int exception_vmexit_handler(struct vcpu *vcpu)
         pr_dbg(" Handling guest exception");

         /* Obtain VM-Exit information field pg 2912 */
-        intinfo = exec_vmread(VMX_EXIT_INT_INFO);
+        intinfo = exec_vmread32(VMX_EXIT_INT_INFO);
         if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
                 exception_vector = intinfo & 0xFFU;
                 /* Check if exception caused by the guest is a HW exception.

@@ -508,13 +508,13 @@ int exception_vmexit_handler(struct vcpu *vcpu)
                  * error code to be conveyed to get via the stack
                  */
                 if ((intinfo & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
-                        int_err_code = exec_vmread(VMX_EXIT_INT_ERROR_CODE);
+                        int_err_code = exec_vmread32(VMX_EXIT_INT_ERROR_CODE);

                         /* get current privilege level and fault address */
-                        cpl = exec_vmread(VMX_GUEST_CS_ATTR);
-                        cpl = (cpl >> 5) & 3U;
+                        cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
+                        cpl = (cpl >> 5U) & 3U;

-                        if (cpl < 3)
+                        if (cpl < 3U)
                                 int_err_code &= ~4U;
                         else
                                 int_err_code |= 4U;
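
A common thread in these hunks: every injection path composes a value for the 32-bit VMX_ENTRY_INT_INFO_FIELD from flag, type, and vector bits, so with exec_vmwrite32 the shift-and-or arithmetic stays in 32 bits end to end, e.g. the NMI case above:

        exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
                VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);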
@@ -141,7 +141,7 @@ int vmexit_handler(struct vcpu *vcpu)

         /* Obtain interrupt info */
         vcpu->arch_vcpu.idt_vectoring_info =
-                exec_vmread(VMX_IDT_VEC_INFO_FIELD);
+                exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
         /* Filter out HW exception & NMI */
         if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
                 uint32_t vector_info = vcpu->arch_vcpu.idt_vectoring_info;

@@ -151,7 +151,7 @@ int vmexit_handler(struct vcpu *vcpu)

                 if (type == VMX_INT_TYPE_HW_EXP) {
                         if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
-                                err_code = exec_vmread(VMX_IDT_VEC_ERROR_CODE);
+                                err_code = exec_vmread32(VMX_IDT_VEC_ERROR_CODE);
                         vcpu_queue_exception(vcpu, vector, err_code);
                         vcpu->arch_vcpu.idt_vectoring_info = 0U;
                 } else if (type == VMX_INT_TYPE_NMI) {
@@ -220,6 +220,15 @@ uint64_t exec_vmread64(uint32_t field_full)
         return low;
 }

+uint32_t exec_vmread32(uint32_t field)
+{
+        uint64_t value;
+
+        value = exec_vmread64(field);
+
+        return (uint32_t)value;
+}
+
 uint16_t exec_vmread16(uint32_t field)
 {
         uint64_t value;

@@ -250,6 +259,11 @@ void exec_vmwrite64(unsigned int field_full, uint64_t value)
 #endif
 }

+void exec_vmwrite32(uint32_t field, uint32_t value)
+{
+        exec_vmwrite64(field, (uint64_t)value);
+}
+
 void exec_vmwrite16(uint32_t field, uint16_t value)
 {
         exec_vmwrite64(field, (uint64_t)value);
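
Both new helpers are thin wrappers over the existing 64-bit primitives: exec_vmread32 truncates the value returned by exec_vmread64 (safe because VMREAD of a 32-bit field only populates the low 32 bits of the destination), and exec_vmwrite32 zero-extends its argument before delegating to exec_vmwrite64. A typical read-modify-write then needs no casts at all (illustrative, mirroring the interrupt-window hunks above):

        uint32_t ctls;

        ctls = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
        ctls |= VMX_PROCBASED_CTLS_IRQ_WIN;
        exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, ctls);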
@@ -402,9 +416,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
                 }
                 /* Enable long mode */
                 pr_dbg("VMM: Enable long mode");
-                entry_ctrls = exec_vmread(VMX_ENTRY_CONTROLS);
+                entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
                 entry_ctrls |= VMX_ENTRY_CTLS_IA32E_MODE;
-                exec_vmwrite(VMX_ENTRY_CONTROLS, entry_ctrls);
+                exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);

                 context->ia32_efer |= MSR_IA32_EFER_LMA_BIT;
                 exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);

@@ -412,9 +426,9 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
                 paging_enabled && ((cr0 & CR0_PG) == 0U)){
                 /* Disable long mode */
                 pr_dbg("VMM: Disable long mode");
-                entry_ctrls = exec_vmread(VMX_ENTRY_CONTROLS);
+                entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
                 entry_ctrls &= ~VMX_ENTRY_CTLS_IA32E_MODE;
-                exec_vmwrite(VMX_ENTRY_CONTROLS, entry_ctrls);
+                exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);

                 context->ia32_efer &= ~MSR_IA32_EFER_LMA_BIT;
                 exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
@@ -666,12 +680,12 @@ static void init_guest_state(struct vcpu *vcpu)

         /* Limit */
         field = VMX_GUEST_CS_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_CS_LIMIT: 0x%x ", limit);

         /* Access */
         field = VMX_GUEST_CS_ATTR;
-        exec_vmwrite(field, access);
+        exec_vmwrite32(field, access);
         pr_dbg("VMX_GUEST_CS_ATTR: 0x%x ", access);

         /* Base */

@@ -750,7 +764,7 @@ static void init_guest_state(struct vcpu *vcpu)

         /* GDTR Limit */
         field = VMX_GUEST_GDTR_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);

         /* IDTR - Interrupt Descriptor Table */

@@ -784,7 +798,7 @@ static void init_guest_state(struct vcpu *vcpu)

         /* IDTR Limit */
         field = VMX_GUEST_IDTR_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%x ", limit);

         /***************************************************/
@@ -848,19 +862,19 @@ static void init_guest_state(struct vcpu *vcpu)

         /* Limit */
         field = VMX_GUEST_ES_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_ES_LIMIT: 0x%x ", limit);
         field = VMX_GUEST_SS_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_SS_LIMIT: 0x%x ", limit);
         field = VMX_GUEST_DS_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_DS_LIMIT: 0x%x ", limit);
         field = VMX_GUEST_FS_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_FS_LIMIT: 0x%x ", limit);
         field = VMX_GUEST_GS_LIMIT;
-        exec_vmwrite(field, limit);
+        exec_vmwrite32(field, limit);
         pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);

         /* Access */

@@ -872,19 +886,19 @@ static void init_guest_state(struct vcpu *vcpu)
         }

         field = VMX_GUEST_ES_ATTR;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_ES_ATTR: 0x%x ", value32);
         field = VMX_GUEST_SS_ATTR;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_SS_ATTR: 0x%x ", value32);
         field = VMX_GUEST_DS_ATTR;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_DS_ATTR: 0x%x ", value32);
         field = VMX_GUEST_FS_ATTR;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_FS_ATTR: 0x%x ", value32);
         field = VMX_GUEST_GS_ATTR;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_GS_ATTR: 0x%x ", value32);

         /* Base */
@@ -920,12 +934,12 @@ static void init_guest_state(struct vcpu *vcpu)

         field = VMX_GUEST_LDTR_LIMIT;
         value32 = 0xffffffffU;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_LDTR_LIMIT: 0x%x ", value32);

         field = VMX_GUEST_LDTR_ATTR;
         value32 = 0x10000U;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_LDTR_ATTR: 0x%x ", value32);

         field = VMX_GUEST_LDTR_BASE;

@@ -941,12 +955,12 @@ static void init_guest_state(struct vcpu *vcpu)

         field = VMX_GUEST_TR_LIMIT;
         value32 = 0xffU;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_TR_LIMIT: 0x%x ", value32);

         field = VMX_GUEST_TR_ATTR;
         value32 = 0x8bU;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_TR_ATTR: 0x%x ", value32);

         field = VMX_GUEST_TR_BASE;
@@ -956,24 +970,24 @@ static void init_guest_state(struct vcpu *vcpu)

         field = VMX_GUEST_INTERRUPTIBILITY_INFO;
         value32 = 0U;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_INTERRUPTIBILITY_INFO: 0x%x ",
                   value32);

         field = VMX_GUEST_ACTIVITY_STATE;
         value32 = 0U;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_ACTIVITY_STATE: 0x%x ",
                   value32);

         field = VMX_GUEST_SMBASE;
         value32 = 0U;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_SMBASE: 0x%x ", value32);

         value32 = msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU;
         field = VMX_GUEST_IA32_SYSENTER_CS;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_GUEST_IA32_SYSENTER_CS: 0x%x ",
                   value32);

@@ -1130,7 +1144,7 @@ static void init_host_state(__unused struct vcpu *vcpu)

         value32 = msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU;
         field = VMX_HOST_IA32_SYSENTER_CS;
-        exec_vmwrite(field, value32);
+        exec_vmwrite32(field, value32);
         pr_dbg("VMX_HOST_IA32_SYSENTER_CS: 0x%x ",
                   value32);

@@ -1223,7 +1237,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
         /* enable external interrupt VM Exit */
         value32 |= VMX_PINBASED_CTLS_IRQ_EXIT;

-        exec_vmwrite(VMX_PIN_VM_EXEC_CONTROLS, value32);
+        exec_vmwrite32(VMX_PIN_VM_EXEC_CONTROLS, value32);
         pr_dbg("VMX_PIN_VM_EXEC_CONTROLS: 0x%x ", value32);

         /* Set up primary processor based VM execution controls - pg 2900
@@ -1262,7 +1276,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
                                 VMX_PROCBASED_CTLS_CR8_STORE);
         }

-        exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, value32);
+        exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
         pr_dbg("VMX_PROC_VM_EXEC_CONTROLS: 0x%x ", value32);

         /* Set up secondary processor based VM execution controls - pg 2901

@@ -1299,7 +1313,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
                          * Set up TPR threshold for virtual interrupt delivery
                          * - pg 2904 24.6.8
                          */
-                        exec_vmwrite(VMX_TPR_THRESHOLD, 0);
+                        exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
                 }
         }

@@ -1308,7 +1322,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
                 value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
         }

-        exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS2, value32);
+        exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
         pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);

         if (is_vapic_supported()) {
@@ -1355,26 +1369,26 @@ static void init_exec_ctrl(struct vcpu *vcpu)
          * enable VM exit on MC only
          */
         value32 = (1U << IDT_MC);
-        exec_vmwrite(VMX_EXCEPTION_BITMAP, value32);
+        exec_vmwrite32(VMX_EXCEPTION_BITMAP, value32);

         /* Set up page fault error code mask - second paragraph * pg 2902
          * 24.6.3 - guest page fault exception causing * vmexit is governed by
          * both VMX_EXCEPTION_BITMAP and * VMX_PF_ERROR_CODE_MASK
          */
-        exec_vmwrite(VMX_PF_ERROR_CODE_MASK, 0);
+        exec_vmwrite32(VMX_PF_ERROR_CODE_MASK, 0U);

         /* Set up page fault error code match - second paragraph * pg 2902
          * 24.6.3 - guest page fault exception causing * vmexit is governed by
          * both VMX_EXCEPTION_BITMAP and * VMX_PF_ERROR_CODE_MATCH
          */
-        exec_vmwrite(VMX_PF_ERROR_CODE_MATCH, 0);
+        exec_vmwrite32(VMX_PF_ERROR_CODE_MATCH, 0U);

         /* Set up CR3 target count - An execution of mov to CR3 * by guest
          * causes HW to evaluate operand match with * one of N CR3-Target Value
          * registers. The CR3 target * count values tells the number of
          * target-value regs to evaluate
          */
-        exec_vmwrite(VMX_CR3_TARGET_COUNT, 0);
+        exec_vmwrite32(VMX_CR3_TARGET_COUNT, 0U);

         /* Set up IO bitmap register A and B - pg 2902 24.6.4 */
         value64 = HVA2HPA(vm->arch_vm.iobitmap[0]);
@@ -1432,23 +1446,23 @@ static void init_entry_ctrl(__unused struct vcpu *vcpu)
         value32 |= (VMX_ENTRY_CTLS_LOAD_EFER |
                     VMX_ENTRY_CTLS_LOAD_PAT);

-        exec_vmwrite(VMX_ENTRY_CONTROLS, value32);
+        exec_vmwrite32(VMX_ENTRY_CONTROLS, value32);
         pr_dbg("VMX_ENTRY_CONTROLS: 0x%x ", value32);

         /* Set up VMX entry MSR load count - pg 2908 24.8.2 Tells the number of
          * MSRs on load from memory on VM entry from mem address provided by
          * VM-entry MSR load address field
          */
-        exec_vmwrite(VMX_ENTRY_MSR_LOAD_COUNT, 0);
+        exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, 0U);

         /* Set up VM entry interrupt information field pg 2909 24.8.3 */
-        exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, 0);
+        exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);

         /* Set up VM entry exception error code - pg 2910 24.8.3 */
-        exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE, 0);
+        exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE, 0U);

         /* Set up VM entry instruction length - pg 2910 24.8.3 */
-        exec_vmwrite(VMX_ENTRY_INSTR_LENGTH, 0);
+        exec_vmwrite32(VMX_ENTRY_INSTR_LENGTH, 0U);
 }

 static void init_exit_ctrl(__unused struct vcpu *vcpu)
@@ -1475,7 +1489,7 @@ static void init_exit_ctrl(__unused struct vcpu *vcpu)
                         VMX_EXIT_CTLS_SAVE_EFER |
                         VMX_EXIT_CTLS_HOST_ADDR64);

-        exec_vmwrite(VMX_EXIT_CONTROLS, value32);
+        exec_vmwrite32(VMX_EXIT_CONTROLS, value32);
         pr_dbg("VMX_EXIT_CONTROL: 0x%x ", value32);

         /* Set up VM exit MSR store and load counts pg 2908 24.7.2 - tells the

@@ -1483,8 +1497,8 @@ static void init_exit_ctrl(__unused struct vcpu *vcpu)
          * The 64 bit VM-exit MSR store and load address fields provide the
          * corresponding addresses
          */
-        exec_vmwrite(VMX_EXIT_MSR_STORE_COUNT, 0);
-        exec_vmwrite(VMX_EXIT_MSR_LOAD_COUNT, 0);
+        exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, 0U);
+        exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, 0U);
 }

 #ifdef CONFIG_EFI_STUB
@@ -1510,7 +1524,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)

                 /* Access */
                 field = VMX_GUEST_CS_ATTR;
-                exec_vmwrite(field, efi_ctx->cs_ar);
+                exec_vmwrite32(field, efi_ctx->cs_ar);
                 pr_dbg("VMX_GUEST_CS_ATTR: 0x%x ", efi_ctx->cs_ar);

                 field = VMX_GUEST_ES_SEL;

@@ -1557,7 +1571,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)

         /* GDTR Limit */
         field = VMX_GUEST_GDTR_LIMIT;
-        exec_vmwrite(field, efi_ctx->gdt.limit);
+        exec_vmwrite32(field, efi_ctx->gdt.limit);
         pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", efi_ctx->gdt.limit);

         /* IDTR Base */

@@ -1567,7 +1581,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)

         /* IDTR Limit */
         field = VMX_GUEST_IDTR_LIMIT;
-        exec_vmwrite(field, efi_ctx->idt.limit);
+        exec_vmwrite32(field, efi_ctx->idt.limit);
         pr_dbg("VMX_GUEST_IDTR_LIMIT: 0x%x ", efi_ctx->idt.limit);
 }
@@ -113,10 +113,10 @@ struct cpu_regs {
 };

 struct segment {
-        uint64_t selector;
+        uint16_t selector;
         uint64_t base;
-        uint64_t limit;
-        uint64_t attr;
+        uint32_t limit;
+        uint32_t attr;
 };

 struct run_context {
@@ -159,7 +159,7 @@ struct run_context {
         uint64_t ia32_pat;
         uint64_t vmx_ia32_pat;
         uint64_t ia32_efer;
-        uint64_t ia32_sysenter_cs;
+        uint32_t ia32_sysenter_cs;
         uint64_t ia32_sysenter_esp;
         uint64_t ia32_sysenter_eip;
         uint64_t ia32_debugctl;
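
These structure updates are what let the world-switch hunks above drop their casts: each field now has exactly the width of the VMCS field it shadows. With ia32_sysenter_cs as uint32_t, for example, the save/load pair reads and writes cast-free:

        context->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
        exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);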
@@ -415,9 +415,11 @@ int exec_vmxon_instr(uint16_t pcpu_id);
 uint64_t exec_vmread(uint32_t field);

 uint16_t exec_vmread16(uint32_t field);
+uint32_t exec_vmread32(uint32_t field);
 uint64_t exec_vmread64(uint32_t field_full);
 void exec_vmwrite(uint32_t field, uint64_t value);
 void exec_vmwrite16(uint32_t field, uint16_t value);
+void exec_vmwrite32(uint32_t field, uint32_t value);
 void exec_vmwrite64(uint32_t field_full, uint64_t value);
 int init_vmcs(struct vcpu *vcpu);
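
With the full accessor family declared, the rule is one accessor per VMCS field width; the pairings as used throughout this patch (sketch):

        uint16_t sel  = exec_vmread16(VMX_GUEST_CS_SEL);          /* 16-bit field */
        uint32_t attr = exec_vmread32(VMX_GUEST_CS_ATTR);         /* 32-bit field */
        uint64_t efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);  /* 64-bit field */
        uint64_t rip  = exec_vmread(VMX_GUEST_RIP);               /* natural-width field */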