diff --git a/hypervisor/arch/x86/guest/guest.c b/hypervisor/arch/x86/guest/guest.c index 8940ceef2..894469a62 100644 --- a/hypervisor/arch/x86/guest/guest.c +++ b/hypervisor/arch/x86/guest/guest.c @@ -286,7 +286,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa, pw_info.level = pm; pw_info.is_write_access = ((*err_code & PAGE_FAULT_WR_FLAG) != 0U); pw_info.is_inst_fetch = ((*err_code & PAGE_FAULT_ID_FLAG) != 0U); - pw_info.is_user_mode = ((exec_vmread(VMX_GUEST_CS_SEL) & 0x3UL) == 3UL); + pw_info.is_user_mode = ((exec_vmread16(VMX_GUEST_CS_SEL) & 0x3U) == 3U); pw_info.pse = true; pw_info.nxe = ((cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT) != 0UL); diff --git a/hypervisor/arch/x86/guest/instr_emul_wrapper.c b/hypervisor/arch/x86/guest/instr_emul_wrapper.c index 889e189bd..4f835c527 100644 --- a/hypervisor/arch/x86/guest/instr_emul_wrapper.c +++ b/hypervisor/arch/x86/guest/instr_emul_wrapper.c @@ -44,7 +44,11 @@ int vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t *retval) uint32_t field = get_vmcs_field(reg); if (field != VMX_INVALID_VMCS_FIELD) { - *retval = exec_vmread(field); + if (reg < CPU_REG_64BIT_LAST) { + *retval = exec_vmread(field); + } else { + *retval = (uint64_t)exec_vmread16(field); + } } else { return -EINVAL; } @@ -73,7 +77,11 @@ int vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t val) uint32_t field = get_vmcs_field(reg); if (field != VMX_INVALID_VMCS_FIELD) { - exec_vmwrite(field, val); + if (reg < CPU_REG_64BIT_LAST) { + exec_vmwrite(field, val); + } else { + exec_vmwrite16(field, (uint16_t)val); + } } else { return -EINVAL; } @@ -223,7 +231,21 @@ encode_vmcs_seg_desc(enum cpu_reg_name seg, return 0; } - +/** + * + *Description: + *This local function is to convert register names into + *the corresponding field index MACROs in VMCS. 
+ * + *Post Condition: + *In the non-general register names group (CPU_REG_CR0~CPU_REG_LAST), + *for register names CPU_REG_CR2, CPU_REG_IDTR, CPU_REG_GDTR, + *CPU_REG_NATURAL_LAST, CPU_REG_64BIT_LAST and CPU_REG_LAST, + *this function returns VMX_INVALID_VMCS_FIELD; + *for other register names, it returns corresponding field index MACROs + *in VMCS. + * + **/ static uint32_t get_vmcs_field(enum cpu_reg_name ident) { switch (ident) { diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index a1652986e..3ca8fe782 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -158,7 +158,7 @@ int start_vcpu(struct vcpu *vcpu) vcpu->vm->attr.id, vcpu->vcpu_id); if (vcpu->arch_vcpu.vpid) - exec_vmwrite(VMX_VPID, vcpu->arch_vcpu.vpid); + exec_vmwrite16(VMX_VPID, vcpu->arch_vcpu.vpid); /* * A power-up or a reset invalidates all linear mappings, diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 92474c120..77fd4b0d1 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -2340,13 +2340,12 @@ apicv_inject_pir(struct vlapic *vlapic) if (pirval != 0UL) { rvi = pirbase + fls64(pirval); - intr_status_old = (uint16_t) - (0xFFFFUL & - exec_vmread(VMX_GUEST_INTR_STATUS)); + intr_status_old = 0xFFFFU & + exec_vmread16(VMX_GUEST_INTR_STATUS); intr_status_new = (intr_status_old & 0xFF00U) | rvi; if (intr_status_new > intr_status_old) { - exec_vmwrite(VMX_GUEST_INTR_STATUS, + exec_vmwrite16(VMX_GUEST_INTR_STATUS, intr_status_new); } } diff --git a/hypervisor/arch/x86/trusty.c b/hypervisor/arch/x86/trusty.c index 8608c7013..0a2370cc6 100644 --- a/hypervisor/arch/x86/trusty.c +++ b/hypervisor/arch/x86/trusty.c @@ -36,7 +36,7 @@ static struct key_info g_key_info = { #define save_segment(seg, SEG_NAME) \ { \ - seg.selector = exec_vmread16(VMX_GUEST_##SEG_NAME##_SEL); \ + seg.selector = exec_vmread16(VMX_GUEST_##SEG_NAME##_SEL); \ seg.base = 
exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \ seg.limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \ seg.attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR); \ @@ -44,7 +44,7 @@ static struct key_info g_key_info = { #define load_segment(seg, SEG_NAME) \ { \ - exec_vmwrite(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \ + exec_vmwrite16(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \ exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \ exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \ exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \ diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c index 2d6800408..806a810f6 100644 --- a/hypervisor/arch/x86/vmx.c +++ b/hypervisor/arch/x86/vmx.c @@ -220,6 +220,15 @@ uint64_t exec_vmread64(uint32_t field_full) return low; } +uint16_t exec_vmread16(uint32_t field) +{ + uint64_t value; + + value = exec_vmread64(field); + + return (uint16_t)value; +} + void exec_vmwrite(uint32_t field, uint64_t value) { asm volatile ( @@ -241,6 +250,11 @@ void exec_vmwrite64(unsigned int field_full, uint64_t value) #endif } +void exec_vmwrite16(uint32_t field, uint16_t value) +{ + exec_vmwrite64(field, (uint64_t)value); +} + #define HV_ARCH_VMX_GET_CS(SEL) \ { \ asm volatile ("movw %%cs, %%ax" : "=a"(sel)); \ @@ -534,6 +548,7 @@ static void init_guest_state(struct vcpu *vcpu) { uint32_t field; uint64_t value; + uint16_t value16; uint32_t value32; uint64_t value64; uint16_t sel; @@ -646,7 +661,7 @@ static void init_guest_state(struct vcpu *vcpu) /* Selector */ field = VMX_GUEST_CS_SEL; - exec_vmwrite(field, sel); + exec_vmwrite16(field, sel); pr_dbg("VMX_GUEST_CS_SEL: 0x%x ", sel); /* Limit */ @@ -812,23 +827,23 @@ static void init_guest_state(struct vcpu *vcpu) /* Selector */ field = VMX_GUEST_ES_SEL; - exec_vmwrite(field, es); + exec_vmwrite16(field, es); pr_dbg("VMX_GUEST_ES_SEL: 0x%x ", es); field = VMX_GUEST_SS_SEL; - exec_vmwrite(field, ss); + exec_vmwrite16(field, ss); pr_dbg("VMX_GUEST_SS_SEL: 0x%x ", ss); field = 
VMX_GUEST_DS_SEL; - exec_vmwrite(field, ds); + exec_vmwrite16(field, ds); pr_dbg("VMX_GUEST_DS_SEL: 0x%x ", ds); field = VMX_GUEST_FS_SEL; - exec_vmwrite(field, fs); + exec_vmwrite16(field, fs); pr_dbg("VMX_GUEST_FS_SEL: 0x%x ", fs); field = VMX_GUEST_GS_SEL; - exec_vmwrite(field, gs); + exec_vmwrite16(field, gs); pr_dbg("VMX_GUEST_GS_SEL: 0x%x ", gs); /* Limit */ @@ -899,9 +914,9 @@ static void init_guest_state(struct vcpu *vcpu) /* LDT and TR (dummy) */ /***************************************************/ field = VMX_GUEST_LDTR_SEL; - value32 = ldt_idx; - exec_vmwrite(field, value32); - pr_dbg("VMX_GUEST_LDTR_SEL: 0x%x ", value32); + value16 = ldt_idx; + exec_vmwrite16(field, value16); + pr_dbg("VMX_GUEST_LDTR_SEL: 0x%hu ", value16); field = VMX_GUEST_LDTR_LIMIT; value32 = 0xffffffffU; @@ -920,9 +935,9 @@ static void init_guest_state(struct vcpu *vcpu) /* Task Register */ field = VMX_GUEST_TR_SEL; - value32 = lssd32_idx; - exec_vmwrite(field, value32); - pr_dbg("VMX_GUEST_TR_SEL: 0x%x ", value32); + value16 = lssd32_idx; + exec_vmwrite16(field, value16); + pr_dbg("VMX_GUEST_TR_SEL: 0x%hu ", value16); field = VMX_GUEST_TR_LIMIT; value32 = 0xffU; @@ -1023,37 +1038,37 @@ static void init_host_state(__unused struct vcpu *vcpu) ***************************************************/ field = VMX_HOST_ES_SEL; asm volatile ("movw %%es, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_ES_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_ES_SEL: 0x%hu ", value16); field = VMX_HOST_CS_SEL; asm volatile ("movw %%cs, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_CS_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_CS_SEL: 0x%hu ", value16); field = VMX_HOST_SS_SEL; asm volatile ("movw %%ss, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_SS_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_SS_SEL: 0x%hu ", value16); field = 
VMX_HOST_DS_SEL; asm volatile ("movw %%ds, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_DS_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_DS_SEL: 0x%hu ", value16); field = VMX_HOST_FS_SEL; asm volatile ("movw %%fs, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_FS_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_FS_SEL: 0x%hu ", value16); field = VMX_HOST_GS_SEL; asm volatile ("movw %%gs, %%ax":"=a" (value16)); - exec_vmwrite(field, value16); - pr_dbg("VMX_HOST_GS_SEL: 0x%x ", value16); + exec_vmwrite16(field, value16); + pr_dbg("VMX_HOST_GS_SEL: 0x%hu ", value16); field = VMX_HOST_TR_SEL; asm volatile ("str %%ax":"=a" (tr_sel)); - exec_vmwrite(field, tr_sel); + exec_vmwrite16(field, tr_sel); pr_dbg("VMX_HOST_TR_SEL: 0x%x ", tr_sel); /****************************************************** @@ -1490,7 +1505,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu) /* Selector */ field = VMX_GUEST_CS_SEL; - exec_vmwrite(field, efi_ctx->cs_sel); + exec_vmwrite16(field, efi_ctx->cs_sel); pr_dbg("VMX_GUEST_CS_SEL: 0x%x ", efi_ctx->cs_sel); /* Access */ @@ -1499,23 +1514,23 @@ static void override_uefi_vmcs(struct vcpu *vcpu) pr_dbg("VMX_GUEST_CS_ATTR: 0x%x ", efi_ctx->cs_ar); field = VMX_GUEST_ES_SEL; - exec_vmwrite(field, efi_ctx->es_sel); + exec_vmwrite16(field, efi_ctx->es_sel); pr_dbg("VMX_GUEST_ES_SEL: 0x%x ", efi_ctx->es_sel); field = VMX_GUEST_SS_SEL; - exec_vmwrite(field, efi_ctx->ss_sel); + exec_vmwrite16(field, efi_ctx->ss_sel); pr_dbg("VMX_GUEST_SS_SEL: 0x%x ", efi_ctx->ss_sel); field = VMX_GUEST_DS_SEL; - exec_vmwrite(field, efi_ctx->ds_sel); + exec_vmwrite16(field, efi_ctx->ds_sel); pr_dbg("VMX_GUEST_DS_SEL: 0x%x ", efi_ctx->ds_sel); field = VMX_GUEST_FS_SEL; - exec_vmwrite(field, efi_ctx->fs_sel); + exec_vmwrite16(field, efi_ctx->fs_sel); pr_dbg("VMX_GUEST_FS_SEL: 0x%x ", efi_ctx->fs_sel); field = VMX_GUEST_GS_SEL; - exec_vmwrite(field, 
efi_ctx->gs_sel); + exec_vmwrite16(field, efi_ctx->gs_sel); pr_dbg("VMX_GUEST_GS_SEL: 0x%x ", efi_ctx->gs_sel); /* Base */ diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c index 9441b7ca5..4a0c4cd46 100644 --- a/hypervisor/common/hypercall.c +++ b/hypervisor/common/hypercall.c @@ -14,11 +14,11 @@ bool is_hypercall_from_ring0(void) { - uint64_t cs_sel; + uint16_t cs_sel; - cs_sel = exec_vmread(VMX_GUEST_CS_SEL); + cs_sel = exec_vmread16(VMX_GUEST_CS_SEL); /* cs_selector[1:0] is CPL */ - if ((cs_sel & 0x3UL) == 0) { + if ((cs_sel & 0x3U) == 0U) { return true; } diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h index 66a73c66c..69902305a 100644 --- a/hypervisor/include/arch/x86/vmx.h +++ b/hypervisor/include/arch/x86/vmx.h @@ -414,8 +414,10 @@ int exec_vmxon_instr(uint16_t pcpu_id); */ uint64_t exec_vmread(uint32_t field); +uint16_t exec_vmread16(uint32_t field); uint64_t exec_vmread64(uint32_t field_full); void exec_vmwrite(uint32_t field, uint64_t value); +void exec_vmwrite16(uint32_t field, uint16_t value); void exec_vmwrite64(uint32_t field_full, uint64_t value); int init_vmcs(struct vcpu *vcpu);