HV:treewide:Update exec_vmread/exec_vmwrite and exec_vmread64/exec_vmwrite64

In the hypervisor, VMCS fields include 16-bit fields,
32-bit fields, 64-bit fields and natural-width fields.
In the current implementation, exec_vmread/exec_vmwrite
are used to access 32-bit fields, 64-bit fields and
natural-width fields alike. This usage confuses developers,
so many type casts are needed on the return values and
parameters of vmread/vmwrite operations.

Since exec_vmread/exec_vmwrite and exec_vmread64/exec_vmwrite64
are the same, turn the current exec_vmread/exec_vmwrite
implementation into the exec_vmread64/exec_vmwrite64
implementation and add macro definitions for
exec_vmread/exec_vmwrite in the header file;
To access 64-bit fields in the VMCS, callers use
exec_vmread64/exec_vmwrite64;
Update the types of the variables involved in vmread/vmwrite
operations;
Update the related callers according to the VMCS field sizes.

Note: Natural-width fields are 64 bits wide on processors
that support the Intel 64 architecture. To access natural-width
fields in the VMCS, callers still use exec_vmread/exec_vmwrite;
that part of the current behavior is kept.
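
For illustration, the accessor is chosen by field width after this
change (a sketch using field names that appear in the diff below,
not itself part of the patch):

	uint16_t sel = exec_vmread16(VMX_GUEST_TR_SEL);         /* 16-bit */
	uint32_t intr = exec_vmread32(VMX_GUEST_INTERRUPTIBILITY_INFO); /* 32-bit */
	uint64_t pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);  /* 64-bit */
	uint64_t rip = exec_vmread(VMX_GUEST_RIP);              /* natural-width */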

V1-->V2:
	This is a new part of this patch series that only
	updates the 64-bit vmread/vmwrite operations and the
	related callers; natural-width fields still use
	exec_vmread or exec_vmwrite.
V2-->V3:
	Fix a few mistaken updates for natural-width fields in
	the VMCS; keep exec_vmread/exec_vmwrite to access them;
	Fix a few mistaken updates for 64-bit fields in the VMCS.
V3-->V4:
	Add "016llx" for 64-bit variables in log functions;
	A few updates for coding style;
	Rename lssd32_idx to tr_sel in the VMX module.
V4-->V5:
	Use CPU_REG_NATURAL_LAST in vm_get_register and
	vm_set_register to make the condition statements easier
	to understand.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>

@@ -471,7 +471,7 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
 	/* TODO - EPT Violation handler */
 	pr_info("%s, Guest linear address: 0x%016llx ",
-			__func__, exec_vmread64(VMX_GUEST_LINEAR_ADDR));
+			__func__, exec_vmread(VMX_GUEST_LINEAR_ADDR));
 	pr_info("%s, Guest physical address: 0x%016llx ",
 			__func__, exec_vmread64(VMX_GUEST_PHYSICAL_ADDR_FULL));

@@ -44,8 +44,10 @@ int vm_get_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t *retval)
 		uint32_t field = get_vmcs_field(reg);
 
 		if (field != VMX_INVALID_VMCS_FIELD) {
-			if (reg < CPU_REG_64BIT_LAST) {
+			if (reg < CPU_REG_NATURAL_LAST) {
 				*retval = exec_vmread(field);
+			} else if (reg < CPU_REG_64BIT_LAST) {
+				*retval = exec_vmread64(field);
 			} else {
 				*retval = (uint64_t)exec_vmread16(field);
 			}
@@ -77,8 +79,10 @@ int vm_set_register(struct vcpu *vcpu, enum cpu_reg_name reg, uint64_t val)
 		uint32_t field = get_vmcs_field(reg);
 
 		if (field != VMX_INVALID_VMCS_FIELD) {
-			if (reg < CPU_REG_64BIT_LAST) {
+			if (reg < CPU_REG_NATURAL_LAST) {
 				exec_vmwrite(field, val);
+			} else if (reg <= CPU_REG_64BIT_LAST) {
+				exec_vmwrite64(field, val);
 			} else {
 				exec_vmwrite16(field, (uint16_t)val);
 			}
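
(The range checks above rely on the cpu_reg_name enumerators being
grouped by VMCS field width, with sentinel values between the groups.
A hypothetical sketch of that ordering, for illustration only; the
real enum in the guest headers may use different names and members:)

	enum cpu_reg_name {
		/* ... general-purpose registers, handled before this point ... */
		CPU_REG_CR0,		/* natural-width VMCS fields start here */
		CPU_REG_CR4,
		CPU_REG_RFLAGS,
		CPU_REG_NATURAL_LAST,	/* sentinel after the natural-width group */
		CPU_REG_EFER,		/* 64-bit VMCS fields */
		CPU_REG_64BIT_LAST,	/* sentinel after the 64-bit group */
		CPU_REG_ES,		/* 16-bit selector fields */
		CPU_REG_CS,
	};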

@@ -193,8 +193,8 @@ int start_vcpu(struct vcpu *vcpu)
 		 */
 		instlen = vcpu->arch_vcpu.inst_len;
 		rip = cur_context->rip;
-		exec_vmwrite(VMX_GUEST_RIP, ((rip + instlen) &
-				0xFFFFFFFFFFFFFFFF));
+		exec_vmwrite(VMX_GUEST_RIP, ((rip + (uint64_t)instlen) &
+				0xFFFFFFFFFFFFFFFFUL));
 
 		/* Resume the VM */
 		status = vmx_vmrun(cur_context, VM_RESUME, ibrs_type);

@@ -2221,14 +2221,14 @@ apicv_set_tmr(__unused struct vlapic *vlapic, uint32_t vector, bool level)
 	mask = 1UL << (vector % 64U);
 	field = VMX_EOI_EXIT(vector);
 
-	val = exec_vmread(field);
+	val = exec_vmread64(field);
 	if (level) {
 		val |= mask;
 	} else {
 		val &= ~mask;
 	}
-	exec_vmwrite(field, val);
+	exec_vmwrite64(field, val);
 }
 
 /* Update the VMX_EOI_EXIT according to related tmr */
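
(Each EOI-exit bitmap entry is a 64-bit VMCS field, which is why the
read-modify-write above moves to exec_vmread64/exec_vmwrite64. One
plausible shape for the vector-to-field mapping, assuming the four
"full" field encodings are spaced 2 apart; the actual macro lives in
the VMX headers and may differ:)

	#define VMX_EOI_EXIT(vector) \
		(VMX_EOI_EXIT0_FULL + (((vector) >> 6U) * 2U))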

@@ -232,7 +232,7 @@ static void save_world_ctx(struct run_context *context)
 	 * the wrmsr handler keeps track of 'ia32_pat', and we only
 	 * need to load 'vmx_ia32_pat' here.
 	 */
-	context->vmx_ia32_pat = exec_vmread(VMX_GUEST_IA32_PAT_FULL);
+	context->vmx_ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL);
 	context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
 	context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
 	context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
@@ -426,7 +426,7 @@ static bool init_secure_world_env(struct vcpu *vcpu,
 	exec_vmwrite(VMX_GUEST_RSP,
 		TRUSTY_EPT_REBASE_GPA + size);
-	exec_vmwrite(VMX_TSC_OFFSET_FULL,
+	exec_vmwrite64(VMX_TSC_OFFSET_FULL,
 		vcpu->arch_vcpu.contexts[SECURE_WORLD].tsc_offset);
 
 	return setup_trusty_info(vcpu, size, base_hpa);

@@ -195,31 +195,19 @@ int exec_vmptrld(void *addr)
 	return status;
 }
 
-uint64_t exec_vmread(uint32_t field)
+uint64_t exec_vmread64(uint32_t field_full)
 {
 	uint64_t value;
 
 	asm volatile (
 		"vmread %%rdx, %%rax "
 		: "=a" (value)
-		: "d"(field)
+		: "d"(field_full)
 		: "cc");
 
 	return value;
 }
 
-uint64_t exec_vmread64(uint32_t field_full)
-{
-	uint64_t low;
-
-	low = exec_vmread(field_full);
-#ifdef __i386__
-	low += exec_vmread(field_full + 1) << 32;
-#endif
-
-	return low;
-}
-
 uint32_t exec_vmread32(uint32_t field)
 {
 	uint64_t value;
@@ -238,27 +226,14 @@ uint16_t exec_vmread16(uint32_t field)
 	return (uint16_t)value;
 }
 
-void exec_vmwrite(uint32_t field, uint64_t value)
+void exec_vmwrite64(uint32_t field_full, uint64_t value)
 {
 	asm volatile (
 		"vmwrite %%rax, %%rdx "
-		: : "a" (value), "d"(field)
+		: : "a" (value), "d"(field_full)
 		: "cc");
 }
 
-void exec_vmwrite64(unsigned int field_full, uint64_t value)
-{
-#ifdef __i386__
-	int low = (int)(value & 0xFFFFFFFF);
-	int high = (int)((value >> 32) & 0xFFFFFFFF);
-
-	exec_vmwrite(field_full, low);
-	exec_vmwrite(field_full + 1, high);
-#else
-	exec_vmwrite(field_full, value);
-#endif
-}
-
 void exec_vmwrite32(uint32_t field, uint32_t value)
 {
 	exec_vmwrite64(field, (uint64_t)value);
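
(With the #ifdef __i386__ paths gone, the narrower accessors reduce to
truncating/widening wrappers around the 64-bit VMREAD/VMWRITE. A sketch
consistent with the context lines above:)

	uint32_t exec_vmread32(uint32_t field)
	{
		uint64_t value;

		/* Read the full 64-bit value, then truncate. */
		value = exec_vmread64(field);
		return (uint32_t)value;
	}

	void exec_vmwrite32(uint32_t field, uint32_t value)
	{
		/* Widen to 64 bits; for a 32-bit field, VMWRITE only
		 * uses the low half of the source operand. */
		exec_vmwrite64(field, (uint64_t)value);
	}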
@@ -363,8 +338,8 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 	 * If context->cr0.CD is set, we defer any further requests to write
 	 * guest's IA32_PAT, until the time when guest's CR0.CD is being cleared
 	 */
-	if ((context->cr0 & CR0_CD) == 0U) {
-		exec_vmwrite(VMX_GUEST_IA32_PAT_FULL, value);
+	if ((context->cr0 & CR0_CD) == 0UL) {
+		exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, value);
 	}
 
 	return 0;
 }
@@ -451,11 +426,11 @@ int vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 			 * IA32_PAT with all-UC entries to emulate the cache
 			 * disabled behavior
 			 */
-			exec_vmwrite(VMX_GUEST_IA32_PAT_FULL, PAT_ALL_UC_VALUE);
+			exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, PAT_ALL_UC_VALUE);
 			CACHE_FLUSH_INVALIDATE_ALL();
 		} else {
 			/* Restore IA32_PAT to enable cache again */
-			exec_vmwrite(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat);
+			exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat);
 		}
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
@@ -566,10 +541,11 @@ static void init_guest_state(struct vcpu *vcpu)
 	uint32_t value32;
 	uint64_t value64;
 	uint16_t sel;
-	uint32_t limit, access, base;
-	uint32_t ldt_idx = 0x38;
-	int es = 0, ss = 0, ds = 0, fs = 0, gs = 0, data32_idx;
-	uint32_t lssd32_idx = 0x70;
+	uint32_t limit, access;
+	uint64_t base;
+	uint16_t ldt_idx = 0x38U;
+	uint16_t es = 0U, ss = 0U, ds = 0U, fs = 0U, gs = 0U, data32_idx;
+	uint16_t tr_sel = 0x70U;
 	struct vm *vm = vcpu->vm;
 	struct run_context *cur_context =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
@@ -615,7 +591,7 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Set up Flags - the value of RFLAGS on VM entry */
 	/***************************************************/
 	field = VMX_GUEST_RFLAGS;
-	cur_context->rflags = 0x2; /* Bit 1 is a active high reserved bit */
+	cur_context->rflags = 0x2UL; /* Bit 1 is a active high reserved bit */
 	exec_vmwrite(field, cur_context->rflags);
 	pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
@@ -698,31 +674,31 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* Set up guest instruction pointer */
 	field = VMX_GUEST_RIP;
-	value32 = 0U;
+	value64 = 0UL;
 	if (vcpu_mode == CPU_MODE_REAL) {
 		/* RIP is set here */
 		if (is_vcpu_bsp(vcpu)) {
 			if ((uint64_t)vcpu->entry_addr < 0x100000UL) {
-				value32 = (uint64_t)vcpu->entry_addr & 0x0FUL;
+				value64 = (uint64_t)vcpu->entry_addr & 0x0FUL;
 			}
 			else {
-				value32 = 0x0000FFF0U;
+				value64 = 0x0000FFF0UL;
 			}
 		}
 	} else {
-		value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
+		value64 = (uint64_t)vcpu->entry_addr;
 	}
 
-	pr_dbg("GUEST RIP on VMEntry %x ", value32);
-	exec_vmwrite(field, value32);
+	pr_dbg("GUEST RIP on VMEntry %016llx ", value64);
+	exec_vmwrite(field, value64);
 
 	if (vcpu_mode == CPU_MODE_64BIT) {
 		/* Set up guest stack pointer to 0 */
 		field = VMX_GUEST_RSP;
-		value32 = 0U;
-		pr_dbg("GUEST RSP on VMEntry %x ",
-				value32);
-		exec_vmwrite(field, value32);
+		value64 = 0UL;
+		pr_dbg("GUEST RSP on VMEntry %016llx ",
+				value64);
+		exec_vmwrite(field, value64);
 	}
 
 	/***************************************************/
@@ -806,9 +782,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* Set up guest Debug register */
 	field = VMX_GUEST_DR7;
-	value = 0x400;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_DR7: 0x%016llx ", value);
+	value64 = 0x400UL;
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_DR7: 0x%016llx ", value64);
 
 	/***************************************************/
 	/* ES, CS, SS, DS, FS, GS */
@@ -903,26 +879,26 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Base */
 	if (vcpu_mode == CPU_MODE_REAL) {
-		value = es << 4;
+		value64 = (uint64_t)es << 4U;
 	} else {
-		value = 0UL;
+		value64 = 0UL;
 	}
 
 	field = VMX_GUEST_ES_BASE;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_ES_BASE: 0x%016llx ", value);
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_ES_BASE: 0x%016llx ", value64);
 	field = VMX_GUEST_SS_BASE;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_SS_BASE: 0x%016llx ", value);
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_SS_BASE: 0x%016llx ", value64);
 	field = VMX_GUEST_DS_BASE;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_DS_BASE: 0x%016llx ", value);
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_DS_BASE: 0x%016llx ", value64);
 	field = VMX_GUEST_FS_BASE;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_FS_BASE: 0x%016llx ", value);
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_FS_BASE: 0x%016llx ", value64);
 	field = VMX_GUEST_GS_BASE;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_GS_BASE: 0x%016llx ", value);
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_GS_BASE: 0x%016llx ", value64);
 
 	/***************************************************/
 	/* LDT and TR (dummy) */
@@ -943,13 +919,13 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_LDTR_ATTR: 0x%x ", value32);
 
 	field = VMX_GUEST_LDTR_BASE;
-	value32 = 0x00U;
-	exec_vmwrite(field, value32);
-	pr_dbg("VMX_GUEST_LDTR_BASE: 0x%x ", value32);
+	value64 = 0x00UL;
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_LDTR_BASE: 0x%016llx ", value64);
 
 	/* Task Register */
 	field = VMX_GUEST_TR_SEL;
-	value16 = lssd32_idx;
+	value16 = tr_sel;
 	exec_vmwrite16(field, value16);
 	pr_dbg("VMX_GUEST_TR_SEL: 0x%hu ", value16);
@@ -964,9 +940,9 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_TR_ATTR: 0x%x ", value32);
 
 	field = VMX_GUEST_TR_BASE;
-	value32 = 0x00U;
-	exec_vmwrite(field, value32);
-	pr_dbg("VMX_GUEST_TR_BASE: 0x%x ", value32);
+	value64 = 0x00UL;
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_TR_BASE: 0x%016llx ", value64);
 
 	field = VMX_GUEST_INTERRUPTIBILITY_INFO;
 	value32 = 0U;
@@ -1004,24 +980,24 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* Set up guest pending debug exception */
 	field = VMX_GUEST_PENDING_DEBUG_EXCEPT;
-	value = 0x0UL;
-	exec_vmwrite(field, value);
-	pr_dbg("VMX_GUEST_PENDING_DEBUG_EXCEPT: 0x%016llx ", value);
+	value64 = 0x0UL;
+	exec_vmwrite(field, value64);
+	pr_dbg("VMX_GUEST_PENDING_DEBUG_EXCEPT: 0x%016llx ", value64);
 
 	/* These fields manage host and guest system calls * pg 3069 31.10.4.2
 	 * - set up these fields with * contents of current SYSENTER ESP and
 	 * EIP MSR values
 	 */
 	field = VMX_GUEST_IA32_SYSENTER_ESP;
-	value = msr_read(MSR_IA32_SYSENTER_ESP);
-	exec_vmwrite(field, value);
+	value64 = msr_read(MSR_IA32_SYSENTER_ESP);
+	exec_vmwrite(field, value64);
 	pr_dbg("VMX_GUEST_IA32_SYSENTER_ESP: 0x%016llx ",
-			value);
+			value64);
 	field = VMX_GUEST_IA32_SYSENTER_EIP;
-	value = msr_read(MSR_IA32_SYSENTER_EIP);
-	exec_vmwrite(field, value);
+	value64 = msr_read(MSR_IA32_SYSENTER_EIP);
+	exec_vmwrite(field, value64);
 	pr_dbg("VMX_GUEST_IA32_SYSENTER_EIP: 0x%016llx ",
-			value);
+			value64);
 }
 
 static void init_host_state(__unused struct vcpu *vcpu)
@@ -1198,9 +1174,9 @@ static void init_host_state(__unused struct vcpu *vcpu)
 	/* Set up host instruction pointer on VM Exit */
 	field = VMX_HOST_RIP;
 	value64 = (uint64_t)&vm_exit;
-	pr_dbg("HOST RIP on VMExit %x ", value32);
+	pr_dbg("HOST RIP on VMExit %016llx ", value64);
 	exec_vmwrite(field, value64);
-	pr_dbg("vm exit return address = %x ", value32);
+	pr_dbg("vm exit return address = %016llx ", value64);
 
 	/* These fields manage host and guest system calls * pg 3069 31.10.4.2
 	 * - set up these fields with * contents of current SYSENTER ESP and
@@ -1231,7 +1207,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	/* Set up VM Execution control to enable Set VM-exits on external
 	 * interrupts preemption timer - pg 2899 24.6.1
 	 */
-	value32 = msr_read(MSR_IA32_VMX_PINBASED_CTLS);
+	value32 = (uint32_t)msr_read(MSR_IA32_VMX_PINBASED_CTLS);
 
 	/* enable external interrupt VM Exit */
@@ -1318,7 +1294,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	}
 
 	if (cpu_has_cap(X86_FEATURE_OSXSAVE)) {
-		exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0);
+		exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0UL);
 		value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
 	}
@@ -1360,7 +1336,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	 * TODO: introduce API to make this data driven based
 	 * on VMX_EPT_VPID_CAP
 	 */
-	value64 = vm->arch_vm.nworld_eptp | (3UL << 3) | 6UL;
+	value64 = vm->arch_vm.nworld_eptp | (3UL << 3U) | 6UL;
 	exec_vmwrite64(VMX_EPT_POINTER_FULL, value64);
 	pr_dbg("VMX_EPT_POINTER: 0x%016llx ", value64);
@@ -1401,13 +1377,13 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	init_msr_emulation(vcpu);
 
 	/* Set up executive VMCS pointer - pg 2905 24.6.10 */
-	exec_vmwrite64(VMX_EXECUTIVE_VMCS_PTR_FULL, 0);
+	exec_vmwrite64(VMX_EXECUTIVE_VMCS_PTR_FULL, 0UL);
 
 	/* Setup Time stamp counter offset - pg 2902 24.6.5 */
-	exec_vmwrite64(VMX_TSC_OFFSET_FULL, 0);
+	exec_vmwrite64(VMX_TSC_OFFSET_FULL, 0UL);
 
 	/* Set up the link pointer */
-	exec_vmwrite64(VMX_VMS_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFF);
+	exec_vmwrite64(VMX_VMS_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFFUL);
 
 	/* Natural-width */
 	pr_dbg("Natural-width*********");
@@ -1419,10 +1395,10 @@ static void init_exec_ctrl(struct vcpu *vcpu)
 	 * if operand does not match one of these register values a VM exit
 	 * would occur
 	 */
-	exec_vmwrite(VMX_CR3_TARGET_0, 0);
-	exec_vmwrite(VMX_CR3_TARGET_1, 0);
-	exec_vmwrite(VMX_CR3_TARGET_2, 0);
-	exec_vmwrite(VMX_CR3_TARGET_3, 0);
+	exec_vmwrite(VMX_CR3_TARGET_0, 0UL);
+	exec_vmwrite(VMX_CR3_TARGET_1, 0UL);
+	exec_vmwrite(VMX_CR3_TARGET_2, 0UL);
+	exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
 }
 
 static void init_entry_ctrl(__unused struct vcpu *vcpu)
@@ -1549,15 +1525,15 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 	/* Base */
 	field = VMX_GUEST_ES_BASE;
-	exec_vmwrite(field, efi_ctx->es_sel << 4);
+	exec_vmwrite(field, efi_ctx->es_sel << 4U);
 	field = VMX_GUEST_SS_BASE;
-	exec_vmwrite(field, efi_ctx->ss_sel << 4);
+	exec_vmwrite(field, efi_ctx->ss_sel << 4U);
 	field = VMX_GUEST_DS_BASE;
-	exec_vmwrite(field, efi_ctx->ds_sel << 4);
+	exec_vmwrite(field, efi_ctx->ds_sel << 4U);
 	field = VMX_GUEST_FS_BASE;
-	exec_vmwrite(field, efi_ctx->fs_sel << 4);
+	exec_vmwrite(field, efi_ctx->fs_sel << 4U);
 	field = VMX_GUEST_GS_BASE;
-	exec_vmwrite(field, efi_ctx->gs_sel << 4);
+	exec_vmwrite(field, efi_ctx->gs_sel << 4U);
 
 	/* RSP */
 	field = VMX_GUEST_RSP;
@@ -1566,8 +1542,8 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 	/* GDTR Base */
 	field = VMX_GUEST_GDTR_BASE;
-	exec_vmwrite(field, (uint64_t)efi_ctx->gdt.base);
-	pr_dbg("VMX_GUEST_GDTR_BASE: 0x%x ", efi_ctx->gdt.base);
+	exec_vmwrite(field, efi_ctx->gdt.base);
+	pr_dbg("VMX_GUEST_GDTR_BASE: 0x%016llx ", efi_ctx->gdt.base);
 
 	/* GDTR Limit */
 	field = VMX_GUEST_GDTR_LIMIT;
@@ -1576,8 +1552,8 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 	/* IDTR Base */
 	field = VMX_GUEST_IDTR_BASE;
-	exec_vmwrite(field, (uint64_t)efi_ctx->idt.base);
-	pr_dbg("VMX_GUEST_IDTR_BASE: 0x%x ", efi_ctx->idt.base);
+	exec_vmwrite(field, efi_ctx->idt.base);
+	pr_dbg("VMX_GUEST_IDTR_BASE: 0x%016llx ", efi_ctx->idt.base);
 
 	/* IDTR Limit */
 	field = VMX_GUEST_IDTR_LIMIT;
@@ -1588,7 +1564,7 @@ static void override_uefi_vmcs(struct vcpu *vcpu)
 	/* Interrupt */
 	field = VMX_GUEST_RFLAGS;
 	/* clear flags for CF/PF/AF/ZF/SF/OF */
-	cur_context->rflags = efi_ctx->rflags & ~(0x8d5);
+	cur_context->rflags = efi_ctx->rflags & ~(0x8d5UL);
 	exec_vmwrite(field, cur_context->rflags);
 	pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
 }

@@ -412,15 +412,16 @@ int exec_vmxon_instr(uint16_t pcpu_id);
  * @return the lower 32-bit outside IA-32e mode for 64-bit fields.
  * @return full contents for 32-bit fields, with higher 32-bit set to 0.
  */
-uint64_t exec_vmread(uint32_t field);
 uint16_t exec_vmread16(uint32_t field);
 uint32_t exec_vmread32(uint32_t field);
 uint64_t exec_vmread64(uint32_t field_full);
-void exec_vmwrite(uint32_t field, uint64_t value);
+#define exec_vmread exec_vmread64
 void exec_vmwrite16(uint32_t field, uint16_t value);
 void exec_vmwrite32(uint32_t field, uint32_t value);
 void exec_vmwrite64(uint32_t field_full, uint64_t value);
+#define exec_vmwrite exec_vmwrite64
 int init_vmcs(struct vcpu *vcpu);
 int vmx_off(uint16_t pcpu_id);
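
With the macro aliases in place, existing natural-width call sites
compile unchanged; for example (illustrative only):

	/* Both calls expand to the same 64-bit VMREAD. */
	uint64_t rflags = exec_vmread(VMX_GUEST_RFLAGS);
	uint64_t same = exec_vmread64(VMX_GUEST_RFLAGS);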