fix x86 dir integer violations

Fix violations in the x86 directory that are related to integer problems.
Some of the reported problems are skipped because they are LDRA false positives.

V1->V2: 1. Modified the code style.
        2. Fixed all VM_EXIT_IO_INSTRUCTION-related macros.

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Huihuang Shi 2018-07-24 11:28:46 +08:00 committed by lijinxia
parent f1b9f5ac97
commit a661ffa618
10 changed files with 89 additions and 80 deletions
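The recurring pattern in this commit is making integer literals and intermediate types match the operands they are combined with, so no implicit signed/unsigned or width conversion remains. A minimal standalone sketch of the simplest case, the 0 -> 0UL change in the TURBO_MODE_DISABLE checks (the macro value and MSR contents below are illustrative stand-ins, not the hypervisor's definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in; the real macro lives in the hv headers
 * (Turbo Mode Disable is assumed here to be bit 38 of IA32_MISC_ENABLE). */
#define TURBO_MODE_DISABLE ((uint64_t)1U << 38U)

int main(void)
{
    uint64_t misc_en = 0x850089UL;          /* pretend MSR_IA32_MISC_ENABLE value */

    /* Comparing a uint64_t against a plain 0 mixes essential types; the 0UL
     * suffix keeps the whole expression in unsigned 64-bit arithmetic, which
     * is what the LDRA/MISRA integer rules ask for. */
    if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
        misc_en |= TURBO_MODE_DISABLE;      /* would be written back via msr_write() */
    }

    printf("misc_en = 0x%llx\n", (unsigned long long)misc_en);
    return 0;
}
```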


@ -52,7 +52,7 @@ static void print_hv_banner(void);
static uint16_t get_cpu_id_from_lapic_id(uint8_t lapic_id);
static void pcpu_sync_sleep(uint64_t *sync, uint64_t mask_bit);
int ibrs_type;
static uint64_t __attribute__((__section__(".bss_noinit"))) start_tsc;
static uint64_t start_tsc __attribute__((__section__(".bss_noinit")));
/* Push sp magic to top of stack for call trace */
#define SWITCH_TO(rsp, to) \
@ -341,19 +341,19 @@ static void get_cpu_name(void)
{
cpuid(CPUID_EXTEND_FUNCTION_2,
(uint32_t *)(boot_cpu_data.model_name),
(uint32_t *)(boot_cpu_data.model_name + 4),
(uint32_t *)(boot_cpu_data.model_name + 8),
(uint32_t *)(boot_cpu_data.model_name + 12));
(uint32_t *)(&boot_cpu_data.model_name[4]),
(uint32_t *)(&boot_cpu_data.model_name[8]),
(uint32_t *)(&boot_cpu_data.model_name[12]));
cpuid(CPUID_EXTEND_FUNCTION_3,
(uint32_t *)(boot_cpu_data.model_name + 16),
(uint32_t *)(boot_cpu_data.model_name + 20),
(uint32_t *)(boot_cpu_data.model_name + 24),
(uint32_t *)(boot_cpu_data.model_name + 28));
(uint32_t *)(&boot_cpu_data.model_name[16]),
(uint32_t *)(&boot_cpu_data.model_name[20]),
(uint32_t *)(&boot_cpu_data.model_name[24]),
(uint32_t *)(&boot_cpu_data.model_name[28]));
cpuid(CPUID_EXTEND_FUNCTION_4,
(uint32_t *)(boot_cpu_data.model_name + 32),
(uint32_t *)(boot_cpu_data.model_name + 36),
(uint32_t *)(boot_cpu_data.model_name + 40),
(uint32_t *)(boot_cpu_data.model_name + 44));
(uint32_t *)(&boot_cpu_data.model_name[32]),
(uint32_t *)(&boot_cpu_data.model_name[36]),
(uint32_t *)(&boot_cpu_data.model_name[40]),
(uint32_t *)(&boot_cpu_data.model_name[44]));
boot_cpu_data.model_name[48] = '\0';
}
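For context on the offsets above: CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte brand string in EAX..EDX, which is why the destination steps in 4-byte increments up to index 44 and a terminator goes at byte 48. A standalone sketch of the same idea using GCC/Clang's <cpuid.h> helper instead of the hypervisor's own cpuid() wrapper (x86 host assumed):

```c
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned int regs[12];
    char brand[49];                 /* 3 leaves x 16 bytes, terminator at byte 48 */
    unsigned int i;

    for (i = 0U; i < 3U; i++) {
        /* Each leaf fills EAX..EDX with the next 16 brand-string bytes. */
        if (__get_cpuid(0x80000002U + i,
                        &regs[(i * 4U) + 0U], &regs[(i * 4U) + 1U],
                        &regs[(i * 4U) + 2U], &regs[(i * 4U) + 3U]) == 0) {
            return 1;               /* extended leaf not supported */
        }
    }
    (void)memcpy(brand, regs, sizeof(regs));
    brand[48] = '\0';
    printf("%s\n", brand);
    return 0;
}
```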
@ -369,7 +369,7 @@ void bsp_boot_init(void)
start_tsc = rdtsc();
/* Clear BSS */
(void)memset(_ld_bss_start, 0, _ld_bss_end - _ld_bss_start);
(void)memset(_ld_bss_start, 0U, (size_t)(_ld_bss_end - _ld_bss_start));
/* Build time sanity checks to make sure hard-coded offset
* is matching the actual offset!
@ -434,7 +434,7 @@ void bsp_boot_init(void)
__bitmap_set(BOOT_CPU_ID, &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0) {
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
msr_write(MSR_IA32_MISC_ENABLE, misc_en | TURBO_MODE_DISABLE);
}
@ -583,7 +583,7 @@ void cpu_secondary_init(void)
__bitmap_set(get_cpu_id(), &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0) {
if ((misc_en & TURBO_MODE_DISABLE) == 0UL) {
msr_write(MSR_IA32_MISC_ENABLE, misc_en | TURBO_MODE_DISABLE);
}


@ -76,16 +76,16 @@ static const struct cpu_state_table {
struct cpu_state_info state_info;
} cpu_state_tbl[] = {
{"Intel(R) Atom(TM) Processor A3960 @ 1.90GHz",
{ARRAY_SIZE(px_a3960), px_a3960,
ARRAY_SIZE(cx_a3960), cx_a3960}
{(uint8_t)ARRAY_SIZE(px_a3960), px_a3960,
(uint8_t)ARRAY_SIZE(cx_a3960), cx_a3960}
},
{"Intel(R) Atom(TM) Processor A3950 @ 1.60GHz",
{ARRAY_SIZE(px_a3950), px_a3950,
ARRAY_SIZE(cx_a3960), cx_a3960} /* Cx is same as A3960 */
{(uint8_t)ARRAY_SIZE(px_a3950), px_a3950,
(uint8_t)ARRAY_SIZE(cx_a3960), cx_a3960} /* Cx is same as A3960 */
},
{"Intel(R) Celeron(R) CPU J3455 @ 1.50GHz",
{ARRAY_SIZE(px_j3455), px_j3455,
ARRAY_SIZE(cx_j3455), cx_j3455}
{(uint8_t)ARRAY_SIZE(px_j3455), px_j3455,
(uint8_t)ARRAY_SIZE(cx_j3455), cx_j3455}
}
};
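ARRAY_SIZE() expands to a sizeof expression, so its type is size_t; storing it in the table's 8-bit count fields is a narrowing conversion that the integer rules want made explicit, hence the (uint8_t) casts. A tiny illustration with placeholder types (not ACRN's cpu_px_data/cpu_state_info):

```c
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

struct px_entry { uint32_t core_freq_mhz; };    /* placeholder struct */

static const struct px_entry px_a3960[] = { {1900U}, {1800U}, {800U} };

struct state_info {
    uint8_t px_cnt;                 /* only 8 bits wide, as in the real table */
    const struct px_entry *px_data;
};

int main(void)
{
    /* ARRAY_SIZE() has type size_t; the (uint8_t) cast makes the narrowing
     * into the 8-bit count field explicit instead of implicit. */
    struct state_info info = { (uint8_t)ARRAY_SIZE(px_a3960), px_a3960 };

    printf("px_cnt = %u\n", info.px_cnt);
    return 0;
}
```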


@ -7,17 +7,17 @@
#include <hypervisor.h>
static void set_tss_desc(union tss_64_descriptor *desc,
void *tss, size_t tss_limit, int type)
uint64_t tss, size_t tss_limit, int type)
{
uint32_t u1, u2, u3;
u1 = (uint32_t)(((uint64_t)tss << 16U) & 0xFFFFFFFFU);
u2 = (uint32_t)((uint64_t)tss & 0xFF000000U);
u3 = (uint32_t)(((uint64_t)tss & 0x00FF0000U) >> 16U);
u1 = (uint32_t)((tss << 16U) & 0xFFFFFFFFUL);
u2 = (uint32_t)(tss & 0xFF000000UL);
u3 = (uint32_t)((tss & 0x00FF0000UL) >> 16U);
desc->fields.low32.value = u1 | (tss_limit & 0xFFFFU);
desc->fields.base_addr_63_32 = (uint32_t)((uint64_t)tss >> 32U);
desc->fields.base_addr_63_32 = (uint32_t)(tss >> 32U);
desc->fields.high32.value = (u2 | ((uint32_t)type << 8U) | 0x8000U | u3);
}
@ -41,7 +41,7 @@ void load_gdtr_and_tr(void)
/* tss descriptor */
set_tss_desc(&gdt->host_gdt_tss_descriptors,
(void *)tss, sizeof(struct tss_64), TSS_AVAIL);
(uint64_t)tss, sizeof(struct tss_64), TSS_AVAIL);
gdtr.len = sizeof(struct host_gdt) - 1U;
gdtr.gdt = gdt;
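With set_tss_desc() now taking the TSS base as a plain uint64_t, the bit slicing above needs no pointer-to-integer casts. For reference, the 64-bit TSS descriptor spreads the base across three fields: bits 15:0 in the upper half of the first dword, bits 23:16 and 31:24 in the second dword, and bits 63:32 in the third. A self-contained sketch of the same packing (layout per the Intel SDM; the struct and field names here are illustrative, not the hypervisor's union):

```c
#include <stdint.h>
#include <assert.h>

struct tss_desc_sketch {
    uint32_t low32;          /* limit 15:0 | base 15:0              */
    uint32_t high32;         /* base 23:16, type, P bit, base 31:24 */
    uint32_t base_63_32;     /* base 63:32                          */
    uint32_t reserved;
};

static void pack_tss_base(struct tss_desc_sketch *d, uint64_t base,
                          uint32_t limit, uint32_t type)
{
    uint32_t u1 = (uint32_t)((base << 16U) & 0xFFFFFFFFUL);  /* base 15:0  -> bits 31:16 */
    uint32_t u2 = (uint32_t)(base & 0xFF000000UL);           /* base 31:24 stays in 31:24 */
    uint32_t u3 = (uint32_t)((base & 0x00FF0000UL) >> 16U);  /* base 23:16 -> bits 7:0   */

    d->low32 = u1 | (limit & 0xFFFFU);
    d->high32 = u2 | (type << 8U) | 0x8000U | u3;            /* 0x8000U = present bit */
    d->base_63_32 = (uint32_t)(base >> 32U);
}

int main(void)
{
    struct tss_desc_sketch d;

    pack_tss_base(&d, 0x0000123456789ABCUL, 0x67U, 9U);      /* 9 = available 64-bit TSS */
    assert((d.low32 >> 16U) == 0x9ABCU);                     /* base 15:0  */
    assert((d.high32 & 0xFFU) == 0x78U);                     /* base 23:16 */
    assert((d.high32 >> 24U) == 0x56U);                      /* base 31:24 */
    assert(d.base_63_32 == 0x00001234U);
    return 0;
}
```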


@ -448,9 +448,9 @@ static void get_rte_info(union ioapic_rte rte, bool *mask, bool *irr,
*mask = ((rte.full & IOAPIC_RTE_INTMASK) == IOAPIC_RTE_INTMSET);
*irr = ((rte.full & IOAPIC_RTE_REM_IRR) == IOAPIC_RTE_REM_IRR);
*phys = ((rte.full & IOAPIC_RTE_DESTMOD) == IOAPIC_RTE_DESTPHY);
*delmode = rte.full & IOAPIC_RTE_DELMOD;
*delmode = (uint32_t)(rte.full & IOAPIC_RTE_DELMOD);
*level = ((rte.full & IOAPIC_RTE_TRGRLVL) != 0UL);
*vector = rte.full & IOAPIC_RTE_INTVEC;
*vector = (uint32_t)(rte.full & IOAPIC_RTE_INTVEC);
*dest = (uint32_t)(rte.full >> APIC_ID_SHIFT);
}


@ -7,7 +7,7 @@
#include <hypervisor.h>
/* Rate range 1 to 1000 or 1uSec to 1mSec */
#define APIC_TIMER_MAX 0xffffffff
#define APIC_TIMER_MAX 0xffffffffU
#define HYPE_PERIOD_MAX 1000
#define APIC_DIVIDE_BY_ONE 0x0b
#define PIT_TARGET 0x3FFFU
@ -308,8 +308,8 @@ void suspend_lapic(void)
/* disable APIC with software flag */
val = read_lapic_reg32(LAPIC_SPURIOUS_VECTOR_REGISTER);
write_lapic_reg32(LAPIC_SPURIOUS_VECTOR_REGISTER,
(~LAPIC_SVR_APIC_ENABLE_MASK) & val);
val = (~LAPIC_SVR_APIC_ENABLE_MASK) & val;
write_lapic_reg32(LAPIC_SPURIOUS_VECTOR_REGISTER, val);
}
void resume_lapic(void)
@ -396,7 +396,7 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
icr.value_32.lo_32 = 0U;
icr.bits.shorthand = shorthand;
icr.bits.delivery_mode = INTR_LAPIC_ICR_STARTUP;
icr.bits.vector = cpu_startup_start_address >> 12U;
icr.bits.vector = (uint8_t)(cpu_startup_start_address >> 12U);
write_lapic_reg32(LAPIC_INT_COMMAND_REGISTER_0, icr.value_32.lo_32);
wait_for_delivery();
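The ICR vector field of a STARTUP IPI is only 8 bits wide and carries the 4 KiB page number of the AP entry point, which is why the shifted start address has to be narrowed explicitly with the (uint8_t) cast. A quick worked check, assuming an illustrative trampoline address of 0x8000 (4 KiB aligned, below 1 MiB):

```c
#include <stdint.h>
#include <assert.h>

int main(void)
{
    /* The trampoline must be 4 KiB aligned and below 1 MiB so its page
     * number fits the ICR's 8-bit vector field. */
    uint64_t cpu_startup_start_address = 0x8000UL;   /* illustrative address */
    uint8_t vector = (uint8_t)(cpu_startup_start_address >> 12U);

    assert(vector == 0x08U);
    /* The AP starts fetching at vector << 12 == 0x8000 in real mode. */
    return 0;
}
```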


@ -236,10 +236,11 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
#define PIT_TARGET 0x3FFFU
#define PIT_MAX_COUNT 0xFFFFU
uint16_t initial_pit;
uint32_t initial_pit;
uint16_t current_pit;
uint16_t max_cal_ms;
uint64_t current_tsc;
uint8_t initial_pit_high, initial_pit_low;
max_cal_ms = ((PIT_MAX_COUNT - PIT_TARGET) * 1000U) / PIT_TICK_RATE;
cal_ms = min(cal_ms, max_cal_ms);
@ -247,16 +248,18 @@ static uint64_t pit_calibrate_tsc(uint16_t cal_ms)
/* Assume the 8254 delivers 18.2 ticks per second when 16 bits fully
* wrap. This is about 1.193MHz or a clock period of 0.8384uSec
*/
initial_pit = (uint16_t)((cal_ms * PIT_TICK_RATE) / 1000U);
initial_pit = ((uint32_t)cal_ms * PIT_TICK_RATE) / 1000U;
initial_pit += PIT_TARGET;
initial_pit_high = (uint8_t)(initial_pit >> 8U);
initial_pit_low = (uint8_t)initial_pit;
/* Port 0x43 ==> Control word write; Data 0x30 ==> Select Counter 0,
* Read/Write least significant byte first, mode 0, 16 bits.
*/
io_write_byte(0x30U, 0x43U);
io_write_byte(initial_pit & 0x00ffU, 0x40U); /* Write LSB */
io_write_byte(initial_pit >> 8U, 0x40U); /* Write MSB */
io_write_byte(initial_pit_low, 0x40U); /* Write LSB */
io_write_byte(initial_pit_high, 0x40U); /* Write MSB */
current_tsc = rdtsc();
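The calibration count is now computed in 32-bit arithmetic because cal_ms * PIT_TICK_RATE is roughly 49 million for the largest calibration window, far more than 16 bits can hold; only after dividing by 1000 and adding PIT_TARGET does the value fit the 16-bit counter, and the two io_write_byte() calls then take explicitly split 8-bit halves. A standalone check of that arithmetic (PIT_TICK_RATE is assumed to be the usual 1.193182 MHz input clock, as in the hypervisor headers):

```c
#include <stdint.h>
#include <stdio.h>

#define PIT_TICK_RATE  1193182U     /* assumed 8254 input clock, in Hz */
#define PIT_TARGET     0x3FFFU
#define PIT_MAX_COUNT  0xFFFFU

int main(void)
{
    uint16_t max_cal_ms = (uint16_t)(((PIT_MAX_COUNT - PIT_TARGET) * 1000U) / PIT_TICK_RATE);
    uint16_t cal_ms = max_cal_ms;                    /* worst case: ~41 ms */

    /* Intermediate product ~48.9 million, so keep the math in uint32_t. */
    uint32_t initial_pit = ((uint32_t)cal_ms * PIT_TICK_RATE) / 1000U;
    initial_pit += PIT_TARGET;                       /* final count still fits 16 bits */

    uint8_t initial_pit_high = (uint8_t)(initial_pit >> 8U);
    uint8_t initial_pit_low = (uint8_t)initial_pit;

    printf("max_cal_ms=%u initial_pit=%u lo=0x%02x hi=0x%02x\n",
           max_cal_ms, initial_pit, initial_pit_low, initial_pit_high);
    return 0;
}
```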


@ -50,12 +50,12 @@ static const uint16_t exception_type[32] = {
[31] = VMX_INT_TYPE_HW_EXP
};
static int is_guest_irq_enabled(struct vcpu *vcpu)
static bool is_guest_irq_enabled(struct vcpu *vcpu)
{
struct run_context *cur_context =
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
uint64_t guest_rflags, guest_state;
int status = false;
bool status = false;
/* Read the RFLAGS of the guest */
guest_rflags = cur_context->rflags;
@ -204,6 +204,7 @@ static int get_excep_class(uint32_t vector)
int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
uint32_t err_code)
{
struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu;
/* VECTOR_INVALID is also greater than 32 */
if (vector >= 32U) {
pr_err("invalid exception vector %d", vector);
@ -211,7 +212,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
}
uint32_t prev_vector =
vcpu->arch_vcpu.exception_info.exception;
arch_vcpu->exception_info.exception;
int32_t new_class, prev_class;
/* SDM vol3 - 6.15, Table 6-5 - conditions for generating a
@ -235,12 +236,12 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
* double/triple fault. */
}
vcpu->arch_vcpu.exception_info.exception = vector;
arch_vcpu->exception_info.exception = vector;
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U)
vcpu->arch_vcpu.exception_info.error = err_code;
arch_vcpu->exception_info.error = err_code;
else
vcpu->arch_vcpu.exception_info.error = 0;
arch_vcpu->exception_info.error = 0U;
return 0;
}
@ -364,9 +365,12 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
int acrn_handle_pending_request(struct vcpu *vcpu)
{
int ret = 0;
uint64_t tmp;
uint32_t tmp;
bool intr_pending = false;
uint64_t *pending_req_bits = &vcpu->arch_vcpu.pending_req;
uint32_t intr_info;
uint32_t error_code;
struct vcpu_arch * arch_vcpu = &vcpu->arch_vcpu;
uint64_t *pending_req_bits = &arch_vcpu->pending_req;
if (bitmap_test_and_clear(ACRN_REQUEST_TRP_FAULT, pending_req_bits)) {
pr_fatal("Triple fault happen -> shutdown!");
@ -377,22 +381,24 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
invept(vcpu);
if (bitmap_test_and_clear(ACRN_REQUEST_VPID_FLUSH, pending_req_bits))
flush_vpid_single(vcpu->arch_vcpu.vpid);
flush_vpid_single(arch_vcpu->vpid);
if (bitmap_test_and_clear(ACRN_REQUEST_TMR_UPDATE, pending_req_bits))
vioapic_update_tmr(vcpu);
/* handling cancelled event injection when vcpu is switched out */
if (vcpu->arch_vcpu.inject_event_pending) {
if ((vcpu->arch_vcpu.inject_info.intr_info &
(EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
if (arch_vcpu->inject_event_pending) {
if ((arch_vcpu->inject_info.intr_info &
(EXCEPTION_ERROR_CODE_VALID << 8U)) != 0U) {
error_code = arch_vcpu->inject_info.error_code;
exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
vcpu->arch_vcpu.inject_info.error_code);
error_code);
}
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
vcpu->arch_vcpu.inject_info.intr_info);
intr_info = arch_vcpu->inject_info.intr_info;
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, intr_info);
vcpu->arch_vcpu.inject_event_pending = false;
arch_vcpu->inject_event_pending = false;
goto INTR_WIN;
}
@ -417,14 +423,14 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
* - external interrupt, if IF clear, will keep in IDT_VEC_INFO_FIELD
* at next vm exit?
*/
if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
if ((arch_vcpu->idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
vcpu->arch_vcpu.idt_vectoring_info);
arch_vcpu->idt_vectoring_info);
goto INTR_WIN;
}
/* Guest interruptable or not */
if (is_guest_irq_enabled(vcpu) != 0) {
if (is_guest_irq_enabled(vcpu)) {
/* Inject external interrupt first */
if (bitmap_test_and_clear(ACRN_REQUEST_EXTINT,
pending_req_bits)) {
@ -451,10 +457,10 @@ INTR_WIN:
intr_pending = vcpu_pending_request(vcpu);
/* Enable interrupt window exiting if pending */
if (intr_pending && vcpu->arch_vcpu.irq_window_enabled == 0U) {
vcpu->arch_vcpu.irq_window_enabled = 1U;
if (intr_pending && arch_vcpu->irq_window_enabled == 0U) {
arch_vcpu->irq_window_enabled = 1U;
tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
tmp |= (VMX_PROCBASED_CTLS_IRQ_WIN);
tmp |= VMX_PROCBASED_CTLS_IRQ_WIN;
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
}
@ -481,7 +487,7 @@ void cancel_event_injection(struct vcpu *vcpu)
exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);
vcpu->arch_vcpu.inject_info.intr_info = intinfo;
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0UL);
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
}
}
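For readers following the intr_info handling above: the VM-entry interruption-information word packs the vector in bits 7:0, the event type in bits 10:8, a deliver-error-code flag in bit 11, and a valid flag in bit 31 (Intel SDM Vol. 3). The check against EXCEPTION_ERROR_CODE_VALID << 8U is therefore testing bit 11, assuming EXCEPTION_ERROR_CODE_VALID is the value 8 as the shift suggests. A small standalone decoder of that layout (macro names here are illustrative, not ACRN's):

```c
#include <stdint.h>
#include <stdio.h>

/* Field layout of the VMX interruption-information word, per the SDM. */
#define INTR_INFO_VECTOR(x)          ((x) & 0xFFU)           /* bits 7:0  */
#define INTR_INFO_TYPE(x)            (((x) >> 8U) & 0x7U)    /* bits 10:8 */
#define INTR_INFO_ERR_CODE_VALID(x)  (((x) >> 11U) & 0x1U)   /* bit 11    */
#define INTR_INFO_VALID(x)           (((x) >> 31U) & 0x1U)   /* bit 31    */

int main(void)
{
    /* #GP(0): vector 13, type 3 (hardware exception), error code valid, valid. */
    uint32_t intr_info = 13U | (3U << 8U) | (1U << 11U) | (1U << 31U);

    printf("vector=%u type=%u err_code_valid=%u valid=%u\n",
           INTR_INFO_VECTOR(intr_info), INTR_INFO_TYPE(intr_info),
           INTR_INFO_ERR_CODE_VALID(intr_info), INTR_INFO_VALID(intr_info));
    return 0;
}
```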


@ -186,7 +186,7 @@ int vmexit_handler(struct vcpu *vcpu)
}
/* Calculate basic exit reason (low 16-bits) */
basic_exit_reason = vcpu->arch_vcpu.exit_reason & 0xFFFFU;
basic_exit_reason = (uint16_t)(vcpu->arch_vcpu.exit_reason & 0xFFFFU);
/* Log details for exit */
pr_dbg("Exit Reason: 0x%016llx ", vcpu->arch_vcpu.exit_reason);


@ -637,8 +637,8 @@ static void init_guest_state(struct vcpu *vcpu)
/* AP is initialized with real mode
* and CS value is left shift 8 bits from sipi vector.
*/
sel = vcpu->arch_vcpu.sipi_vector << 8U;
base = sel << 4U;
sel = (uint16_t)(vcpu->arch_vcpu.sipi_vector << 8U);
base = (uint64_t)sel << 4U;
}
limit = 0xffffU;
access = REAL_MODE_CODE_SEG_AR;
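The (uint16_t) and (uint64_t) casts make the intermediate widths explicit, but the underlying arithmetic is plain real-mode segmentation: the SIPI vector becomes the CS selector (vector << 8), and the segment base is the selector times 16, which lands back on the 4 KiB page the SIPI named. A worked check with an illustrative vector value:

```c
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint32_t sipi_vector = 0x96U;                  /* illustrative SIPI vector       */
    uint16_t sel = (uint16_t)(sipi_vector << 8U);  /* real-mode CS selector          */
    uint64_t base = (uint64_t)sel << 4U;           /* segment base = selector * 16   */

    assert(sel == 0x9600U);
    assert(base == 0x96000UL);                     /* == sipi_vector << 12, the AP entry page */
    return 0;
}
```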
@ -972,7 +972,7 @@ static void init_guest_state(struct vcpu *vcpu)
exec_vmwrite32(field, value32);
pr_dbg("VMX_GUEST_SMBASE: 0x%x ", value32);
value32 = msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU;
value32 = ((uint32_t)msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU);
field = VMX_GUEST_IA32_SYSENTER_CS;
exec_vmwrite32(field, value32);
pr_dbg("VMX_GUEST_IA32_SYSENTER_CS: 0x%x ",
@ -1129,7 +1129,7 @@ static void init_host_state(__unused struct vcpu *vcpu)
exec_vmwrite(field, idtb.base);
pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idtb.base);
value32 = msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFU;
value32 = (uint32_t)(msr_read(MSR_IA32_SYSENTER_CS) & 0xFFFFFFFFUL);
field = VMX_HOST_IA32_SYSENTER_CS;
exec_vmwrite32(field, value32);
pr_dbg("VMX_HOST_IA32_SYSENTER_CS: 0x%x ",
@ -1235,7 +1235,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
/* These are bits 1,4-6,8,13-16, and 26, the corresponding bits of
* the IA32_VMX_PROCBASED_CTRLS MSR are always read as 1 --- A.3.2
*/
value32 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
value32 = (uint32_t)msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
value32 |= (VMX_PROCBASED_CTLS_TSC_OFF |
/* VMX_PROCBASED_CTLS_RDTSC | */
VMX_PROCBASED_CTLS_IO_BITMAP |
@ -1267,7 +1267,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
* 24.6.2. Set up for: * Enable EPT * Enable RDTSCP * Unrestricted
* guest (optional)
*/
value32 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
value32 = (uint32_t)msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
value32 |= (VMX_PROCBASED_CTLS2_EPT |
VMX_PROCBASED_CTLS2_RDTSCP |
VMX_PROCBASED_CTLS2_UNRESTRICT);
@ -1465,7 +1465,7 @@ static void init_exit_ctrl(__unused struct vcpu *vcpu)
* Enable saving and loading of IA32_PAT and IA32_EFER on VMEXIT Enable
* saving of pre-emption timer on VMEXIT
*/
value32 = msr_read(MSR_IA32_VMX_EXIT_CTLS);
value32 = (uint32_t)msr_read(MSR_IA32_VMX_EXIT_CTLS);
value32 |= (VMX_EXIT_CTLS_ACK_IRQ |
VMX_EXIT_CTLS_SAVE_PAT |
VMX_EXIT_CTLS_LOAD_PAT |


@ -18,34 +18,34 @@ int cpuid_vmexit_handler(struct vcpu *vcpu);
int cr_access_vmexit_handler(struct vcpu *vcpu);
#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
(exit_qual & (((1UL << (MSB+1))-1) - ((1UL << (LSB))-1)))
(exit_qual & (((1UL << (MSB+1U))-1UL) - ((1UL << (LSB))-1UL)))
/* MACROs to access Control-Register Info using exit qualification field */
#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 0) >> 0)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 0U) >> 0U)
#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 4) >> 4)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 4U) >> 4U)
#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11, 8) >> 8)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11U, 8U) >> 8U)
#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
/* MACROs to access IO Access Info using exit qualification field */
#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2, 0) >> 0)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2U, 0U) >> 0U)
#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3, 3) >> 3)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 3U) >> 3U)
#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4, 4) >> 4)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4U, 4U) >> 4U)
#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5, 5) >> 5)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 5U) >> 5U)
#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6, 6) >> 6)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31, 16) >> 16)
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
#ifdef HV_DEBUG
void get_vmexit_profile(char *str, int str_max);
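With the unsigned suffixes in place, each accessor above is a pure 64-bit mask-and-shift. For instance, VM_EXIT_IO_INSTRUCTION_PORT_NUMBER() builds the mask ((1UL << 32U) - 1UL) - ((1UL << 16U) - 1UL) = 0xFFFF0000 and shifts right by 16 to recover the I/O port from bits 31:16 of the exit qualification. A quick standalone check of that arithmetic, copying two of the macros (a 64-bit unsigned long is assumed, as in the hypervisor build; the exit-qualification layout for I/O instructions is per the Intel SDM):

```c
#include <stdint.h>
#include <assert.h>

#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
	(exit_qual & (((1UL << (MSB+1U))-1UL) - ((1UL << (LSB))-1UL)))

#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 3U) >> 3U)

#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)

int main(void)
{
	/* Exit qualification for "IN AL, 0x60": port 0x60 in bits 31:16,
	 * direction bit 3 set for IN, size 0 (1 byte) in bits 2:0. */
	uint64_t exit_qual = ((uint64_t)0x60U << 16U) | (1UL << 3U);

	assert(VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) == 0x60UL);
	assert(VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 1UL);
	return 0;
}
```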