hv: treewide: convert some MACROs to inline functions
MISRA-C requires that each parameter in a macro shall be enclosed in brackets. In some cases, adding brackets around every parameter is not an ideal solution: it can hurt readability when a macro takes many parameters, and duplicated brackets pile up when one macro invokes another macro with the same parameters. This patch converts some macros to inline functions to avoid such cases.

v1 -> v2:
* Remove the unnecessary changes in hypervisor/bsp/uefi/efi/boot.h

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
commit 67038794af (parent 37fd3871b7)
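As a minimal sketch of the bracket problem the message describes (hypothetical macros, not taken from this patch), compare a MISRA-compliant macro pair with the inline-function equivalent:

    /* Macro form: MISRA-C wants every parameter bracketed, and the brackets
     * stack up when one macro invokes another with the same parameters. */
    #define BIT_MASK(msb, lsb)     (((1UL << ((msb) + 1U)) - 1UL) - ((1UL << (lsb)) - 1UL))
    #define BIT_FIELD(q, msb, lsb) (((q) & BIT_MASK((msb), (lsb))) >> (lsb))

    /* Inline-function form: no per-parameter brackets needed, and the
     * compiler type-checks every argument. */
    static inline uint64_t bit_field(uint64_t q, uint32_t msb, uint32_t lsb)
    {
    	return (q & (((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL))) >> lsb;
    }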
@@ -23,8 +23,11 @@ uint64_t get_microcode_version(void)
  * According to SDM vol 3 Table 9-7. If data_size field of uCode
  * header is zero, the ucode length is 2000
  */
-#define UCODE_GET_DATA_SIZE(uhdr) \
-	((uhdr.data_size != 0U) ? uhdr.data_size : 2000U)
+static inline size_t get_ucode_data_size(struct ucode_header *uhdr)
+{
+	return ((uhdr->data_size != 0U) ? uhdr->data_size : 2000U);
+}
 
 void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
 {
 	uint64_t gva, fault_addr;
@@ -47,7 +50,7 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
 		return;
 	}
 
-	data_size = UCODE_GET_DATA_SIZE(uhdr) + sizeof(struct ucode_header);
+	data_size = get_ucode_data_size(&uhdr) + sizeof(struct ucode_header);
 	data_page_num =
 		((data_size + CPU_PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT;
 
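For reference, the page-count expression above is the usual round-up (ceiling) division: assuming the conventional CPU_PAGE_SIZE of 4096 and CPU_PAGE_SHIFT of 12, a data_size of 4097 bytes gives ((4097 + 4096) - 1) >> 12 = 8192 >> 12 = 2 pages.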
@@ -336,9 +336,9 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
 	io_req->type = REQ_PORTIO;
-	pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
-	pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
-	if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
+	pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
+	pio_req->address = vm_exit_io_instruction_port_number(exit_qual);
+	if (vm_exit_io_instruction_access_direction(exit_qual) == 0UL) {
 		pio_req->direction = REQUEST_WRITE;
 		pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX);
 	} else {
@@ -264,23 +264,26 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
 int cr_access_vmexit_handler(struct vcpu *vcpu)
 {
 	uint64_t reg;
-	int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);
+	uint32_t idx;
+	uint64_t exit_qual;
 
-	ASSERT((idx>=0) && (idx<=15), "index out of range");
+	exit_qual = vcpu->arch_vcpu.exit_qualification;
+	idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual);
+
+	ASSERT((idx <= 15U), "index out of range");
 	reg = vcpu_get_gpreg(vcpu, idx);
 
-	switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
-		(vcpu->arch_vcpu.exit_qualification) << 4) |
-		VM_EXIT_CR_ACCESS_CR_NUM(vcpu->arch_vcpu.exit_qualification)) {
-	case 0x00U:
+	switch ((vm_exit_cr_access_type(exit_qual) << 4U) |
+		vm_exit_cr_access_cr_num(exit_qual)) {
+	case 0x00UL:
 		/* mov to cr0 */
 		vcpu_set_cr0(vcpu, reg);
 		break;
-	case 0x04U:
+	case 0x04UL:
 		/* mov to cr4 */
 		vcpu_set_cr4(vcpu, reg);
 		break;
-	case 0x08U:
+	case 0x08UL:
 		/* mov to cr8 */
 		/* According to SDM 6.15 "Exception and interrupt Reference":
 		 *
@@ -293,7 +296,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
 		}
 		vlapic_set_cr8(vcpu->arch_vcpu.vlapic, reg);
 		break;
-	case 0x18U:
+	case 0x18UL:
 		/* mov from cr8 */
 		reg = vlapic_get_cr8(vcpu->arch_vcpu.vlapic);
 		vcpu_set_gpreg(vcpu, idx, reg);
@@ -303,11 +306,8 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	TRACE_2L(TRACE_VMEXIT_CR_ACCESS,
-		VM_EXIT_CR_ACCESS_ACCESS_TYPE
-		(vcpu->arch_vcpu.exit_qualification),
-		VM_EXIT_CR_ACCESS_CR_NUM
-		(vcpu->arch_vcpu.exit_qualification));
+	TRACE_2L(TRACE_VMEXIT_CR_ACCESS, vm_exit_cr_access_type(exit_qual),
+		vm_exit_cr_access_cr_num(exit_qual));
 
 	return 0;
 }
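A note on the switch key above: per the Intel SDM, bits 3:0 of the exit qualification give the control register number and bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), so the handler packs the two fields as (access_type << 4) | cr_num. A sketch of the resulting case values (the helper name is hypothetical, not part of the patch):

    /* key = (access_type << 4) | cr_num
     * 0x00 = MOV to CR0, 0x04 = MOV to CR4, 0x08 = MOV to CR8,
     * 0x18 = MOV from CR8 (access type 1, CR number 8). */
    static inline uint64_t cr_access_key(uint64_t exit_qual)
    {
    	return (vm_exit_cr_access_type(exit_qual) << 4U) |
    		vm_exit_cr_access_cr_num(exit_qual);
    }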
@@ -70,7 +70,7 @@ void vcpu_thread(struct vcpu *vcpu)
 
 	/* Restore guest TSC_AUX */
 	if (vcpu->launched) {
-		CPU_MSR_WRITE(MSR_IA32_TSC_AUX,
+		cpu_msr_write(MSR_IA32_TSC_AUX,
 			vcpu->msr_tsc_aux_guest);
 	}
 
@@ -87,9 +87,9 @@ void vcpu_thread(struct vcpu *vcpu)
 
 	vcpu->arch_vcpu.nrexits++;
 	/* Save guest TSC_AUX */
-	CPU_MSR_READ(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
+	cpu_msr_read(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
 	/* Restore native TSC_AUX */
-	CPU_MSR_WRITE(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
+	cpu_msr_write(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
 
 	CPU_IRQ_ENABLE();
 	/* Dispatch handler */
@@ -349,22 +349,22 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 }
 
 /* Read MSR */
-#define CPU_MSR_READ(reg, msr_val_ptr) \
-{ \
-	uint32_t msrl, msrh; \
-	asm volatile (" rdmsr ":"=a"(msrl), \
-		"=d"(msrh) : "c" (reg)); \
-	*msr_val_ptr = ((uint64_t)msrh<<32) | msrl; \
+static inline void cpu_msr_read(uint32_t reg, uint64_t *msr_val_ptr)
+{
+	uint32_t msrl, msrh;
+
+	asm volatile (" rdmsr ":"=a"(msrl), "=d"(msrh) : "c" (reg));
+	*msr_val_ptr = ((uint64_t)msrh << 32U) | msrl;
 }
 
 /* Write MSR */
-#define CPU_MSR_WRITE(reg, msr_val) \
-{ \
-	uint32_t msrl, msrh; \
-	msrl = (uint32_t)msr_val; \
-	msrh = (uint32_t)(msr_val >> 32); \
-	asm volatile (" wrmsr " : : "c" (reg), \
-		"a" (msrl), "d" (msrh)); \
+static inline void cpu_msr_write(uint32_t reg, uint64_t msr_val)
+{
+	uint32_t msrl, msrh;
+
+	msrl = (uint32_t)msr_val;
+	msrh = (uint32_t)(msr_val >> 32U);
+	asm volatile (" wrmsr " : : "c" (reg), "a" (msrl), "d" (msrh));
 }
 
 #ifdef CONFIG_PARTITION_MODE
@@ -388,10 +388,11 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 #endif
 
 /* This macro writes the stack pointer. */
-#define CPU_SP_WRITE(stack_ptr) \
-{ \
-	uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL); \
-	asm volatile ("movq %0, %%rsp" : : "r"(rsp)); \
+static inline void cpu_sp_write(uint64_t *stack_ptr)
+{
+	uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL);
+
+	asm volatile ("movq %0, %%rsp" : : "r"(rsp));
 }
 
 /* Synchronizes all read accesses from memory */
@@ -419,12 +420,13 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 }
 
 /* Read time-stamp counter / processor ID */
-#define CPU_RDTSCP_EXECUTE(timestamp_ptr, cpu_id_ptr) \
-{ \
-	uint32_t tsl, tsh; \
-	asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), \
-		"=c"(*cpu_id_ptr)); \
-	*timestamp_ptr = ((uint64_t)tsh << 32) | tsl; \
+static inline void
+cpu_rdtscp_execute(uint64_t *timestamp_ptr, uint32_t *cpu_id_ptr)
+{
+	uint32_t tsl, tsh;
+
+	asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), "=c"(*cpu_id_ptr));
+	*timestamp_ptr = ((uint64_t)tsh << 32U) | tsl;
 }
 
 /* Macro to save rflags register */
@@ -500,21 +502,19 @@ static inline uint64_t cpu_rbp_get(void)
 	return ret;
 }
 
-
-
 static inline uint64_t
 msr_read(uint32_t reg_num)
 {
 	uint64_t msr_val;
 
-	CPU_MSR_READ(reg_num, &msr_val);
+	cpu_msr_read(reg_num, &msr_val);
 	return msr_val;
 }
 
 static inline void
 msr_write(uint32_t reg_num, uint64_t value64)
 {
-	CPU_MSR_WRITE(reg_num, value64);
+	cpu_msr_write(reg_num, value64);
 }
 
 static inline void
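One practical gain from these conversions, beyond the MISRA bracket rule, is that the inline functions are type-checked at the call site. A sketch (illustration only, not part of the patch):

    uint64_t val;

    cpu_msr_read(MSR_IA32_TSC_AUX, &val);	/* OK: second argument must be uint64_t * */
    /* cpu_msr_read(MSR_IA32_TSC_AUX, val); would now be rejected with a clear
     * pointer-type diagnostic; the old CPU_MSR_READ macro expanded to
     * "*msr_val_ptr = ..." and failed with a less direct error message. */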
@@ -17,35 +17,72 @@ int vmcall_vmexit_handler(struct vcpu *vcpu);
 int cpuid_vmexit_handler(struct vcpu *vcpu);
 int cr_access_vmexit_handler(struct vcpu *vcpu);
 
-#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
-	(exit_qual & (((1UL << (MSB+1U))-1UL) - ((1UL << (LSB))-1UL)))
+static inline uint64_t
+vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)
+{
+	return (exit_qual &
+		(((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL)));
+}
+
+/* access Control-Register Info using exit qualification field */
+static inline uint64_t vm_exit_cr_access_cr_num(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 3U, 0U) >> 0U);
+}
 
-/* MACROs to access Control-Register Info using exit qualification field */
-#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 0U) >> 0U)
-#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 4U) >> 4U)
-#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
-#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11U, 8U) >> 8U)
-#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
+static inline uint64_t vm_exit_cr_access_type(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 5U, 4U) >> 4U);
+}
 
-/* MACROs to access IO Access Info using exit qualification field */
-#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2U, 0U) >> 0U)
-#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 3U) >> 3U)
-#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4U, 4U) >> 4U)
-#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 5U) >> 5U)
-#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
-#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
+static inline uint64_t vm_exit_cr_access_lmsw_op(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
+}
+
+static inline uint64_t vm_exit_cr_access_reg_idx(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 11U, 8U) >> 8U);
+}
+
+static inline uint64_t vm_exit_cr_access_lmsw_src_date(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
+}
+
+/* access IO Access Info using exit qualification field */
+static inline uint64_t vm_exit_io_instruction_size(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 2U, 0U) >> 0U);
+}
+
+static inline uint64_t
+vm_exit_io_instruction_access_direction(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 3U, 3U) >> 3U);
+}
+
+static inline uint64_t vm_exit_io_instruction_is_string(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 4U, 4U) >> 4U);
+}
+
+static inline uint64_t
+vm_exit_io_instruction_is_rep_prefixed(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 5U, 5U) >> 5U);
+}
+
+static inline uint64_t
+vm_exit_io_instruction_is_operand_encoding(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
+}
+
+static inline uint64_t vm_exit_io_instruction_port_number(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
+}
 
 #ifdef HV_DEBUG
 void get_vmexit_profile(char *str_arg, int str_max);
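To see the accessors in action, here is a small standalone program (illustration only; it restates the bit extraction of vm_exit_qualification_bit_mask outside the tree) that decodes a hypothetical I/O exit qualification for a one-byte OUT to port 0x70:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same extraction as vm_exit_qualification_bit_mask, plus the shift
     * down to bit 0 that each accessor applies. */
    static inline uint64_t bit_field(uint64_t q, uint32_t msb, uint32_t lsb)
    {
    	return (q & (((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL))) >> lsb;
    }

    int main(void)
    {
    	/* Port 0x70 in bits 31:16, direction 0 (OUT) in bit 3,
    	 * size field 0 (1-byte access) in bits 2:0. */
    	uint64_t exit_qual = 0x70UL << 16U;

    	/* The size field stores size - 1, hence the "+ 1UL" that
    	 * pio_instr_vmexit_handler also applies. */
    	printf("size      = %" PRIu64 " byte(s)\n",
    		bit_field(exit_qual, 2U, 0U) + 1UL);
    	printf("direction = %s\n",
    		(bit_field(exit_qual, 3U, 3U) == 0UL) ? "out" : "in");
    	printf("port      = 0x%" PRIx64 "\n", bit_field(exit_qual, 31U, 16U));
    	return 0;
    }

Expected output: size = 1 byte(s), direction = out, port = 0x70.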