hv: treewide: convert some MACROs to inline functions

MISRA-C requires that each parameter used in a MACRO body shall be
enclosed in parentheses.

In some cases, adding parentheses around every parameter is not an
ideal solution.
For example, it hurts readability when a MACRO takes many parameters,
and redundant parentheses pile up when one MACRO invokes another
MACRO with the same parameters.

This patch converts some MACROs to inline functions to avoid such cases.
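
As a rough illustration (the names below are made up for this commit
message, not taken from the hypervisor sources), the MACRO form needs
parentheses around every use of a parameter and duplicates them when
one MACRO expands another, while the inline-function form needs none:

    /* illustrative only: hypothetical helpers, not part of this patch */
    #include <stdint.h>

    #define BITS(val, msb, lsb) \
            (((val) >> (lsb)) & ((1UL << ((msb) - (lsb) + 1U)) - 1UL))
    #define CR_NUM(qual)    BITS((qual), 3U, 0U)  /* (qual) wrapped twice */

    static inline uint64_t bits(uint64_t val, uint32_t msb, uint32_t lsb)
    {
            return (val >> lsb) & ((1UL << (msb - lsb + 1U)) - 1UL);
    }
    static inline uint64_t cr_num(uint64_t qual)
    {
            return bits(qual, 3U, 0U);  /* no extra parentheses needed */
    }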

v1 -> v2:
 * Remove the unnecessary changes in hypervisor/bsp/uefi/efi/boot.h

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
commit 67038794af (parent 37fd3871b7)
Author: Shiqing Gao <shiqing.gao@intel.com>
Date: 2018-09-03 16:24:20 +08:00
Committed by: lijinxia
6 changed files with 116 additions and 76 deletions


@@ -349,22 +349,22 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 }
 /* Read MSR */
-#define CPU_MSR_READ(reg, msr_val_ptr) \
-{ \
-	uint32_t msrl, msrh; \
-	asm volatile (" rdmsr ":"=a"(msrl), \
-		"=d"(msrh) : "c" (reg)); \
-	*msr_val_ptr = ((uint64_t)msrh<<32) | msrl; \
+static inline void cpu_msr_read(uint32_t reg, uint64_t *msr_val_ptr)
+{
+	uint32_t msrl, msrh;
+	asm volatile (" rdmsr ":"=a"(msrl), "=d"(msrh) : "c" (reg));
+	*msr_val_ptr = ((uint64_t)msrh << 32U) | msrl;
 }
 /* Write MSR */
-#define CPU_MSR_WRITE(reg, msr_val) \
-{ \
-	uint32_t msrl, msrh; \
-	msrl = (uint32_t)msr_val; \
-	msrh = (uint32_t)(msr_val >> 32); \
-	asm volatile (" wrmsr " : : "c" (reg), \
-		"a" (msrl), "d" (msrh)); \
+static inline void cpu_msr_write(uint32_t reg, uint64_t msr_val)
+{
+	uint32_t msrl, msrh;
+	msrl = (uint32_t)msr_val;
+	msrh = (uint32_t)(msr_val >> 32U);
+	asm volatile (" wrmsr " : : "c" (reg), "a" (msrl), "d" (msrh));
 }
 #ifdef CONFIG_PARTITION_MODE
@@ -388,10 +388,11 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 #endif
 /* This macro writes the stack pointer. */
-#define CPU_SP_WRITE(stack_ptr) \
-{ \
-	uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL); \
-	asm volatile ("movq %0, %%rsp" : : "r"(rsp)); \
+static inline void cpu_sp_write(uint64_t *stack_ptr)
+{
+	uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL);
+	asm volatile ("movq %0, %%rsp" : : "r"(rsp));
 }
 /* Synchronizes all read accesses from memory */
@@ -419,12 +420,13 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 }
 /* Read time-stamp counter / processor ID */
-#define CPU_RDTSCP_EXECUTE(timestamp_ptr, cpu_id_ptr) \
-{ \
-	uint32_t tsl, tsh; \
-	asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), \
-		"=c"(*cpu_id_ptr)); \
-	*timestamp_ptr = ((uint64_t)tsh << 32) | tsl; \
+static inline void
+cpu_rdtscp_execute(uint64_t *timestamp_ptr, uint32_t *cpu_id_ptr)
+{
+	uint32_t tsl, tsh;
+	asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), "=c"(*cpu_id_ptr));
+	*timestamp_ptr = ((uint64_t)tsh << 32U) | tsl;
 }
 /* Macro to save rflags register */
@@ -500,21 +502,19 @@ static inline uint64_t cpu_rbp_get(void)
 	return ret;
 }
 static inline uint64_t
 msr_read(uint32_t reg_num)
 {
 	uint64_t msr_val;
-	CPU_MSR_READ(reg_num, &msr_val);
+	cpu_msr_read(reg_num, &msr_val);
 	return msr_val;
 }
 static inline void
 msr_write(uint32_t reg_num, uint64_t value64)
 {
-	CPU_MSR_WRITE(reg_num, value64);
+	cpu_msr_write(reg_num, value64);
 }
 static inline void

@@ -17,35 +17,72 @@ int vmcall_vmexit_handler(struct vcpu *vcpu);
 int cpuid_vmexit_handler(struct vcpu *vcpu);
 int cr_access_vmexit_handler(struct vcpu *vcpu);
-#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
-	(exit_qual & (((1UL << (MSB+1U))-1UL) - ((1UL << (LSB))-1UL)))
+static inline uint64_t
+vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)
+{
+	return (exit_qual &
+		(((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL)));
+}
+/* access Control-Register Info using exit qualification field */
+static inline uint64_t vm_exit_cr_access_cr_num(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 3U, 0U) >> 0U);
+}
-/* MACROs to access Control-Register Info using exit qualification field */
-#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 0U) >> 0U)
-#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 4U) >> 4U)
-#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
-#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11U, 8U) >> 8U)
-#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
+static inline uint64_t vm_exit_cr_access_type(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 5U, 4U) >> 4U);
+}
-/* MACROs to access IO Access Info using exit qualification field */
-#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2U, 0U) >> 0U)
-#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 3U) >> 3U)
-#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4U, 4U) >> 4U)
-#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 5U) >> 5U)
-#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
-#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
-	(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
+static inline uint64_t vm_exit_cr_access_lmsw_op(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
+}
+static inline uint64_t vm_exit_cr_access_reg_idx(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 11U, 8U) >> 8U);
+}
+static inline uint64_t vm_exit_cr_access_lmsw_src_date(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
+}
+/* access IO Access Info using exit qualification field */
+static inline uint64_t vm_exit_io_instruction_size(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 2U, 0U) >> 0U);
+}
+static inline uint64_t
+vm_exit_io_instruction_access_direction(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 3U, 3U) >> 3U);
+}
+static inline uint64_t vm_exit_io_instruction_is_string(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 4U, 4U) >> 4U);
+}
+static inline uint64_t
+vm_exit_io_instruction_is_rep_prefixed(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 5U, 5U) >> 5U);
+}
+static inline uint64_t
+vm_exit_io_instruction_is_operand_encoding(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
+}
+static inline uint64_t vm_exit_io_instruction_port_number(uint64_t exit_qual)
+{
+	return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
+}
 #ifdef HV_DEBUG
 void get_vmexit_profile(char *str_arg, int str_max);
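
For context, a minimal sketch of how the new accessors might be used when
decoding a control-register-access VM exit. The handler below is
illustrative only and is not part of this commit; the access-type value
(0 = MOV to CRx) follows the Intel SDM exit-qualification encoding.

/* illustrative sketch -- not code from this commit */
static int decode_cr_access_example(uint64_t exit_qual)
{
	uint64_t cr_num = vm_exit_cr_access_cr_num(exit_qual);   /* bits 3:0  */
	uint64_t type   = vm_exit_cr_access_type(exit_qual);     /* bits 5:4  */
	uint64_t reg    = vm_exit_cr_access_reg_idx(exit_qual);  /* bits 11:8 */

	/* access type 0 = MOV to CRx: 'reg' selects the general-purpose
	 * register holding the value being written to CR'cr_num' */
	if ((type == 0UL) && (cr_num == 0UL)) {
		return (int)reg;  /* e.g. hand off to a CR0-write helper */
	}
	return -1;
}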