hv: treewide: fix 'Use of function like macro'
- convert function-like macros to inline functions based on the MISRA-C requirement
- remove some unused and duplicated macros

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
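For readers unfamiliar with the pattern: each conversion below replaces a function-like macro with a static inline function, so the compiler type-checks the arguments while optimizing builds still inline the call. A minimal sketch of the before/after shape, using hypothetical names (PAGE_ALIGN_UP / page_align_up are illustrative only, not taken from this diff):

#include <stdint.h>

#define PAGE_SIZE_4K	4096UL
#define PAGE_MASK_4K	(~(PAGE_SIZE_4K - 1UL))

/* before: function-like macro, 'addr' has no checked type (MISRA-C finding) */
#define PAGE_ALIGN_UP(addr) \
	((((addr) + PAGE_SIZE_4K) - 1UL) & PAGE_MASK_4K)

/* after: static inline function, parameter and return types are checked */
static inline uint64_t page_align_up(uint64_t addr)
{
	return (((addr + PAGE_SIZE_4K) - 1UL) & PAGE_MASK_4K);
}

Unlike the macro, the function evaluates its argument exactly once and gives it a definite type (uint64_t here), which is what the MISRA-C rule is after.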
@@ -243,12 +243,6 @@ extern spinlock_t trampoline_spinlock;
  * to locate the per cpu data.
  */
 
-#define PER_CPU_DATA_OFFSET(sym_addr) \
-	((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))
-
-#define PER_CPU_DATA_SIZE \
-	((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))
-
 /* CPUID feature words */
 #define FEAT_1_ECX 0U /* CPUID[1].ECX */
 #define FEAT_1_EDX 1U /* CPUID[1].EDX */
@@ -254,7 +254,11 @@ struct vcpu_dump {
 	int str_max;
 };
 
-#define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == BOOT_CPU_ID)
+static inline bool is_vcpu_bsp(struct vcpu *vcpu)
+{
+	return (vcpu->vcpu_id == BOOT_CPU_ID);
+}
+
 /* do not update Guest RIP for next VM Enter */
 static inline void vcpu_retain_rip(struct vcpu *vcpu)
 {
@@ -43,9 +43,15 @@
 #define IA32E_REF_MASK \
 	(boot_cpu_data.physical_address_mask)
 
-#define ROUND_PAGE_UP(addr) \
-	((((addr) + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK)
-#define ROUND_PAGE_DOWN(addr) ((addr) & CPU_PAGE_MASK)
+static inline uint64_t round_page_up(uint64_t addr)
+{
+	return (((addr + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
+}
+
+static inline uint64_t round_page_down(uint64_t addr)
+{
+	return (addr & CPU_PAGE_MASK);
+}
 
 enum _page_table_type {
 	PTT_PRIMARY = 0, /* Mapping for hypervisor */
@@ -102,12 +108,12 @@ struct e820_entry {
 #pragma pack()
 
 /* E820 memory types */
-#define E820_TYPE_RAM 1U /* EFI 1, 2, 3, 4, 5, 6, 7 */
-#define E820_TYPE_RESERVED 2U
+#define E820_TYPE_RAM 1U /* EFI 1, 2, 3, 4, 5, 6, 7 */
+#define E820_TYPE_RESERVED 2U
 /* EFI 0, 11, 12, 13 (everything not used elsewhere) */
-#define E820_TYPE_ACPI_RECLAIM 3U /* EFI 9 */
-#define E820_TYPE_ACPI_NVS 4U /* EFI 10 */
-#define E820_TYPE_UNUSABLE 5U /* EFI 8 */
+#define E820_TYPE_ACPI_RECLAIM 3U /* EFI 9 */
+#define E820_TYPE_ACPI_NVS 4U /* EFI 10 */
+#define E820_TYPE_UNUSABLE 5U /* EFI 8 */
 
 /** Calculates the page table address for a given address.
  *
@@ -122,9 +128,9 @@ static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
 	return pd + (((vaddr >> 22U) + 1U) * 1024U);
 }
 
-#define CACHE_FLUSH_INVALIDATE_ALL() \
-{ \
-	asm volatile (" wbinvd\n" : : : "memory"); \
+static inline void cache_flush_invalidate_all(void)
+{
+	asm volatile (" wbinvd\n" : : : "memory");
 }
 
 static inline void clflush(volatile void *p)
@@ -133,20 +139,19 @@ static inline void clflush(volatile void *p)
 }
 
 /* External Interfaces */
-void destroy_ept(struct vm *vm);
-uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
-uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
-uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
+void destroy_ept(struct vm *vm);
+uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
+uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
 int ept_mr_add(const struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
-	uint64_t gpa, uint64_t size, uint64_t prot_orig);
-int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
-	uint64_t gpa, uint64_t size,
-	uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
-	uint64_t gpa, uint64_t size);
+	uint64_t gpa, uint64_t size, uint64_t prot_orig);
+int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+	uint64_t size, uint64_t prot_set, uint64_t prot_clr);
+int ept_mr_del(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+	uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);
-int ept_violation_vmexit_handler(struct vcpu *vcpu);
-int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
+int ept_violation_vmexit_handler(struct vcpu *vcpu);
+int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
 
 #endif /* ASSEMBLER not defined */
 
@@ -509,12 +509,15 @@
 #define PAT_MEM_TYPE_WP 0x05UL /* write protected */
 #define PAT_MEM_TYPE_WB 0x06UL /* writeback */
 #define PAT_MEM_TYPE_UCM 0x07UL /* uncached minus */
-#define PAT_MEM_TYPE_INVALID(x) (((x) != PAT_MEM_TYPE_UC) && \
-	((x) != PAT_MEM_TYPE_WC) && \
-	((x) != PAT_MEM_TYPE_WT) && \
-	((x) != PAT_MEM_TYPE_WP) && \
-	((x) != PAT_MEM_TYPE_WB) && \
-	((x) != PAT_MEM_TYPE_UCM))
+
+#ifndef ASSEMBLER
+static inline bool pat_mem_type_invalid(uint64_t x)
+{
+	return ((x != PAT_MEM_TYPE_UC) && (x != PAT_MEM_TYPE_WC) &&
+		(x != PAT_MEM_TYPE_WT) && (x != PAT_MEM_TYPE_WP) &&
+		(x != PAT_MEM_TYPE_WB) && (x != PAT_MEM_TYPE_UCM));
+}
+#endif /* ASSEMBLER */
 
 /* 5 high-order bits in every field are reserved */
 #define PAT_FIELD_RSV_BITS (0xF8U)
@@ -58,7 +58,7 @@
 #define VMX_EOI_EXIT2_HIGH 0x00002021U
 #define VMX_EOI_EXIT3_FULL 0x00002022U
 #define VMX_EOI_EXIT3_HIGH 0x00002023U
-#define VMX_EOI_EXIT(vector) (VMX_EOI_EXIT0_FULL + (((vector) >> 6U) * 2U))
+
 #define VMX_XSS_EXITING_BITMAP_FULL 0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH 0x0000202DU
 /* 64-bit read-only data fields */
@@ -374,7 +374,19 @@
 #define VMX_INT_TYPE_HW_EXP 3U
 #define VMX_INT_TYPE_SW_EXP 6U
 
-/*VM exit qulifications for APIC-access
+#define VM_SUCCESS 0
+#define VM_FAIL -1
+
+#define VMX_VMENTRY_FAIL 0x80000000U
+
+#ifndef ASSEMBLER
+
+static inline uint32_t vmx_eoi_exit(uint32_t vector)
+{
+	return (VMX_EOI_EXIT0_FULL + ((vector >> 6U) * 2U));
+}
+
+/* VM exit qulifications for APIC-access
  * Access type:
  *  0 = linear access for a data read during instruction execution
  *  1 = linear access for a data write during instruction execution
@@ -384,16 +396,15 @@
  *  15 = guest-physical access for an instructon fetch or during
  *       instruction execution
  */
-#define APIC_ACCESS_TYPE(qual) (((qual) >> 12U) & 0xFUL)
-#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFFU)
+static inline uint64_t apic_access_type(uint64_t qual)
+{
+	return ((qual >> 12U) & 0xFUL);
+}
 
-
-#define VM_SUCCESS 0
-#define VM_FAIL -1
-
-#define VMX_VMENTRY_FAIL 0x80000000U
-
-#ifndef ASSEMBLER
+static inline uint64_t apic_access_offset(uint64_t qual)
+{
+	return (qual & 0xFFFUL);
+}
 
 #define RFLAGS_C (1U<<0)
 #define RFLAGS_Z (1U<<6)
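As a usage note (not part of the commit): callers that used to expand APIC_ACCESS_TYPE()/APIC_ACCESS_OFFSET() now call the inline helpers instead. A hypothetical caller is sketched below; only apic_access_type(), apic_access_offset(), VM_SUCCESS and VM_FAIL come from the diff above, everything else is illustrative.

/* Hypothetical caller; 'qual' is assumed to hold the VM-exit
 * qualification for an APIC-access exit. */
static int32_t handle_apic_access_sketch(uint64_t qual)
{
	uint64_t type = apic_access_type(qual);     /* was APIC_ACCESS_TYPE(qual) */
	uint64_t offset = apic_access_offset(qual); /* was APIC_ACCESS_OFFSET(qual) */

	/* access type 1 = linear write during instruction execution;
	 * 0xB0 is the xAPIC EOI register offset (illustrative check only) */
	if ((type == 1UL) && (offset == 0xB0UL)) {
		return VM_SUCCESS;
	}
	return VM_FAIL;
}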