hv: treewide: fix 'Use of function like macro'
- convert function-like macros to inline functions based on MISRA-C requirement
- remove some unused and duplicated macros

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
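The pattern applied throughout the commit is mechanical: each function-like macro becomes a static inline function with explicitly typed parameters, which gains type checking and single evaluation of arguments at no runtime cost. A minimal compilable sketch of the conversion, using the is_vcpu_bsp() change from this commit (the BOOT_CPU_ID value and the struct layout are trimmed-down assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>

#define BOOT_CPU_ID 0U  /* illustrative value only */

struct vcpu {
	uint16_t vcpu_id;
	/* ... other fields elided ... */
};

/* Old form, flagged by MISRA C: no type checking, textual argument expansion.
 * #define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == BOOT_CPU_ID)
 */

/* New form: same generated code after inlining, but the parameter is typed
 * and evaluated exactly once. */
static inline bool is_vcpu_bsp(struct vcpu *vcpu)
{
	return (vcpu->vcpu_id == BOOT_CPU_ID);
}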
@@ -243,12 +243,6 @@ extern spinlock_t trampoline_spinlock;
  * to locate the per cpu data.
  */
 
-#define PER_CPU_DATA_OFFSET(sym_addr)	\
-	((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))
-
-#define PER_CPU_DATA_SIZE	\
-	((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))
-
 /* CPUID feature words */
 #define FEAT_1_ECX		0U	/* CPUID[1].ECX */
 #define FEAT_1_EDX		1U	/* CPUID[1].EDX */

@@ -254,7 +254,11 @@ struct vcpu_dump {
 	int str_max;
 };
 
-#define is_vcpu_bsp(vcpu)	((vcpu)->vcpu_id == BOOT_CPU_ID)
+static inline bool is_vcpu_bsp(struct vcpu *vcpu)
+{
+	return (vcpu->vcpu_id == BOOT_CPU_ID);
+}
+
 /* do not update Guest RIP for next VM Enter */
 static inline void vcpu_retain_rip(struct vcpu *vcpu)
 {

@@ -43,9 +43,15 @@
 #define IA32E_REF_MASK	\
 	(boot_cpu_data.physical_address_mask)
 
-#define ROUND_PAGE_UP(addr) \
-	((((addr) + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK)
-#define ROUND_PAGE_DOWN(addr)	((addr) & CPU_PAGE_MASK)
+static inline uint64_t round_page_up(uint64_t addr)
+{
+	return (((addr + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
+}
+
+static inline uint64_t round_page_down(uint64_t addr)
+{
+	return (addr & CPU_PAGE_MASK);
+}
 
 enum _page_table_type {
 	PTT_PRIMARY = 0,	/* Mapping for hypervisor */

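round_page_up() uses the usual mask arithmetic: adding PAGE_SIZE - 1 before masking bumps any unaligned address to the next page boundary while leaving already-aligned addresses untouched. A standalone sketch with assumed 4 KiB constants (in the hypervisor they come from cpu.h):

#include <assert.h>
#include <stdint.h>

#define CPU_PAGE_SIZE 4096UL
#define CPU_PAGE_MASK (~(CPU_PAGE_SIZE - 1UL))  /* 0xFFFFFFFFFFFFF000 */

static inline uint64_t round_page_up(uint64_t addr)
{
	return (((addr + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
}

static inline uint64_t round_page_down(uint64_t addr)
{
	return (addr & CPU_PAGE_MASK);
}

int main(void)
{
	assert(round_page_up(0x1234UL) == 0x2000UL);   /* next boundary */
	assert(round_page_up(0x2000UL) == 0x2000UL);   /* aligned: unchanged */
	assert(round_page_down(0x1234UL) == 0x1000UL); /* previous boundary */
	return 0;
}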
@@ -102,12 +108,12 @@ struct e820_entry {
 #pragma pack()
 
 /* E820 memory types */
-#define E820_TYPE_RAM		1U	/* EFI 1, 2, 3, 4, 5, 6, 7 */
-#define E820_TYPE_RESERVED	2U
+#define E820_TYPE_RAM		1U	/* EFI 1, 2, 3, 4, 5, 6, 7 */
+#define E820_TYPE_RESERVED	2U
 /* EFI 0, 11, 12, 13 (everything not used elsewhere) */
-#define E820_TYPE_ACPI_RECLAIM	3U	/* EFI 9 */
-#define E820_TYPE_ACPI_NVS	4U	/* EFI 10 */
-#define E820_TYPE_UNUSABLE	5U	/* EFI 8 */
+#define E820_TYPE_ACPI_RECLAIM	3U	/* EFI 9 */
+#define E820_TYPE_ACPI_NVS	4U	/* EFI 10 */
+#define E820_TYPE_UNUSABLE	5U	/* EFI 8 */
 
 /** Calculates the page table address for a given address.
  *

@@ -122,9 +128,9 @@ static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
 	return pd + (((vaddr >> 22U) + 1U) * 1024U);
 }
 
-#define CACHE_FLUSH_INVALIDATE_ALL()	\
-{	\
-	asm volatile (" wbinvd\n" : : : "memory");	\
-}
+static inline void cache_flush_invalidate_all(void)
+{
+	asm volatile (" wbinvd\n" : : : "memory");
+}
 
 static inline void clflush(volatile void *p)

@@ -133,20 +139,19 @@ static inline void clflush(volatile void *p)
 }
 
 /* External Interfaces */
-void destroy_ept(struct vm *vm);
-uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
-uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
-uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
+void destroy_ept(struct vm *vm);
+uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
+uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
 int ept_mr_add(const struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
-		uint64_t gpa, uint64_t size, uint64_t prot_orig);
-int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
-		uint64_t gpa, uint64_t size,
-		uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
-		uint64_t gpa, uint64_t size);
+	uint64_t gpa, uint64_t size, uint64_t prot_orig);
+int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+	uint64_t size, uint64_t prot_set, uint64_t prot_clr);
+int ept_mr_del(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+	uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);
-int ept_violation_vmexit_handler(struct vcpu *vcpu);
-int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
+int ept_violation_vmexit_handler(struct vcpu *vcpu);
+int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
 
 #endif /* ASSEMBLER not defined */

@@ -509,12 +509,15 @@
 #define PAT_MEM_TYPE_WP		0x05UL	/* write protected */
 #define PAT_MEM_TYPE_WB		0x06UL	/* writeback */
 #define PAT_MEM_TYPE_UCM	0x07UL	/* uncached minus */
-#define PAT_MEM_TYPE_INVALID(x)	(((x) != PAT_MEM_TYPE_UC) && \
-		((x) != PAT_MEM_TYPE_WC) && \
-		((x) != PAT_MEM_TYPE_WT) && \
-		((x) != PAT_MEM_TYPE_WP) && \
-		((x) != PAT_MEM_TYPE_WB) && \
-		((x) != PAT_MEM_TYPE_UCM))
+
+#ifndef ASSEMBLER
+static inline bool pat_mem_type_invalid(uint64_t x)
+{
+	return ((x != PAT_MEM_TYPE_UC) && (x != PAT_MEM_TYPE_WC) &&
+		(x != PAT_MEM_TYPE_WT) && (x != PAT_MEM_TYPE_WP) &&
+		(x != PAT_MEM_TYPE_WB) && (x != PAT_MEM_TYPE_UCM));
+}
+#endif /* ASSEMBLER */
 
 /* 5 high-order bits in every field are reserved */
 #define PAT_FIELD_RSV_BITS	(0xF8U)

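pat_mem_type_invalid() rejects the two PAT encodings (2 and 3) that the SDM reserves, plus anything above 7. A sketch of how such a helper can validate a full 64-bit IA32_PAT value field by field; pat_value_valid() is a hypothetical caller for illustration, not part of the commit:

#include <stdbool.h>
#include <stdint.h>

#define PAT_MEM_TYPE_UC    0x00UL
#define PAT_MEM_TYPE_WC    0x01UL
#define PAT_MEM_TYPE_WT    0x04UL
#define PAT_MEM_TYPE_WP    0x05UL
#define PAT_MEM_TYPE_WB    0x06UL
#define PAT_MEM_TYPE_UCM   0x07UL
#define PAT_FIELD_RSV_BITS (0xF8U)

static inline bool pat_mem_type_invalid(uint64_t x)
{
	return ((x != PAT_MEM_TYPE_UC) && (x != PAT_MEM_TYPE_WC) &&
		(x != PAT_MEM_TYPE_WT) && (x != PAT_MEM_TYPE_WP) &&
		(x != PAT_MEM_TYPE_WB) && (x != PAT_MEM_TYPE_UCM));
}

/* Check all eight 8-bit fields of a guest-written IA32_PAT value. */
static bool pat_value_valid(uint64_t pat)
{
	for (int i = 0; i < 8; i++) {
		uint64_t field = (pat >> (i * 8)) & 0xFFUL;

		if (((field & PAT_FIELD_RSV_BITS) != 0UL) ||
		    pat_mem_type_invalid(field)) {
			return false;
		}
	}
	return true;
}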
@@ -58,7 +58,7 @@
 #define VMX_EOI_EXIT2_HIGH		0x00002021U
 #define VMX_EOI_EXIT3_FULL		0x00002022U
 #define VMX_EOI_EXIT3_HIGH		0x00002023U
-#define VMX_EOI_EXIT(vector)	(VMX_EOI_EXIT0_FULL + (((vector) >> 6U) * 2U))
+
 #define VMX_XSS_EXITING_BITMAP_FULL	0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH	0x0000202DU
 /* 64-bit read-only data fields */

@@ -374,7 +374,19 @@
 #define VMX_INT_TYPE_HW_EXP	3U
 #define VMX_INT_TYPE_SW_EXP	6U
 
-/*VM exit qulifications for APIC-access
+#define VM_SUCCESS	0
+#define VM_FAIL		-1
+
+#define VMX_VMENTRY_FAIL	0x80000000U
+
+#ifndef ASSEMBLER
+
+static inline uint32_t vmx_eoi_exit(uint32_t vector)
+{
+	return (VMX_EOI_EXIT0_FULL + ((vector >> 6U) * 2U));
+}
+
+/* VM exit qulifications for APIC-access
  * Access type:
  * 0 = linear access for a data read during instruction execution
  * 1 = linear access for a data write during instruction execution

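vmx_eoi_exit() maps an interrupt vector to its VMCS EOI-exit bitmap field: each 64-bit bitmap covers 64 vectors (hence the shift by 6), and FULL/HIGH encodings alternate (hence the stride of 2). A standalone check, assuming the VMX_EOI_EXIT0_FULL encoding of 0x201C implied by the field values above:

#include <assert.h>
#include <stdint.h>

#define VMX_EOI_EXIT0_FULL 0x0000201CU  /* assumed base encoding */

static inline uint32_t vmx_eoi_exit(uint32_t vector)
{
	/* 64 vectors per bitmap field; FULL/HIGH encodings alternate. */
	return (VMX_EOI_EXIT0_FULL + ((vector >> 6U) * 2U));
}

int main(void)
{
	assert(vmx_eoi_exit(0x25U) == 0x201CU); /* vector 37 -> EOI_EXIT0_FULL */
	assert(vmx_eoi_exit(0x80U) == 0x2020U); /* vector 128 -> EOI_EXIT2_FULL */
	return 0;
}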
@@ -384,16 +396,15 @@
  * 15 = guest-physical access for an instructon fetch or during
  *      instruction execution
  */
-#define APIC_ACCESS_TYPE(qual)		(((qual) >> 12U) & 0xFUL)
-#define APIC_ACCESS_OFFSET(qual)	((qual) & 0xFFFU)
+static inline uint64_t apic_access_type(uint64_t qual)
+{
+	return ((qual >> 12U) & 0xFUL);
+}
 
-#define VM_SUCCESS	0
-#define VM_FAIL		-1
-
-#define VMX_VMENTRY_FAIL	0x80000000U
-
-#ifndef ASSEMBLER
-
+static inline uint64_t apic_access_offset(uint64_t qual)
+{
+	return (qual & 0xFFFUL);
+}
 
 #define RFLAGS_C (1U<<0)
 #define RFLAGS_Z (1U<<6)

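The two accessors split an APIC-access VM-exit qualification into its access-type field (bits 15:12) and the offset within the APIC page (bits 11:0). A sketch decoding a hypothetical qualification for a write to the EOI register at offset 0xB0:

#include <assert.h>
#include <stdint.h>

static inline uint64_t apic_access_type(uint64_t qual)
{
	return ((qual >> 12U) & 0xFUL);
}

static inline uint64_t apic_access_offset(uint64_t qual)
{
	return (qual & 0xFFFUL);
}

int main(void)
{
	/* Hypothetical qualification: linear write (type 1) to APIC page
	 * offset 0xB0, i.e. the EOI register. */
	uint64_t qual = (1UL << 12U) | 0xB0UL;

	assert(apic_access_type(qual) == 1UL);
	assert(apic_access_offset(qual) == 0xB0UL);
	return 0;
}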
@@ -156,13 +156,19 @@ build_atomic_swap(atomic_swap64, "q", uint64_t, p, v)
  * #define atomic_readandclear32(P) \
  * (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
  */
-#define atomic_readandclear32(p)	atomic_swap32(p, 0U)
+static inline uint32_t atomic_readandclear32(uint32_t *p)
+{
+	return atomic_swap32(p, 0U);
+}
 
 /*
  * #define atomic_readandclear64(P) \
  * (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
  */
-#define atomic_readandclear64(p)	atomic_swap64(p, 0UL)
+static inline uint64_t atomic_readandclear64(uint64_t *p)
+{
+	return atomic_swap64(p, 0UL);
+}
 
 #define build_atomic_cmpxchg(name, size, type, ptr, old, new)	\
 static inline type name(volatile type *ptr,	\

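Read-and-clear is just an atomic exchange with zero: the caller gets the old value and the location is zeroed in one indivisible step. A semantics sketch using C11 atomics in place of the hypervisor's inline-asm atomic_swap32():

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Equivalent semantics to the hv helper: atomically store 0 and
 * return whatever the location held before. */
static inline uint32_t atomic_readandclear32_sketch(_Atomic uint32_t *p)
{
	return atomic_exchange(p, 0U);
}

int main(void)
{
	_Atomic uint32_t flags = 0xDEADU;

	assert(atomic_readandclear32_sketch(&flags) == 0xDEADU);
	assert(atomic_load(&flags) == 0U);
	return 0;
}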
@@ -188,19 +194,47 @@ static inline type name(type *ptr, type v)	\
 	return v;	\
 }
 build_atomic_xadd(atomic_xadd16, "w", uint16_t, p, v)
-build_atomic_xadd(atomic_xadd32, "l", int, p, v)
-build_atomic_xadd(atomic_xadd64, "q", long, p, v)
+build_atomic_xadd(atomic_xadd32, "l", int32_t, p, v)
+build_atomic_xadd(atomic_xadd64, "q", int64_t, p, v)
 
-#define atomic_add_return(p, v)		( atomic_xadd32(p, v) + v )
-#define atomic_sub_return(p, v)		( atomic_xadd32(p, -v) - v )
+static inline int32_t atomic_add_return(int32_t *p, int32_t v)
+{
+	return (atomic_xadd32(p, v) + v);
+}
 
-#define atomic_inc_return(v)		atomic_add_return((v), 1)
-#define atomic_dec_return(v)		atomic_sub_return((v), 1)
+static inline int32_t atomic_sub_return(int32_t *p, int32_t v)
+{
+	return (atomic_xadd32(p, -v) - v);
+}
 
-#define atomic_add64_return(p, v)	( atomic_xadd64(p, v) + v )
-#define atomic_sub64_return(p, v)	( atomic_xadd64(p, -v) - v )
+static inline int32_t atomic_inc_return(int32_t *v)
+{
+	return atomic_add_return(v, 1);
+}
 
-#define atomic_inc64_return(v)		atomic_add64_return((v), 1)
-#define atomic_dec64_return(v)		atomic_sub64_return((v), 1)
+static inline int32_t atomic_dec_return(int32_t *v)
+{
+	return atomic_sub_return(v, 1);
+}
+
+static inline int64_t atomic_add64_return(int64_t *p, int64_t v)
+{
+	return (atomic_xadd64(p, v) + v);
+}
+
+static inline int64_t atomic_sub64_return(int64_t *p, int64_t v)
+{
+	return (atomic_xadd64(p, -v) - v);
+}
+
+static inline int64_t atomic_inc64_return(int64_t *v)
+{
+	return atomic_add64_return(v, 1);
+}
+
+static inline int64_t atomic_dec64_return(int64_t *v)
+{
+	return atomic_sub64_return(v, 1);
+}
 
 #endif /* ATOMIC_H*/

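The xadd-based helpers work because x86 xadd returns the value the memory held before the addition; adding v once more therefore yields the post-add value, which is what the *_return() callers expect. A semantics sketch using a GCC/Clang builtin instead of the hand-written assembly:

#include <assert.h>
#include <stdint.h>

/* Fetch-add returns the OLD value; adding v again gives the NEW value. */
static inline int32_t atomic_add_return_sketch(int32_t *p, int32_t v)
{
	return (__atomic_fetch_add(p, v, __ATOMIC_SEQ_CST) + v);
}

int main(void)
{
	int32_t refcnt = 1;

	assert(atomic_add_return_sketch(&refcnt, 1) == 2);  /* inc_return */
	assert(atomic_add_return_sketch(&refcnt, -1) == 1); /* dec_return */
	return 0;
}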
@@ -14,13 +14,9 @@
  * Returns TRUE if aligned; FALSE if not aligned
  * NOTE: The required alignment must be a power of 2 (2, 4, 8, 16, 32, etc)
  */
-#define MEM_ALIGNED_CHECK(value, req_align) \
-	(((uint64_t)(value) & ((uint64_t)(req_align) - 1UL)) == 0UL)
-
-#if !defined(ASSEMBLER) && !defined(LINKER_SCRIPT)
-
-#define ARRAY_LENGTH(x) (sizeof(x)/sizeof((x)[0]))
-
-#endif
+static inline bool mem_aligned_check(uint64_t value, uint64_t req_align)
+{
+	return ((value & (req_align - 1UL)) == 0UL);
+}
 
 #endif /* INCLUDE_MACROS_H defined */

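For a power-of-two alignment, req_align - 1 is a mask of exactly the low-order bits that must be zero in an aligned value, so a single AND suffices. A standalone check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static inline bool mem_aligned_check(uint64_t value, uint64_t req_align)
{
	/* (align - 1) masks the low bits that an aligned value must clear. */
	return ((value & (req_align - 1UL)) == 0UL);
}

int main(void)
{
	assert(mem_aligned_check(0x3000UL, 4096UL));
	assert(!mem_aligned_check(0x3008UL, 4096UL));
	assert(mem_aligned_check(0x3008UL, 8UL));
	return 0;
}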
@@ -7,14 +7,6 @@
 #ifndef UTIL_H
 #define UTIL_H
 
-/** Add an offset (in bytes) to an (base)address.
- *
- * @param addr Baseaddress
- * @param off Offset
- * @return Returns baseaddress + offset in bytes.
- */
-#define ADD_OFFSET(addr, off) (void *)(((uint8_t *)(addr))+(off))
-
 #define offsetof(st, m) __builtin_offsetof(st, m)
 
 /** Roundup (x/y) to ( x/y + (x%y) ? 1 : 0) **/