hv: treewide: fix 'Use of function like macro'
- convert function like macros to inline functions based on MISRA-C requirement
- remove some unused and duplicated macros

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
This commit is contained in:
parent d72e65c91a
commit bcaede0c0e
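For readers skimming the diff, here is a minimal sketch of the conversion pattern this commit applies tree-wide: a function-like macro becomes a static inline function, so the compiler can check parameter and return types as the MISRA-C rule expects. The sketch reuses the seg_desc_present() helper introduced in the diff below; the macro is renamed to SEG_DESC_PRESENT_MACRO here only so both forms can coexist in one compilable file (in the real header the old SEG_DESC_PRESENT macro is simply deleted).

#include <stdbool.h>
#include <stdint.h>

/* Before: a function-like macro. The argument is substituted textually,
 * so there is no type checking at the call site. */
#define SEG_DESC_PRESENT_MACRO(access) (((access) & 0x0080U) != 0U)

/* After: a static inline function. Same generated code in practice,
 * but the parameter and return types are now checked by the compiler. */
static inline bool seg_desc_present(uint32_t access)
{
	return ((access & 0x0080U) != 0U);
}

int main(void)
{
	uint32_t access = 0x0080U;

	/* Both forms evaluate to the same result for the same input. */
	return (SEG_DESC_PRESENT_MACRO(access) == seg_desc_present(access)) ? 0 : 1;
}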
@@ -709,7 +709,7 @@ void cpu_dead(uint16_t pcpu_id)
 /* clean up native stuff */
 timer_cleanup();
 vmx_off(pcpu_id);
-CACHE_FLUSH_INVALIDATE_ALL();
+cache_flush_invalidate_all();
 
 /* Set state to show CPU is dead */
 cpu_set_current_state(pcpu_id, CPU_STATE_DEAD);
@@ -605,8 +605,8 @@ uint64_t e820_alloc_low_memory(uint32_t size_arg)
 entry = &e820[i];
 uint64_t start, end, length;
 
-start = ROUND_PAGE_UP(entry->baseaddr);
-end = ROUND_PAGE_DOWN(entry->baseaddr + entry->length);
+start = round_page_up(entry->baseaddr);
+end = round_page_down(entry->baseaddr + entry->length);
-length = end - start;
+length = (end > start) ? (end - start) : 0;
 
@@ -410,7 +410,7 @@ static bool is_desc_valid(struct seg_desc *desc, uint32_t prot)
 uint32_t type;
 
 /* The descriptor type must indicate a code/data segment. */
-type = SEG_DESC_TYPE(desc->access);
+type = seg_desc_type(desc->access);
 if (type < 16U || type > 31U) {
 return false;
 }
@@ -2250,7 +2250,7 @@ int decode_instruction(struct vcpu *vcpu)
 get_guest_paging_info(vcpu, emul_ctxt, csar);
 cpu_mode = get_vcpu_mode(vcpu);
 
-retval = local_decode_instruction(cpu_mode, SEG_DESC_DEF32(csar),
+retval = local_decode_instruction(cpu_mode, seg_desc_def32(csar),
 &emul_ctxt->vie);
 
 if (retval != 0) {
@@ -159,10 +159,25 @@ struct seg_desc {
 #define PROT_READ 0x01U /* pages can be read */
 #define PROT_WRITE 0x02U /* pages can be written */
 
-#define SEG_DESC_TYPE(access) ((access) & 0x001fU)
-#define SEG_DESC_PRESENT(access) (((access) & 0x0080U) != 0U)
-#define SEG_DESC_DEF32(access) (((access) & 0x4000U) != 0U)
-#define SEG_DESC_UNUSABLE(access) (((access) & 0x10000U) != 0U)
+static inline uint32_t seg_desc_type(uint32_t access)
+{
+return (access & 0x001fU);
+}
+
+static inline bool seg_desc_present(uint32_t access)
+{
+return ((access & 0x0080U) != 0U);
+}
+
+static inline bool seg_desc_def32(uint32_t access)
+{
+return ((access & 0x4000U) != 0U);
+}
+
+static inline bool seg_desc_unusable(uint32_t access)
+{
+return ((access & 0x10000U) != 0U);
+}
 
 struct vm_guest_paging {
 uint64_t cr3;
@@ -37,7 +37,11 @@
 #include "vlapic.h"
 
 #define VLAPIC_VERBOS 0
-#define PRIO(x) ((x) >> 4)
+
+static inline uint32_t prio(uint32_t x)
+{
+return (x >> 4U);
+}
 
 #define VLAPIC_VERSION (16U)
 
@@ -70,8 +74,11 @@ static inline void vlapic_dump_isr(struct acrn_vlapic *vlapic, char *msg)
 }
 }
 #else
-#define vlapic_dump_irr(vlapic, msg)
-#define vlapic_dump_isr(vlapic, msg)
+static inline void
+vlapic_dump_irr(__unused struct acrn_vlapic *vlapic, __unused char *msg) {}
+
+static inline void
+vlapic_dump_isr(__unused struct acrn_vlapic *vlapic, __unused char *msg) {}
 #endif
 
 /*APIC-v APIC-access address */
@@ -770,7 +777,7 @@ vlapic_update_ppr(struct acrn_vlapic *vlapic)
 lastprio = -1;
 for (i = 1U; i <= vlapic->isrvec_stk_top; i++) {
 isrvec = (uint32_t)vlapic->isrvec_stk[i];
-curprio = (int32_t)PRIO(isrvec);
+curprio = (int32_t)prio(isrvec);
 if (curprio <= lastprio) {
 dump_isrvec_stk(vlapic);
 panic("isrvec_stk does not satisfy invariant");
@@ -800,7 +807,7 @@ vlapic_update_ppr(struct acrn_vlapic *vlapic)
 }
 }
 
-if (PRIO(tpr) >= PRIO(top_isrvec)) {
+if (prio(tpr) >= prio(top_isrvec)) {
 ppr = tpr;
 } else {
 ppr = top_isrvec & 0xf0U;
@@ -1232,7 +1239,7 @@ vlapic_pending_intr(struct acrn_vlapic *vlapic, uint32_t *vecptr)
 bitpos = (uint32_t)fls32(val);
 if (bitpos != INVALID_BIT_INDEX) {
 vector = (i * 32U) + bitpos;
-if (PRIO(vector) > PRIO(lapic->ppr)) {
+if (prio(vector) > prio(lapic->ppr)) {
 if (vecptr != NULL) {
 *vecptr = vector;
 }
@@ -2079,7 +2086,7 @@ apicv_batch_set_tmr(struct acrn_vlapic *vlapic)
 val = ptr[(s/TMR_STEP_LEN) + 1].val;
 val <<= TMR_STEP_LEN;
 val |= ptr[s/TMR_STEP_LEN].val;
-exec_vmwrite64(VMX_EOI_EXIT(s), val);
+exec_vmwrite64(vmx_eoi_exit(s), val);
 
 s += EOI_STEP_LEN;
 }
@@ -2189,11 +2196,11 @@ int apic_access_vmexit_handler(struct vcpu *vcpu)
 struct mmio_request *mmio = &vcpu->req.reqs.mmio;
 
 qual = vcpu->arch_vcpu.exit_qualification;
-access_type = APIC_ACCESS_TYPE(qual);
+access_type = apic_access_type(qual);
 
 /*parse offset if linear access*/
 if (access_type <= 3UL) {
-offset = (uint32_t)APIC_ACCESS_OFFSET(qual);
+offset = (uint32_t)apic_access_offset(qual);
 }
 
 vlapic = vcpu->arch_vcpu.vlapic;
@@ -150,7 +150,7 @@ static int modify_or_del_pde(uint64_t *pdpte,
 }
 if (pde_large(*pde) != 0UL) {
 if (vaddr_next > vaddr_end ||
-!MEM_ALIGNED_CHECK(vaddr, PDE_SIZE)) {
+!mem_aligned_check(vaddr, PDE_SIZE)) {
 ret = split_large_page(pde, IA32E_PD, ptt);
 if (ret != 0) {
 return ret;
@@ -205,7 +205,7 @@ static int modify_or_del_pdpte(uint64_t *pml4e,
 }
 if (pdpte_large(*pdpte) != 0UL) {
 if (vaddr_next > vaddr_end ||
-!MEM_ALIGNED_CHECK(vaddr, PDPTE_SIZE)) {
+!mem_aligned_check(vaddr, PDPTE_SIZE)) {
 ret = split_large_page(pdpte, IA32E_PDPT, ptt);
 if (ret != 0) {
 return ret;
@@ -254,8 +254,8 @@ int mmu_modify_or_del(uint64_t *pml4_page,
 uint64_t *pml4e;
 int ret;
 
-if (!MEM_ALIGNED_CHECK(vaddr, PAGE_SIZE_4K) ||
-!MEM_ALIGNED_CHECK(size, PAGE_SIZE_4K) ||
+if (!mem_aligned_check(vaddr, (uint64_t)PAGE_SIZE_4K) ||
+!mem_aligned_check(size, (uint64_t)PAGE_SIZE_4K) ||
 (type != MR_MODIFY && type != MR_DEL)) {
 pr_err("%s, invalid parameters!\n", __func__);
 return -EINVAL;
@@ -337,8 +337,8 @@ static int add_pde(uint64_t *pdpte, uint64_t paddr_start,
 uint64_t vaddr_next = (vaddr & PDE_MASK) + PDE_SIZE;
 
 if (pgentry_present(ptt, *pde) == 0UL) {
-if (MEM_ALIGNED_CHECK(paddr, PDE_SIZE) &&
-MEM_ALIGNED_CHECK(vaddr, PDE_SIZE) &&
+if (mem_aligned_check(paddr, PDE_SIZE) &&
+mem_aligned_check(vaddr, PDE_SIZE) &&
 (vaddr_next <= vaddr_end)) {
 set_pgentry(pde, paddr | (prot | PAGE_PSE));
 if (vaddr_next < vaddr_end) {
@@ -386,8 +386,8 @@ static int add_pdpte(uint64_t *pml4e, uint64_t paddr_start,
 uint64_t vaddr_next = (vaddr & PDPTE_MASK) + PDPTE_SIZE;
 
 if (pgentry_present(ptt, *pdpte) == 0UL) {
-if (MEM_ALIGNED_CHECK(paddr, PDPTE_SIZE) &&
-MEM_ALIGNED_CHECK(vaddr, PDPTE_SIZE) &&
+if (mem_aligned_check(paddr, PDPTE_SIZE) &&
+mem_aligned_check(vaddr, PDPTE_SIZE) &&
 (vaddr_next <= vaddr_end)) {
 set_pgentry(pdpte, paddr | (prot | PAGE_PSE));
 if (vaddr_next < vaddr_end) {
@@ -432,9 +432,9 @@ int mmu_add(uint64_t *pml4_page, uint64_t paddr_base,
 __func__, paddr_base, vaddr_base, size);
 
 /* align address to page size*/
-vaddr = ROUND_PAGE_UP(vaddr_base);
-paddr = ROUND_PAGE_UP(paddr_base);
-vaddr_end = vaddr + ROUND_PAGE_DOWN(size);
+vaddr = round_page_up(vaddr_base);
+paddr = round_page_up(paddr_base);
+vaddr_end = vaddr + round_page_down(size);
 
 while (vaddr < vaddr_end) {
 vaddr_next = (vaddr & PML4E_MASK) + PML4E_SIZE;
@@ -271,7 +271,7 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 
 for (i = 0U; i < 8U; i++) {
 field = (value >> (i * 8U)) & 0xffUL;
-if (PAT_MEM_TYPE_INVALID(field) ||
+if (pat_mem_type_invalid(field) ||
 ((PAT_FIELD_RSV_BITS & field) != 0UL)) {
 pr_err("invalid guest IA32_PAT: 0x%016llx", value);
 vcpu_inject_gp(vcpu, 0U);
@@ -399,7 +399,7 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 * disabled behavior
 */
 exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, PAT_ALL_UC_VALUE);
-CACHE_FLUSH_INVALIDATE_ALL();
+cache_flush_invalidate_all();
 } else {
 /* Restore IA32_PAT to enable cache again */
 exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL,
@@ -1319,6 +1319,6 @@ void init_iommu_vm0_domain(struct vm *vm0)
 (uint8_t)bus, (uint8_t)devfun);
 }
 }
-CACHE_FLUSH_INVALIDATE_ALL();
+cache_flush_invalidate_all();
 enable_iommu();
 }
@@ -24,8 +24,12 @@ struct Elf64_Rel {
 uint64_t reserved;
 };
 
-#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
-#define R_X86_64_RELATIVE 8
+static inline uint64_t elf64_r_type(uint64_t i)
+{
+return (i & 0xffffffffUL);
+}
+
+#define R_X86_64_RELATIVE 8UL
 
 uint64_t trampoline_start16_paddr;
 
@@ -109,7 +113,7 @@ void _relocate(void)
 primary_32_end = (uint64_t)(&cpu_primary_start_64) - delta;
 
 while (start < end) {
-if ((ELF64_R_TYPE(start->r_info)) == R_X86_64_RELATIVE) {
+if ((elf64_r_type(start->r_info)) == R_X86_64_RELATIVE) {
 addr = (uint64_t *)(delta + start->r_offset);
 
 /*
@@ -286,5 +286,5 @@ void dump_exception(struct intr_excp_ctx *ctx, uint16_t pcpu_id)
 
 /* Save registers*/
 crash_ctx = ctx;
-CACHE_FLUSH_INVALIDATE_ALL();
+cache_flush_invalidate_all();
 }
@@ -14,7 +14,7 @@
 
 struct logmsg {
 uint32_t flags;
-int seq;
+int32_t seq;
 spinlock_t lock;
 };
 
@@ -5,7 +5,7 @@
 
 #include <hypervisor.h>
 
-static int npk_log_enabled, npk_log_setup_ref;
+static int32_t npk_log_enabled, npk_log_setup_ref;
 static uint64_t base;
 
 static inline int npk_write(const char *value, void *addr, size_t sz)
@@ -90,7 +90,7 @@ void npk_log_write(const char *buf, size_t buf_len)
 return;
 
 /* calculate the channel offset based on cpu_id and npk_log_ref */
-ref = (atomic_inc_return((int *)&per_cpu(npk_log_ref, cpu_id)) - 1)
+ref = (atomic_inc_return((int32_t *)&per_cpu(npk_log_ref, cpu_id)) - 1)
 & HV_NPK_LOG_REF_MASK;
 channel += (cpu_id << HV_NPK_LOG_REF_SHIFT) + ref;
 len = min(buf_len, HV_NPK_LOG_MAX);
@@ -243,12 +243,6 @@ extern spinlock_t trampoline_spinlock;
 * to locate the per cpu data.
 */
 
-#define PER_CPU_DATA_OFFSET(sym_addr) \
-((uint64_t)(sym_addr) - (uint64_t)(_ld_cpu_data_start))
-
-#define PER_CPU_DATA_SIZE \
-((uint64_t)_ld_cpu_data_end - (uint64_t)(_ld_cpu_data_start))
-
 /* CPUID feature words */
 #define FEAT_1_ECX 0U /* CPUID[1].ECX */
 #define FEAT_1_EDX 1U /* CPUID[1].EDX */
|
@ -254,7 +254,11 @@ struct vcpu_dump {
|
||||
int str_max;
|
||||
};
|
||||
|
||||
#define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == BOOT_CPU_ID)
|
||||
static inline bool is_vcpu_bsp(struct vcpu *vcpu)
|
||||
{
|
||||
return (vcpu->vcpu_id == BOOT_CPU_ID);
|
||||
}
|
||||
|
||||
/* do not update Guest RIP for next VM Enter */
|
||||
static inline void vcpu_retain_rip(struct vcpu *vcpu)
|
||||
{
|
||||
|
@@ -43,9 +43,15 @@
 #define IA32E_REF_MASK \
 (boot_cpu_data.physical_address_mask)
 
-#define ROUND_PAGE_UP(addr) \
-((((addr) + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK)
-#define ROUND_PAGE_DOWN(addr) ((addr) & CPU_PAGE_MASK)
+static inline uint64_t round_page_up(uint64_t addr)
+{
+return (((addr + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
+}
+
+static inline uint64_t round_page_down(uint64_t addr)
+{
+return (addr & CPU_PAGE_MASK);
+}
 
 enum _page_table_type {
 PTT_PRIMARY = 0, /* Mapping for hypervisor */
@@ -102,12 +108,12 @@ struct e820_entry {
 #pragma pack()
 
 /* E820 memory types */
-#define E820_TYPE_RAM 1U /* EFI 1, 2, 3, 4, 5, 6, 7 */
-#define E820_TYPE_RESERVED 2U
+#define E820_TYPE_RAM 1U /* EFI 1, 2, 3, 4, 5, 6, 7 */
+#define E820_TYPE_RESERVED 2U
 /* EFI 0, 11, 12, 13 (everything not used elsewhere) */
-#define E820_TYPE_ACPI_RECLAIM 3U /* EFI 9 */
-#define E820_TYPE_ACPI_NVS 4U /* EFI 10 */
-#define E820_TYPE_UNUSABLE 5U /* EFI 8 */
+#define E820_TYPE_ACPI_RECLAIM 3U /* EFI 9 */
+#define E820_TYPE_ACPI_NVS 4U /* EFI 10 */
+#define E820_TYPE_UNUSABLE 5U /* EFI 8 */
 
 /** Calculates the page table address for a given address.
 *
@@ -122,9 +128,9 @@ static inline void *mmu_pt_for_pde(uint32_t *pd, uint32_t vaddr)
 return pd + (((vaddr >> 22U) + 1U) * 1024U);
 }
 
-#define CACHE_FLUSH_INVALIDATE_ALL() \
-{ \
-asm volatile (" wbinvd\n" : : : "memory"); \
+static inline void cache_flush_invalidate_all(void)
+{
+asm volatile (" wbinvd\n" : : : "memory");
 }
 
 static inline void clflush(volatile void *p)
@@ -133,20 +139,19 @@ static inline void clflush(volatile void *p)
 }
 
 /* External Interfaces */
-void destroy_ept(struct vm *vm);
-uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
-uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
-uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
+void destroy_ept(struct vm *vm);
+uint64_t gpa2hpa(const struct vm *vm, uint64_t gpa);
+uint64_t local_gpa2hpa(const struct vm *vm, uint64_t gpa, uint32_t *size);
+uint64_t hpa2gpa(const struct vm *vm, uint64_t hpa);
 int ept_mr_add(const struct vm *vm, uint64_t *pml4_page, uint64_t hpa,
-uint64_t gpa, uint64_t size, uint64_t prot_orig);
-int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page,
-uint64_t gpa, uint64_t size,
-uint64_t prot_set, uint64_t prot_clr);
-int ept_mr_del(const struct vm *vm, uint64_t *pml4_page,
-uint64_t gpa, uint64_t size);
+uint64_t gpa, uint64_t size, uint64_t prot_orig);
+int ept_mr_modify(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+uint64_t size, uint64_t prot_set, uint64_t prot_clr);
+int ept_mr_del(const struct vm *vm, uint64_t *pml4_page, uint64_t gpa,
+uint64_t size);
 void free_ept_mem(uint64_t *pml4_page);
-int ept_violation_vmexit_handler(struct vcpu *vcpu);
-int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
+int ept_violation_vmexit_handler(struct vcpu *vcpu);
+int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu);
 
 #endif /* ASSEMBLER not defined */
 
@@ -509,12 +509,15 @@
 #define PAT_MEM_TYPE_WP 0x05UL /* write protected */
 #define PAT_MEM_TYPE_WB 0x06UL /* writeback */
 #define PAT_MEM_TYPE_UCM 0x07UL /* uncached minus */
-#define PAT_MEM_TYPE_INVALID(x) (((x) != PAT_MEM_TYPE_UC) && \
-((x) != PAT_MEM_TYPE_WC) && \
-((x) != PAT_MEM_TYPE_WT) && \
-((x) != PAT_MEM_TYPE_WP) && \
-((x) != PAT_MEM_TYPE_WB) && \
-((x) != PAT_MEM_TYPE_UCM))
 
+#ifndef ASSEMBLER
+static inline bool pat_mem_type_invalid(uint64_t x)
+{
+return ((x != PAT_MEM_TYPE_UC) && (x != PAT_MEM_TYPE_WC) &&
+(x != PAT_MEM_TYPE_WT) && (x != PAT_MEM_TYPE_WP) &&
+(x != PAT_MEM_TYPE_WB) && (x != PAT_MEM_TYPE_UCM));
+}
+#endif /* ASSEMBLER */
 
 /* 5 high-order bits in every field are reserved */
 #define PAT_FIELD_RSV_BITS (0xF8U)
@@ -58,7 +58,7 @@
 #define VMX_EOI_EXIT2_HIGH 0x00002021U
 #define VMX_EOI_EXIT3_FULL 0x00002022U
 #define VMX_EOI_EXIT3_HIGH 0x00002023U
-#define VMX_EOI_EXIT(vector) (VMX_EOI_EXIT0_FULL + (((vector) >> 6U) * 2U))
 
 #define VMX_XSS_EXITING_BITMAP_FULL 0x0000202CU
 #define VMX_XSS_EXITING_BITMAP_HIGH 0x0000202DU
 /* 64-bit read-only data fields */
@@ -374,7 +374,19 @@
 #define VMX_INT_TYPE_HW_EXP 3U
 #define VMX_INT_TYPE_SW_EXP 6U
 
-/*VM exit qulifications for APIC-access
+#define VM_SUCCESS 0
+#define VM_FAIL -1
+
+#define VMX_VMENTRY_FAIL 0x80000000U
+
+#ifndef ASSEMBLER
+
+static inline uint32_t vmx_eoi_exit(uint32_t vector)
+{
+return (VMX_EOI_EXIT0_FULL + ((vector >> 6U) * 2U));
+}
+
+/* VM exit qulifications for APIC-access
 * Access type:
 * 0 = linear access for a data read during instruction execution
 * 1 = linear access for a data write during instruction execution
@@ -384,16 +396,15 @@
 * 15 = guest-physical access for an instructon fetch or during
 * instruction execution
 */
-#define APIC_ACCESS_TYPE(qual) (((qual) >> 12U) & 0xFUL)
-#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFFU)
+static inline uint64_t apic_access_type(uint64_t qual)
+{
+return ((qual >> 12U) & 0xFUL);
+}
 
 
-#define VM_SUCCESS 0
-#define VM_FAIL -1
-
-#define VMX_VMENTRY_FAIL 0x80000000U
-
-#ifndef ASSEMBLER
+static inline uint64_t apic_access_offset(uint64_t qual)
+{
+return (qual & 0xFFFUL);
+}
 
 #define RFLAGS_C (1U<<0)
 #define RFLAGS_Z (1U<<6)
@@ -156,13 +156,19 @@ build_atomic_swap(atomic_swap64, "q", uint64_t, p, v)
 * #define atomic_readandclear32(P) \
 * (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
 */
-#define atomic_readandclear32(p) atomic_swap32(p, 0U)
+static inline uint32_t atomic_readandclear32(uint32_t *p)
+{
+return atomic_swap32(p, 0U);
+}
 
 /*
 * #define atomic_readandclear64(P) \
 * (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
 */
-#define atomic_readandclear64(p) atomic_swap64(p, 0UL)
+static inline uint64_t atomic_readandclear64(uint64_t *p)
+{
+return atomic_swap64(p, 0UL);
+}
 
 #define build_atomic_cmpxchg(name, size, type, ptr, old, new) \
 static inline type name(volatile type *ptr, \
@@ -188,19 +194,47 @@ static inline type name(type *ptr, type v) \
 return v; \
 }
 build_atomic_xadd(atomic_xadd16, "w", uint16_t, p, v)
-build_atomic_xadd(atomic_xadd32, "l", int, p, v)
-build_atomic_xadd(atomic_xadd64, "q", long, p, v)
+build_atomic_xadd(atomic_xadd32, "l", int32_t, p, v)
+build_atomic_xadd(atomic_xadd64, "q", int64_t, p, v)
 
-#define atomic_add_return(p, v) ( atomic_xadd32(p, v) + v )
-#define atomic_sub_return(p, v) ( atomic_xadd32(p, -v) - v )
+static inline int32_t atomic_add_return(int32_t *p, int32_t v)
+{
+return (atomic_xadd32(p, v) + v);
+}
+
-#define atomic_inc_return(v) atomic_add_return((v), 1)
-#define atomic_dec_return(v) atomic_sub_return((v), 1)
+static inline int32_t atomic_sub_return(int32_t *p, int32_t v)
+{
+return (atomic_xadd32(p, -v) - v);
+}
+
-#define atomic_add64_return(p, v) ( atomic_xadd64(p, v) + v )
-#define atomic_sub64_return(p, v) ( atomic_xadd64(p, -v) - v )
+static inline int32_t atomic_inc_return(int32_t *v)
+{
+return atomic_add_return(v, 1);
+}
+
-#define atomic_inc64_return(v) atomic_add64_return((v), 1)
-#define atomic_dec64_return(v) atomic_sub64_return((v), 1)
+static inline int32_t atomic_dec_return(int32_t *v)
+{
+return atomic_sub_return(v, 1);
+}
+
+static inline int64_t atomic_add64_return(int64_t *p, int64_t v)
+{
+return (atomic_xadd64(p, v) + v);
+}
+
+static inline int64_t atomic_sub64_return(int64_t *p, int64_t v)
+{
+return (atomic_xadd64(p, -v) - v);
+}
+
+static inline int64_t atomic_inc64_return(int64_t *v)
+{
+return atomic_add64_return(v, 1);
+}
+
+static inline int64_t atomic_dec64_return(int64_t *v)
+{
+return atomic_sub64_return(v, 1);
+}
 
 #endif /* ATOMIC_H*/
@@ -14,13 +14,9 @@
 * Returns TRUE if aligned; FALSE if not aligned
 * NOTE: The required alignment must be a power of 2 (2, 4, 8, 16, 32, etc)
 */
-#define MEM_ALIGNED_CHECK(value, req_align) \
-(((uint64_t)(value) & ((uint64_t)(req_align) - 1UL)) == 0UL)
-
-#if !defined(ASSEMBLER) && !defined(LINKER_SCRIPT)
-
-#define ARRAY_LENGTH(x) (sizeof(x)/sizeof((x)[0]))
-
-#endif
+static inline bool mem_aligned_check(uint64_t value, uint64_t req_align)
+{
+return ((value & (req_align - 1UL)) == 0UL);
+}
 
 #endif /* INCLUDE_MACROS_H defined */
@@ -7,14 +7,6 @@
 #ifndef UTIL_H
 #define UTIL_H
 
-/** Add an offset (in bytes) to an (base)address.
-*
-* @param addr Baseaddress
-* @param off Offset
-* @return Returns baseaddress + offset in bytes.
-*/
-#define ADD_OFFSET(addr, off) (void *)(((uint8_t *)(addr))+(off))
-
 #define offsetof(st, m) __builtin_offsetof(st, m)
 
 /** Roundup (x/y) to ( x/y + (x%y) ? 1 : 0) **/
@@ -391,8 +391,8 @@ void *memcpy_s(void *d, size_t dmax, const void *s, size_t slen_arg)
 }
 
 /* make sure 8bytes-aligned for at least one addr. */
-if ((!MEM_ALIGNED_CHECK(src8, 8UL)) &&
-(!MEM_ALIGNED_CHECK(dest8, 8UL))) {
+if ((!mem_aligned_check((uint64_t)src8, 8UL)) &&
+(!mem_aligned_check((uint64_t)dest8, 8UL))) {
 for (; (slen != 0U) && ((((uint64_t)src8) & 7UL) != 0UL);
 slen--) {
 *dest8 = *src8;
@@ -9,7 +9,10 @@
 #define LONG_MAX (ULONG_MAX >> 1U) /* 0x7FFFFFFF */
 #define LONG_MIN (~LONG_MAX) /* 0x80000000 */
 
-#define ISSPACE(c) ((c == ' ') || (c == '\t'))
+static inline bool is_space(char c)
+{
+return ((c == ' ') || (c == '\t'));
+}
 
 /*
 * Convert a string to a long integer - decimal support only.
@@ -28,7 +31,7 @@ long strtol_deci(const char *nptr)
 do {
 c = *s;
 s++;
-} while (ISSPACE(c));
+} while (is_space(c));
 
 if (c == '-') {
 neg = 1;
@@ -107,7 +110,7 @@ uint64_t strtoul_hex(const char *nptr)
 do {
 c = *s;
 s++;
-} while (ISSPACE(c));
+} while (is_space(c));
 
 if ((c == '0') && ((*s == 'x') || (*s == 'X'))) {
 c = s[1];