fix "Procedure is not pure assembler"
MISRA C requires that assembly code comply with the rule below: the assembly code's functionality should match the enclosing function's name. If it does not, encapsulate the assembly code in a function whose name describes that functionality.

V1->V2:
1. remove the dead code
2. update the detailed comments

V2->V3:
1. replace the macro name with upper case.
2. remove the typedef and rename the struct "_descriptor_table_" to "descriptor_table".

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
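As a hypothetical illustration of the rule this commit enforces (the names below are invented, not from this change): a bare asm statement inside a larger C function is moved into a small inline helper whose name states what the instruction does.

/* Hypothetical example. Before the fix, a function might embed an
 * unexplained instruction directly in unrelated logic:
 *
 *	asm volatile ("wbinvd" : : : "memory");
 *
 * After the fix, the asm lives in a helper named for its effect: */
static inline void flush_cache(void)
{
	asm volatile ("wbinvd" : : : "memory");
}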
@@ -724,7 +724,7 @@ void cpu_dead(uint16_t pcpu_id)
 
 	/* Halt the CPU */
 	do {
-		asm volatile ("hlt");
+		hlt_cpu();
	} while (halt != 0);
 }
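hlt_cpu() is not defined in this hunk; based on the asm it replaces, a plausible definition (presumably added to a CPU header by this commit) is:

/* Plausible definition of hlt_cpu(), inferred from the asm it replaces */
static inline void hlt_cpu(void)
{
	asm volatile ("hlt");
}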
@@ -23,6 +23,11 @@ static void set_tss_desc(struct tss_64_descriptor *desc,
 	desc->high32_value = u2 | (type << 8U) | 0x8000U | u3;
 }
 
+static inline void load_gdt(struct host_gdt_descriptor *gdtr)
+{
+	asm volatile ("lgdt %0" ::"m"(*gdtr));
+}
+
 void load_gdtr_and_tr(void)
 {
 	struct host_gdt *gdt = &get_cpu_var(gdt);
@@ -48,7 +53,7 @@ void load_gdtr_and_tr(void)
 	gdtr.len = sizeof(struct host_gdt) - 1U;
 	gdtr.gdt = gdt;
 
-	asm volatile ("lgdt %0" ::"m"(gdtr));
+	load_gdt(&gdtr);
 
 	CPU_LTR_EXECUTE(HOST_GDT_RING0_CPU_TSS_SEL);
 }
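LGDT takes a packed 10-byte memory operand: a 16-bit limit followed by a 64-bit base. The struct host_gdt_descriptor used above is not defined in this diff, but the gdtr.len / gdtr.gdt assignments imply a layout like this sketch:

/* Sketch of the LGDT operand implied by the gdtr.len / gdtr.gdt fields
 * above; the real definition lives elsewhere in the tree. */
struct host_gdt_descriptor {
	uint16_t len;		/* GDT limit: size in bytes minus one */
	struct host_gdt *gdt;	/* linear base address of the GDT */
} __attribute__((packed));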
@@ -123,7 +123,7 @@ static void init_tsc_deadline_timer(void)
 	val = VECTOR_TIMER;
 	val |= APIC_LVTT_TM_TSCDLT; /* TSC deadline and unmask */
 	msr_write(MSR_IA32_EXT_APIC_LVT_TIMER, val);
-	asm volatile("mfence" : : : "memory");
+	cpu_memory_barrier();
 
 	/* disarm timer */
 	msr_write(MSR_IA32_TSC_DEADLINE, 0UL);
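cpu_memory_barrier() is likewise only called here; given the mfence it replaces, it presumably wraps that instruction:

/* Presumed definition of cpu_memory_barrier(), matching the replaced asm */
static inline void cpu_memory_barrier(void)
{
	asm volatile ("mfence" : : : "memory");
}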
@@ -181,6 +181,17 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 
 }
 
+static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
+{
+	asm volatile("fxsave (%0)"
+			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
+}
+
+static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
+{
+	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
+}
+
 static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 {
 	/* cache on-demand run_context for efer/rflags/rsp/rip */
@@ -231,8 +242,7 @@ static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 	ext_ctx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
 
 	/* FX area */
-	asm volatile("fxsave (%0)"
-			: : "r" (ext_ctx->fxstore_guest_area) : "memory");
+	save_fxstore_guest_area(ext_ctx);
 }
 
 static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
@@ -279,7 +289,7 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
 	msr_write(MSR_IA32_KERNEL_GS_BASE, ext_ctx->ia32_kernel_gs_base);
 
 	/* FX area */
-	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
+	rstor_fxstore_guest_area(ext_ctx);
 }
 
 static void copy_smc_param(const struct run_context *prev_ctx,
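FXSAVE and FXRSTOR operate on a 512-byte FX state image and fault unless the operand is 16-byte aligned, so fxstore_guest_area must be declared accordingly. The declaration is not part of this diff; a hypothetical sketch of the relevant field in struct ext_context:

/* Hypothetical sketch: FXSAVE/FXRSTOR require a 512-byte save area
 * aligned on a 16-byte boundary (SDM Vol. 1, FXSAVE). */
struct ext_context {
	/* ... other saved world state such as ia32_kernel_gs_base ... */
	uint8_t fxstore_guest_area[512] __attribute__((aligned(16)));
};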
@@ -6,6 +6,7 @@
 
 #include <hypervisor.h>
 #include <vm0_boot.h>
+#include <cpu.h>
 #ifdef CONFIG_EFI_STUB
 extern struct efi_context* efi_ctx;
 #endif
@@ -51,19 +52,6 @@ bool is_vmx_disabled(void)
  */
 static inline void exec_vmxon(void *addr)
 {
-	uint64_t tmp64;
-
-	/* Read Feature ControL MSR */
-	tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
-
-	/* Check if feature control is locked */
-	if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
-		/* Lock and enable VMX support */
-		tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
-			  MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
-		msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
-	}
-
 	/* Turn VMX on, pre-conditions can avoid VMfailInvalid
 	 * here no need check RFLAGS since it will generate #GP or #UD
 	 * except VMsuccess. SDM 30.3
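With the feature-control handling moved out, exec_vmxon() is reduced to the VMXON instruction itself. The surviving body sits below the hunk and is not shown; presumably it looks like:

/* Presumed remainder of exec_vmxon() after the dead code is removed */
static inline void exec_vmxon(void *addr)
{
	/* Turn VMX on, pre-conditions can avoid VMfailInvalid
	 * here no need check RFLAGS since it will generate #GP or #UD
	 * except VMsuccess. SDM 30.3
	 */
	asm volatile ("vmxon (%%rax)" : : "a" (addr) : "cc", "memory");
}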
@@ -98,6 +86,17 @@ void exec_vmxon_instr(uint16_t pcpu_id)
 	CPU_CR_READ(cr4, &tmp64);
 	CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
 
+	/* Read Feature ControL MSR */
+	tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
+
+	/* Check if feature control is locked */
+	if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
+		/* Lock and enable VMX support */
+		tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
+			  MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
+		msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
+	}
+
 	/* Turn ON VMX */
 	vmxon_region_pa = hva2hpa(vmxon_region_va);
 	exec_vmxon(&vmxon_region_pa);
@@ -106,6 +105,11 @@ void exec_vmxon_instr(uint16_t pcpu_id)
 	exec_vmptrld(&vmcs_pa);
 }
 
+static inline void exec_vmxoff(void)
+{
+	asm volatile ("vmxoff" : : : "memory");
+}
+
 void vmx_off(uint16_t pcpu_id)
 {
 
@@ -115,7 +119,7 @@ void vmx_off(uint16_t pcpu_id)
 	vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 	exec_vmclear((void *)&vmcs_pa);
 
-	asm volatile ("vmxoff" : : : "memory");
+	exec_vmxoff();
 }
 
 /**
@@ -605,8 +609,8 @@ static void init_host_state(void)
 	uint64_t value64;
 	uint64_t value;
 	uint64_t tss_addr;
-	descriptor_table gdtb = {0U, 0UL};
-	descriptor_table idtb = {0U, 0UL};
+	uint64_t gdt_base;
+	uint64_t idt_base;
 
 	pr_dbg("*********************");
 	pr_dbg("Initialize host state");
@@ -619,27 +623,27 @@ static void init_host_state(void)
 	 * GS), * Task Register (TR), * Local Descriptor Table Register (LDTR)
 	 *
 	 ***************************************************/
-	asm volatile ("movw %%es, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(es, value16);
 	exec_vmwrite16(VMX_HOST_ES_SEL, value16);
 	pr_dbg("VMX_HOST_ES_SEL: 0x%hu ", value16);
 
-	asm volatile ("movw %%cs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(cs, value16);
 	exec_vmwrite16(VMX_HOST_CS_SEL, value16);
 	pr_dbg("VMX_HOST_CS_SEL: 0x%hu ", value16);
 
-	asm volatile ("movw %%ss, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(ss, value16);
 	exec_vmwrite16(VMX_HOST_SS_SEL, value16);
 	pr_dbg("VMX_HOST_SS_SEL: 0x%hu ", value16);
 
-	asm volatile ("movw %%ds, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(ds, value16);
 	exec_vmwrite16(VMX_HOST_DS_SEL, value16);
 	pr_dbg("VMX_HOST_DS_SEL: 0x%hu ", value16);
 
-	asm volatile ("movw %%fs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(fs, value16);
 	exec_vmwrite16(VMX_HOST_FS_SEL, value16);
 	pr_dbg("VMX_HOST_FS_SEL: 0x%hu ", value16);
 
-	asm volatile ("movw %%gs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(gs, value16);
 	exec_vmwrite16(VMX_HOST_GS_SEL, value16);
 	pr_dbg("VMX_HOST_GS_SEL: 0x%hu ", value16);
 
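CPU_SEG_WRITE() is the upper-cased macro mentioned in V2->V3. Its definition is not part of this diff; the name notwithstanding, the asm it replaces reads a segment register into value16, so it presumably expands to something like:

/* Presumed shape of CPU_SEG_WRITE(), inferred from the asm it replaces:
 * store segment register 'seg' into the 16-bit variable 'value'. */
#define CPU_SEG_WRITE(seg, value)				\
{								\
	asm volatile ("movw %%" #seg ", %%ax" : "=a" (value));	\
}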
@@ -654,15 +658,15 @@ static void init_host_state(void)
 
 	/* TODO: Should guest GDTB point to host GDTB ? */
 	/* Obtain the current global descriptor table base */
-	asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
+	gdt_base = sgdt();
 
-	if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
-		gdtb.base |= 0xffff000000000000UL;
+	if (((gdt_base >> 47U) & 0x1UL) != 0UL) {
+		gdt_base |= 0xffff000000000000UL;
 	}
 
 	/* Set up the guest and host GDTB base fields with current GDTB base */
-	exec_vmwrite(VMX_HOST_GDTR_BASE, gdtb.base);
-	pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdtb.base);
+	exec_vmwrite(VMX_HOST_GDTR_BASE, gdt_base);
+	pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdt_base);
 
 	tss_addr = hva2hpa((void *)&get_cpu_var(tss));
 	/* Set up host TR base fields */
@@ -670,14 +674,14 @@ static void init_host_state(void)
 	pr_dbg("VMX_HOST_TR_BASE: 0x%016llx ", tss_addr);
 
 	/* Obtain the current interrupt descriptor table base */
-	asm volatile ("sidt %0":"=m"(idtb)::"memory");
+	idt_base = sidt();
 	/* base */
-	if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
-		idtb.base |= 0xffff000000000000UL;
+	if (((idt_base >> 47U) & 0x1UL) != 0UL) {
+		idt_base |= 0xffff000000000000UL;
 	}
 
-	exec_vmwrite(VMX_HOST_IDTR_BASE, idtb.base);
-	pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idtb.base);
+	exec_vmwrite(VMX_HOST_IDTR_BASE, idt_base);
+	pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idt_base);
 
 	/**************************************************/
 	/* 64-bit fields */
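The new sgdt() and sidt() helpers (whose backing struct is the "descriptor_table" rename mentioned in V2->V3) are not shown in this diff. From the code they replace, a plausible shape: run SGDT/SIDT into a packed limit+base pair and return the base.

/* Plausible definitions of the new helpers, inferred from the removed code:
 * store the descriptor-table register into a packed descriptor_table
 * (16-bit limit followed by a 64-bit base) and return the base. */
struct descriptor_table {
	uint16_t limit;
	uint64_t base;
} __attribute__((packed));

static inline uint64_t sgdt(void)
{
	struct descriptor_table gdtb = {0U, 0UL};
	asm volatile ("sgdt %0" : "=m" (gdtb) : : "memory");
	return gdtb.base;
}

static inline uint64_t sidt(void)
{
	struct descriptor_table idtb = {0U, 0UL};
	asm volatile ("sidt %0" : "=m" (idtb) : : "memory");
	return idtb.base;
}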
@@ -264,7 +264,7 @@ dmar_wait_completion(const struct dmar_drhd_rt *dmar_uint, uint32_t offset,
 		}
 		ASSERT(((rdtsc() - start) < CYCLES_PER_MS),
 			"DMAR OP Timeout!");
-		asm volatile ("pause" ::: "memory");
+		pause_cpu();
 	}
 }
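pause_cpu() is not defined in this hunk either; from the replaced asm it is presumably:

/* Presumed definition of pause_cpu(), matching the replaced asm */
static inline void pause_cpu(void)
{
	asm volatile ("pause" ::: "memory");
}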