fix "Procedure is not pure assembler"

MISRA C requires that assembly code comply with
the rule listed below:
  The assembly code's functionality should match the function's
name. If not, please encapsulate the assembly code in a function
whose name describes that functionality.
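
As a minimal sketch of this pattern (for illustration only: pause_cpu()
mirrors the helper this change adds to cpu.h, while the caller
wait_in_loop() is a hypothetical example, not part of this change):

    static inline void pause_cpu(void)
    {
            /* Hint to the CPU that this is a spin-wait loop. */
            asm volatile ("pause" ::: "memory");
    }

    /* Hypothetical caller: the loop now reads as intent, not a mnemonic. */
    static void wait_in_loop(void)
    {
            do {
                    pause_cpu();
            } while (1);
    }
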
V1->V2:
    1. remove the dead code.
    2. update the detailed comments.

V2->V3:
    1. change the macro name to upper case.
    2. remove the typedef and rename the struct
"_descriptor_table_" to "descriptor_table".

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Huihuang Shi 2018-11-06 10:05:19 +08:00 committed by lijinxia
parent 91fb441d7a
commit 7bb09f75da
9 changed files with 96 additions and 59 deletions


@@ -724,7 +724,7 @@ void cpu_dead(uint16_t pcpu_id)
/* Halt the CPU */
do {
asm volatile ("hlt");
hlt_cpu();
} while (halt != 0);
}


@@ -23,6 +23,11 @@ static void set_tss_desc(struct tss_64_descriptor *desc,
desc->high32_value = u2 | (type << 8U) | 0x8000U | u3;
}
static inline void load_gdt(struct host_gdt_descriptor *gdtr)
{
asm volatile ("lgdt %0" ::"m"(*gdtr));
}
void load_gdtr_and_tr(void)
{
struct host_gdt *gdt = &get_cpu_var(gdt);
@@ -48,7 +53,7 @@ void load_gdtr_and_tr(void)
gdtr.len = sizeof(struct host_gdt) - 1U;
gdtr.gdt = gdt;
asm volatile ("lgdt %0" ::"m"(gdtr));
load_gdt(&gdtr);
CPU_LTR_EXECUTE(HOST_GDT_RING0_CPU_TSS_SEL);
}


@@ -123,7 +123,7 @@ static void init_tsc_deadline_timer(void)
val = VECTOR_TIMER;
val |= APIC_LVTT_TM_TSCDLT; /* TSC deadline and unmask */
msr_write(MSR_IA32_EXT_APIC_LVT_TIMER, val);
asm volatile("mfence" : : : "memory");
cpu_memory_barrier();
/* disarm timer */
msr_write(MSR_IA32_TSC_DEADLINE, 0UL);


@@ -181,6 +181,17 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
}
static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
{
asm volatile("fxsave (%0)"
: : "r" (ext_ctx->fxstore_guest_area) : "memory");
}
static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
{
asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
}
static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
{
/* cache on-demand run_context for efer/rflags/rsp/rip */
@@ -231,8 +242,7 @@ static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
ext_ctx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);
/* FX area */
asm volatile("fxsave (%0)"
: : "r" (ext_ctx->fxstore_guest_area) : "memory");
save_fxstore_guest_area(ext_ctx);
}
static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
@@ -279,7 +289,7 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext
msr_write(MSR_IA32_KERNEL_GS_BASE, ext_ctx->ia32_kernel_gs_base);
/* FX area */
asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
rstor_fxstore_guest_area(ext_ctx);
}
static void copy_smc_param(const struct run_context *prev_ctx,


@@ -6,6 +6,7 @@
#include <hypervisor.h>
#include <vm0_boot.h>
#include <cpu.h>
#ifdef CONFIG_EFI_STUB
extern struct efi_context* efi_ctx;
#endif
@@ -51,19 +52,6 @@ bool is_vmx_disabled(void)
*/
static inline void exec_vmxon(void *addr)
{
uint64_t tmp64;
/* Read Feature ControL MSR */
tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
/* Check if feature control is locked */
if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
/* Lock and enable VMX support */
tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
}
/* Turn VMX on, pre-conditions can avoid VMfailInvalid
* here no need check RFLAGS since it will generate #GP or #UD
* except VMsuccess. SDM 30.3
@@ -98,6 +86,17 @@ void exec_vmxon_instr(uint16_t pcpu_id)
CPU_CR_READ(cr4, &tmp64);
CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
/* Read Feature ControL MSR */
tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
/* Check if feature control is locked */
if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
/* Lock and enable VMX support */
tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
}
/* Turn ON VMX */
vmxon_region_pa = hva2hpa(vmxon_region_va);
exec_vmxon(&vmxon_region_pa);
@@ -106,6 +105,11 @@ void exec_vmxon_instr(uint16_t pcpu_id)
exec_vmptrld(&vmcs_pa);
}
static inline void exec_vmxoff(void)
{
asm volatile ("vmxoff" : : : "memory");
}
void vmx_off(uint16_t pcpu_id)
{
@@ -115,7 +119,7 @@ void vmx_off(uint16_t pcpu_id)
vmcs_pa = hva2hpa(vcpu->arch.vmcs);
exec_vmclear((void *)&vmcs_pa);
asm volatile ("vmxoff" : : : "memory");
exec_vmxoff();
}
/**
@@ -605,8 +609,8 @@ static void init_host_state(void)
uint64_t value64;
uint64_t value;
uint64_t tss_addr;
descriptor_table gdtb = {0U, 0UL};
descriptor_table idtb = {0U, 0UL};
uint64_t gdt_base;
uint64_t idt_base;
pr_dbg("*********************");
pr_dbg("Initialize host state");
@@ -619,27 +623,27 @@ static void init_host_state(void)
* GS), * Task Register (TR), * Local Descriptor Table Register (LDTR)
*
***************************************************/
asm volatile ("movw %%es, %%ax":"=a" (value16));
CPU_SEG_WRITE(es, value16);
exec_vmwrite16(VMX_HOST_ES_SEL, value16);
pr_dbg("VMX_HOST_ES_SEL: 0x%hu ", value16);
asm volatile ("movw %%cs, %%ax":"=a" (value16));
CPU_SEG_WRITE(cs, value16);
exec_vmwrite16(VMX_HOST_CS_SEL, value16);
pr_dbg("VMX_HOST_CS_SEL: 0x%hu ", value16);
asm volatile ("movw %%ss, %%ax":"=a" (value16));
CPU_SEG_WRITE(ss, value16);
exec_vmwrite16(VMX_HOST_SS_SEL, value16);
pr_dbg("VMX_HOST_SS_SEL: 0x%hu ", value16);
asm volatile ("movw %%ds, %%ax":"=a" (value16));
CPU_SEG_WRITE(ds, value16);
exec_vmwrite16(VMX_HOST_DS_SEL, value16);
pr_dbg("VMX_HOST_DS_SEL: 0x%hu ", value16);
asm volatile ("movw %%fs, %%ax":"=a" (value16));
CPU_SEG_WRITE(fs, value16);
exec_vmwrite16(VMX_HOST_FS_SEL, value16);
pr_dbg("VMX_HOST_FS_SEL: 0x%hu ", value16);
asm volatile ("movw %%gs, %%ax":"=a" (value16));
CPU_SEG_WRITE(gs, value16);
exec_vmwrite16(VMX_HOST_GS_SEL, value16);
pr_dbg("VMX_HOST_GS_SEL: 0x%hu ", value16);
@@ -654,15 +658,15 @@ static void init_host_state(void)
/* TODO: Should guest GDTB point to host GDTB ? */
/* Obtain the current global descriptor table base */
asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
gdt_base = sgdt();
if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
gdtb.base |= 0xffff000000000000UL;
if (((gdt_base >> 47U) & 0x1UL) != 0UL) {
gdt_base |= 0xffff000000000000UL;
}
/* Set up the guest and host GDTB base fields with current GDTB base */
exec_vmwrite(VMX_HOST_GDTR_BASE, gdtb.base);
pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdtb.base);
exec_vmwrite(VMX_HOST_GDTR_BASE, gdt_base);
pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdt_base);
tss_addr = hva2hpa((void *)&get_cpu_var(tss));
/* Set up host TR base fields */
@@ -670,14 +674,14 @@ static void init_host_state(void)
pr_dbg("VMX_HOST_TR_BASE: 0x%016llx ", tss_addr);
/* Obtain the current interrupt descriptor table base */
asm volatile ("sidt %0":"=m"(idtb)::"memory");
idt_base = sidt();
/* base */
if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
idtb.base |= 0xffff000000000000UL;
if (((idt_base >> 47U) & 0x1UL) != 0UL) {
idt_base |= 0xffff000000000000UL;
}
exec_vmwrite(VMX_HOST_IDTR_BASE, idtb.base);
pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idtb.base);
exec_vmwrite(VMX_HOST_IDTR_BASE, idt_base);
pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idt_base);
/**************************************************/
/* 64-bit fields */


@@ -264,7 +264,7 @@ dmar_wait_completion(const struct dmar_drhd_rt *dmar_uint, uint32_t offset,
}
ASSERT(((rdtsc() - start) < CYCLES_PER_MS),
"DMAR OP Timeout!");
asm volatile ("pause" ::: "memory");
pause_cpu();
}
}


@@ -243,7 +243,7 @@ void asm_assert(int32_t line, const char *file, const char *txt)
show_host_call_trace(rsp, rbp, pcpu_id);
dump_guest_context(pcpu_id);
do {
asm volatile ("pause" ::: "memory");
pause_cpu();
} while (1);
}


@@ -264,6 +264,11 @@ extern spinlock_t trampoline_spinlock;
*/
#define BROADCAST_CPU_ID 0xfffeU
struct descriptor_table {
uint16_t limit;
uint64_t base;
} __attribute__((packed));
/* CPU states defined */
enum pcpu_boot_state {
PCPU_STATE_RESET = 0U,
@@ -326,6 +331,11 @@ void stop_cpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
void cpu_l1d_flush(void);
#define CPU_SEG_WRITE(seg, value16) \
{ \
asm volatile ("mov %%" STRINGIFY(seg) ", %%ax": "=a" (value16)); \
}
/* Read control register */
#define CPU_CR_READ(cr, result_ptr) \
{ \
@@ -341,6 +351,20 @@ void cpu_l1d_flush(void);
: "r"(value)); \
}
static inline uint64_t sgdt(void)
{
struct descriptor_table gdtb = {0U, 0UL};
asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
return gdtb.base;
}
static inline uint64_t sidt(void)
{
struct descriptor_table idtb = {0U, 0UL};
asm volatile ("sidt %0":"=m"(idtb)::"memory");
return idtb.base;
}
/* Read MSR */
static inline void cpu_msr_read(uint32_t reg, uint64_t *msr_val_ptr)
{
@@ -360,6 +384,16 @@ static inline void cpu_msr_write(uint32_t reg, uint64_t msr_val)
asm volatile (" wrmsr " : : "c" (reg), "a" (msrl), "d" (msrh));
}
static inline void pause_cpu(void)
{
asm volatile ("pause" ::: "memory");
}
static inline void hlt_cpu(void)
{
asm volatile ("hlt");
}
#ifdef CONFIG_PARTITION_MODE
#define CPU_IRQ_DISABLE()
#else
@@ -388,22 +422,10 @@ static inline void cpu_sp_write(uint64_t *stack_ptr)
asm volatile ("movq %0, %%rsp" : : "r"(rsp));
}
/* Synchronizes all read accesses from memory */
#define CPU_MEMORY_READ_BARRIER() \
{ \
asm volatile ("lfence\n" : : : "memory"); \
}
/* Synchronizes all write accesses to memory */
#define CPU_MEMORY_WRITE_BARRIER() \
{ \
asm volatile ("sfence\n" : : : "memory"); \
}
/* Synchronizes all read and write accesses to/from memory */
#define CPU_MEMORY_BARRIER() \
{ \
asm volatile ("mfence\n" : : : "memory"); \
static inline void cpu_memory_barrier(void)
{
asm volatile ("mfence\n" : : : "memory");
}
/* Write the task register */


@@ -477,10 +477,6 @@ static inline bool cpu_has_vmx_unrestricted_guest_cap(void)
!= 0UL);
}
typedef struct _descriptor_table_{
uint16_t limit;
uint64_t base;
}__attribute__((packed)) descriptor_table;
#endif /* ASSEMBLER */
#endif /* VMX_H_ */