HV: treewide: avoid using multiple # or ## in a macro

In the C99 standard, the order of evaluation of multiple #, multiple ##,
or a mix of # and ## preprocessor operators within a single macro
expansion is unspecified. The GCC 7.3.0 manual does not document an
implementation-defined order for this case either, so it is unsafe
to use multiple # or ## operators in a macro.
In addition, some macros containing one or more "##" operators are
not used by the hypervisor at all.

Update the related code to avoid using multiple # or ## operators in a macro;
Remove unused macros that contain one or more "##" operators;
Remove "struct __hack;" at the end of GETCC since it is useless.

Note:
     '##' operator usage constraints: A ## preprocessing token shall
     not occur at the beginning or at the end of a replacement list
     for either form of macro definition.
V1->V2:
	Update the related code to avoid using multiple # or ## operators in a macro.
V2->V3:
	Remove unused macros that contain one or more "##" operators;
	Remove "struct __hack;" at the end of GETCC since it is useless.

Signed-off-by: Xiangyang Wu <xiangyang.wu@linux.intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
This commit is contained in:
Xiangyang Wu 2018-08-01 13:27:35 +08:00 committed by lijinxia
parent 581a336bc8
commit 77c3917544
3 changed files with 36 additions and 60 deletions

View File

@@ -366,21 +366,19 @@ static int vie_update_rflags(struct vcpu *vcpu, uint64_t rflags2, uint64_t psl)
/*
* Return the status flags that would result from doing (x - y).
*/
#define GETCC(sz) \
static uint64_t \
getcc##sz(uint##sz##_t x, uint##sz##_t y) \
#define build_getcc(name, type, x, y) \
static uint64_t name(type x, type y) \
{ \
uint64_t rflags; \
\
__asm __volatile("sub %2,%1; pushfq; popq %0" : \
"=r" (rflags), "+r" (x) : "m" (y)); \
return rflags; \
} struct __hack
GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);
}
build_getcc(getcc8, uint8_t, x, y)
build_getcc(getcc16, uint16_t, x, y)
build_getcc(getcc32, uint32_t, x, y)
build_getcc(getcc64, uint64_t, x, y)
static uint64_t
getcc(uint8_t opsize, uint64_t x, uint64_t y)

View File

@@ -33,21 +33,20 @@ static struct trusty_key_info g_key_info = {
.platform = 3U,
.num_seeds = 1U
};
#define save_segment(seg, SEG_NAME) \
{ \
seg.selector = exec_vmread16(VMX_GUEST_##SEG_NAME##_SEL); \
seg.base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \
seg.limit = exec_vmread32(VMX_GUEST_##SEG_NAME##_LIMIT); \
seg.attr = exec_vmread32(VMX_GUEST_##SEG_NAME##_ATTR); \
seg.selector = exec_vmread16(SEG_NAME##_SEL); \
seg.base = exec_vmread(SEG_NAME##_BASE); \
seg.limit = exec_vmread32(SEG_NAME##_LIMIT); \
seg.attr = exec_vmread32(SEG_NAME##_ATTR); \
}
#define load_segment(seg, SEG_NAME) \
{ \
exec_vmwrite16(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \
exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \
exec_vmwrite32(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
exec_vmwrite32(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
exec_vmwrite16(SEG_NAME##_SEL, seg.selector); \
exec_vmwrite(SEG_NAME##_BASE, seg.base); \
exec_vmwrite32(SEG_NAME##_LIMIT, seg.limit); \
exec_vmwrite32(SEG_NAME##_ATTR, seg.attr); \
}
#ifndef WORKAROUND_FOR_TRUSTY_4G_MEM
@@ -231,14 +230,14 @@ static void save_world_ctx(struct run_context *context)
context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
context->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
save_segment(context->cs, CS);
save_segment(context->ss, SS);
save_segment(context->ds, DS);
save_segment(context->es, ES);
save_segment(context->fs, FS);
save_segment(context->gs, GS);
save_segment(context->tr, TR);
save_segment(context->ldtr, LDTR);
save_segment(context->cs, VMX_GUEST_CS);
save_segment(context->ss, VMX_GUEST_SS);
save_segment(context->ds, VMX_GUEST_DS);
save_segment(context->es, VMX_GUEST_ES);
save_segment(context->fs, VMX_GUEST_FS);
save_segment(context->gs, VMX_GUEST_GS);
save_segment(context->tr, VMX_GUEST_TR);
save_segment(context->ldtr, VMX_GUEST_LDTR);
/* Only base and limit for IDTR and GDTR */
context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
@@ -277,14 +276,14 @@ static void load_world_ctx(struct run_context *context)
exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
load_segment(context->cs, CS);
load_segment(context->ss, SS);
load_segment(context->ds, DS);
load_segment(context->es, ES);
load_segment(context->fs, FS);
load_segment(context->gs, GS);
load_segment(context->tr, TR);
load_segment(context->ldtr, LDTR);
load_segment(context->cs, VMX_GUEST_CS);
load_segment(context->ss, VMX_GUEST_SS);
load_segment(context->ds, VMX_GUEST_DS);
load_segment(context->es, VMX_GUEST_ES);
load_segment(context->fs, VMX_GUEST_FS);
load_segment(context->gs, VMX_GUEST_GS);
load_segment(context->tr, VMX_GUEST_TR);
load_segment(context->ldtr, VMX_GUEST_LDTR);
/* Only base and limit for IDTR and GDTR */
exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);

View File

@@ -12,27 +12,6 @@
/** Replaces 'x' by its value. */
#define CPP_STRING(x) __CPP_STRING(x)
/** Creates a bitfield mask.
*
* @param pos The position of the LSB within the mask.
* @param width The width of the bitfield in bits.
*
* @return The bitfield mask.
*/
#define BITFIELD_MASK(pos, width) (((1<<(width))-1)<<(pos))
#define BITFIELD_VALUE(v, pos, width) (((v)<<(pos)) & (((1<<(width))-1)<<(pos)))
#define MAKE_BITFIELD_MASK(id) BITFIELD_MASK(id ## _POS, id ## _WIDTH)
#define MAKE_BITFIELD_VALUE(v, id) BITFIELD_VALUE(v, id ## _POS, id ## _WIDTH)
/** Defines a register within a register block. */
#define REGISTER(base, off) (base ## _BASE + (off))
#define MAKE_MMIO_REGISTER_ADDRESS(chip, module, register) \
(chip ## _ ## module ## _BASE + \
(chip ## _ ## module ## _ ## register ## _REGISTER))
/* Macro used to check if a value is aligned to the required boundary.
* Returns TRUE if aligned; FALSE if not aligned
* NOTE: The required alignment must be a power of 2 (2, 4, 8, 16, 32, etc)