hv: cpu: fix "Procedure has more than one exit point"

The IEC 61508 and ISO 26262 standards highly recommend the single-exit rule.

Reduce the count of the "return entries".
Fix the violations that match the cases listed below:
1. The function has 2 return entries.
2. The first return entry is used to return an error code after
checking whether a variable is valid.

Fix the violations in "if else" format.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Huihuang Shi 2018-11-29 11:08:54 +08:00 committed by wenlingz
parent 7f08ad8375
commit ddb548367a
2 changed files with 113 additions and 113 deletions

View File

@ -67,12 +67,15 @@ bool cpu_has_cap(uint32_t bit)
{ {
uint32_t feat_idx = bit >> 5U; uint32_t feat_idx = bit >> 5U;
uint32_t feat_bit = bit & 0x1fU; uint32_t feat_bit = bit & 0x1fU;
bool ret;
if (feat_idx >= FEATURE_WORDS) { if (feat_idx >= FEATURE_WORDS) {
return false; ret = false;
} else {
ret = ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U);
} }
return ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U); return ret;
} }
static inline bool get_monitor_cap(void) static inline bool get_monitor_cap(void)
@ -710,22 +713,21 @@ void cpu_dead(uint16_t pcpu_id)
*/ */
int halt = 1; int halt = 1;
if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap) == false) { if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */
vmx_off(pcpu_id);
cache_flush_invalidate_all();
/* Set state to show CPU is dead */
cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
/* Halt the CPU */
do {
hlt_cpu();
} while (halt != 0);
} else {
pr_err("pcpu%hu already dead", pcpu_id); pr_err("pcpu%hu already dead", pcpu_id);
return;
} }
/* clean up native stuff */
vmx_off(pcpu_id);
cache_flush_invalidate_all();
/* Set state to show CPU is dead */
cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
/* Halt the CPU */
do {
hlt_cpu();
} while (halt != 0);
} }
static void set_current_cpu_id(uint16_t pcpu_id) static void set_current_cpu_id(uint16_t pcpu_id)
@ -804,15 +806,13 @@ static void ept_cap_detect(void)
msr_val = msr_val >> 32U; msr_val = msr_val >> 32U;
/* Check if secondary processor based VM control is available. */ /* Check if secondary processor based VM control is available. */
if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) == 0UL) { if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) != 0UL) {
return; /* Read secondary processor based VM control. */
} msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
/* Read secondary processor based VM control. */ if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_EPT)) {
msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2); cpu_caps.ept_features = 1U;
}
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_EPT)) {
cpu_caps.ept_features = 1U;
} }
} }

View File

@ -63,22 +63,24 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp
return entry; return entry;
} }
static inline int set_vcpuid_entry(struct acrn_vm *vm, static inline int32_t set_vcpuid_entry(struct acrn_vm *vm,
const struct vcpuid_entry *entry) const struct vcpuid_entry *entry)
{ {
struct vcpuid_entry *tmp; struct vcpuid_entry *tmp;
size_t entry_size = sizeof(struct vcpuid_entry); size_t entry_size = sizeof(struct vcpuid_entry);
int32_t ret;
if (vm->vcpuid_entry_nr == MAX_VM_VCPUID_ENTRIES) { if (vm->vcpuid_entry_nr == MAX_VM_VCPUID_ENTRIES) {
pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n", pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n",
__func__, MAX_VM_VCPUID_ENTRIES); __func__, MAX_VM_VCPUID_ENTRIES);
return -ENOMEM; ret = -ENOMEM;
} else {
tmp = &vm->vcpuid_entries[vm->vcpuid_entry_nr];
vm->vcpuid_entry_nr++;
(void)memcpy_s(tmp, entry_size, entry, entry_size);
ret = 0;
} }
return ret;
tmp = &vm->vcpuid_entries[vm->vcpuid_entry_nr];
vm->vcpuid_entry_nr++;
(void)memcpy_s(tmp, entry_size, entry, entry_size);
return 0;
} }
/** /**
@ -320,102 +322,100 @@ void guest_cpuid(struct acrn_vcpu *vcpu,
*ecx = 0U; *ecx = 0U;
*edx = 0U; *edx = 0U;
} }
} else {
return; /* percpu related */
} switch (leaf) {
case 0x01U:
/* percpu related */ {
switch (leaf) { cpuid(leaf, eax, ebx, ecx, edx);
case 0x01U: uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu));
{ /* Patching initial APIC ID */
cpuid(leaf, eax, ebx, ecx, edx); *ebx &= ~APIC_ID_MASK;
uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu)); *ebx |= (apicid << APIC_ID_SHIFT);
/* Patching initial APIC ID */
*ebx &= ~APIC_ID_MASK;
*ebx |= (apicid << APIC_ID_SHIFT);
#ifndef CONFIG_MTRR_ENABLED #ifndef CONFIG_MTRR_ENABLED
/* mask mtrr */ /* mask mtrr */
*edx &= ~CPUID_EDX_MTRR; *edx &= ~CPUID_EDX_MTRR;
#endif #endif
/* mask pcid */ /* mask pcid */
*ecx &= ~CPUID_ECX_PCID; *ecx &= ~CPUID_ECX_PCID;
/*mask vmx to guest os */ /*mask vmx to guest os */
*ecx &= ~CPUID_ECX_VMX; *ecx &= ~CPUID_ECX_VMX;
/*no xsave support for guest if it is not enabled on host*/ /*no xsave support for guest if it is not enabled on host*/
if ((*ecx & CPUID_ECX_OSXSAVE) == 0U) { if ((*ecx & CPUID_ECX_OSXSAVE) == 0U) {
*ecx &= ~CPUID_ECX_XSAVE; *ecx &= ~CPUID_ECX_XSAVE;
}
*ecx &= ~CPUID_ECX_OSXSAVE;
if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
uint64_t cr4;
/*read guest CR4*/
cr4 = exec_vmread(VMX_GUEST_CR4);
if ((cr4 & CR4_OSXSAVE) != 0UL) {
*ecx |= CPUID_ECX_OSXSAVE;
} }
}
break;
}
case 0x0bU: *ecx &= ~CPUID_ECX_OSXSAVE;
/* Patching X2APIC */ if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
#ifdef CONFIG_PARTITION_MODE uint64_t cr4;
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx); /*read guest CR4*/
#else cr4 = exec_vmread(VMX_GUEST_CR4);
if (is_vm0(vcpu->vm)) { if ((cr4 & CR4_OSXSAVE) != 0UL) {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx); *ecx |= CPUID_ECX_OSXSAVE;
} else {
*ecx = subleaf & 0xFFU;
*edx = vlapic_get_apicid(vcpu_vlapic(vcpu));
/* No HT emulation for UOS */
switch (subleaf) {
case 0U:
*eax = 0U;
*ebx = 1U;
*ecx |= (1U << 8U);
break;
case 1U:
if (vcpu->vm->hw.created_vcpus == 1U) {
*eax = 0U;
} else {
*eax = (uint32_t)fls32(vcpu->vm->hw.created_vcpus - 1U) + 1U;
} }
*ebx = vcpu->vm->hw.created_vcpus; }
*ecx |= (2U << 8U);
break; break;
default: }
case 0x0bU:
/* Patching X2APIC */
#ifdef CONFIG_PARTITION_MODE
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
#else
if (is_vm0(vcpu->vm)) {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
} else {
*ecx = subleaf & 0xFFU;
*edx = vlapic_get_apicid(vcpu_vlapic(vcpu));
/* No HT emulation for UOS */
switch (subleaf) {
case 0U:
*eax = 0U;
*ebx = 1U;
*ecx |= (1U << 8U);
break;
case 1U:
if (vcpu->vm->hw.created_vcpus == 1U) {
*eax = 0U;
} else {
*eax = (uint32_t)fls32(vcpu->vm->hw.created_vcpus - 1U) + 1U;
}
*ebx = vcpu->vm->hw.created_vcpus;
*ecx |= (2U << 8U);
break;
default:
*eax = 0U;
*ebx = 0U;
*ecx |= (0U << 8U);
break;
}
}
#endif
break;
case 0x0dU:
if (!cpu_has_cap(X86_FEATURE_OSXSAVE)) {
*eax = 0U; *eax = 0U;
*ebx = 0U; *ebx = 0U;
*ecx |= (0U << 8U); *ecx = 0U;
break; *edx = 0U;
} else {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
} }
} break;
#endif
break;
case 0x0dU: default:
if (!cpu_has_cap(X86_FEATURE_OSXSAVE)) { /*
*eax = 0U; * In this switch statement, leaf shall either be 0x01U or 0x0bU
*ebx = 0U; * or 0x0dU. All the other cases have been handled properly
*ecx = 0U; * before this switch statement.
*edx = 0U; * Gracefully return if prior case clauses have not been met.
} else { */
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx); break;
} }
break;
default:
/*
* In this switch statement, leaf shall either be 0x01U or 0x0bU
* or 0x0dU. All the other cases have been handled properly
* before this switch statement.
* Gracefully return if prior case clauses have not been met.
*/
break;
} }
} }