hv: cpu: fix "Procedure has more than one exit point"

IEC 61508,ISO 26262 standards highly recommend single-exit rule.

Reduce the count of the "return entries".
Fix the violations that match the cases listed below:
1. The function has 2 return entries.
2. The first return entry is used to return an error code after
checking whether a variable is valid.

Fix the violations in "if else" format.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Huihuang Shi 2018-11-29 11:08:54 +08:00 committed by wenlingz
parent 7f08ad8375
commit ddb548367a
2 changed files with 113 additions and 113 deletions

View File

@ -67,12 +67,15 @@ bool cpu_has_cap(uint32_t bit)
{ {
uint32_t feat_idx = bit >> 5U; uint32_t feat_idx = bit >> 5U;
uint32_t feat_bit = bit & 0x1fU; uint32_t feat_bit = bit & 0x1fU;
bool ret;
if (feat_idx >= FEATURE_WORDS) { if (feat_idx >= FEATURE_WORDS) {
return false; ret = false;
} else {
ret = ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U);
} }
return ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U); return ret;
} }
static inline bool get_monitor_cap(void) static inline bool get_monitor_cap(void)
@ -710,11 +713,7 @@ void cpu_dead(uint16_t pcpu_id)
*/ */
int halt = 1; int halt = 1;
if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap) == false) { if (bitmap_test_and_clear_lock(pcpu_id, &pcpu_active_bitmap)) {
pr_err("pcpu%hu already dead", pcpu_id);
return;
}
/* clean up native stuff */ /* clean up native stuff */
vmx_off(pcpu_id); vmx_off(pcpu_id);
cache_flush_invalidate_all(); cache_flush_invalidate_all();
@ -726,6 +725,9 @@ void cpu_dead(uint16_t pcpu_id)
do { do {
hlt_cpu(); hlt_cpu();
} while (halt != 0); } while (halt != 0);
} else {
pr_err("pcpu%hu already dead", pcpu_id);
}
} }
static void set_current_cpu_id(uint16_t pcpu_id) static void set_current_cpu_id(uint16_t pcpu_id)
@ -804,10 +806,7 @@ static void ept_cap_detect(void)
msr_val = msr_val >> 32U; msr_val = msr_val >> 32U;
/* Check if secondary processor based VM control is available. */ /* Check if secondary processor based VM control is available. */
if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) == 0UL) { if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) != 0UL) {
return;
}
/* Read secondary processor based VM control. */ /* Read secondary processor based VM control. */
msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2); msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
@ -815,6 +814,7 @@ static void ept_cap_detect(void)
cpu_caps.ept_features = 1U; cpu_caps.ept_features = 1U;
} }
} }
}
static void apicv_cap_detect(void) static void apicv_cap_detect(void)
{ {

View File

@ -63,22 +63,24 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp
return entry; return entry;
} }
static inline int set_vcpuid_entry(struct acrn_vm *vm, static inline int32_t set_vcpuid_entry(struct acrn_vm *vm,
const struct vcpuid_entry *entry) const struct vcpuid_entry *entry)
{ {
struct vcpuid_entry *tmp; struct vcpuid_entry *tmp;
size_t entry_size = sizeof(struct vcpuid_entry); size_t entry_size = sizeof(struct vcpuid_entry);
int32_t ret;
if (vm->vcpuid_entry_nr == MAX_VM_VCPUID_ENTRIES) { if (vm->vcpuid_entry_nr == MAX_VM_VCPUID_ENTRIES) {
pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n", pr_err("%s, vcpuid entry over MAX_VM_VCPUID_ENTRIES(%u)\n",
__func__, MAX_VM_VCPUID_ENTRIES); __func__, MAX_VM_VCPUID_ENTRIES);
return -ENOMEM; ret = -ENOMEM;
} } else {
tmp = &vm->vcpuid_entries[vm->vcpuid_entry_nr]; tmp = &vm->vcpuid_entries[vm->vcpuid_entry_nr];
vm->vcpuid_entry_nr++; vm->vcpuid_entry_nr++;
(void)memcpy_s(tmp, entry_size, entry, entry_size); (void)memcpy_s(tmp, entry_size, entry, entry_size);
return 0; ret = 0;
}
return ret;
} }
/** /**
@ -320,10 +322,7 @@ void guest_cpuid(struct acrn_vcpu *vcpu,
*ecx = 0U; *ecx = 0U;
*edx = 0U; *edx = 0U;
} }
} else {
return;
}
/* percpu related */ /* percpu related */
switch (leaf) { switch (leaf) {
case 0x01U: case 0x01U:
@ -419,3 +418,4 @@ void guest_cpuid(struct acrn_vcpu *vcpu,
break; break;
} }
} }
}