HV: Fix missing braces for MISRA C violations

Patch 1 of 7.
Add braces around unbraced if/else bodies to fix MISRA C
violations of rules 11S and 12S.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
Author:    Arindam Roy
Date:      2018-07-12 15:00:48 -07:00
Committer: wenlingz
Parent:    b4a6b93d5c
Commit:    88a3205d3c
5 changed files with 255 additions and 135 deletions
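
For context, rules 11S and 12S flag conditional bodies that are not compound
statements, i.e. if/else arms without braces. Every hunk below applies the
same mechanical fix, sketched here on hypothetical code (not from the ACRN
tree):

#include <stddef.h>

/* Non-compliant: unbraced single-statement bodies. */
static int lookup_noncompliant(const int *entry)
{
	if (entry == NULL)
		return -1;
	else
		return *entry;
}

/* Compliant: every body is a compound statement, as in this patch. */
static int lookup_compliant(const int *entry)
{
	if (entry == NULL) {
		return -1;
	} else {
		return *entry;
	}
}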

---- File 1 of 5 ----

@@ -30,11 +30,12 @@ entry_id(struct ptdev_remapping_info *entry)
{
uint32_t id;
if (entry->type == PTDEV_INTR_INTX)
if (entry->type == PTDEV_INTR_INTX) {
id = entry_id_from_intx(entry->ptdev_intr_info.intx.phys_pin);
else
} else {
id = entry_id_from_msix(entry->phys_bdf,
entry->ptdev_intr_info.msi.msix_entry_index);
}
return id;
}
@@ -61,8 +62,9 @@ _lookup_entry_by_id(uint32_t id)
list_for_each(pos, &ptdev_list) {
entry = list_entry(pos, struct ptdev_remapping_info,
entry_node);
if (entry_id(entry) == id)
if (entry_id(entry) == id) {
return entry;
}
}
return NULL;
@@ -82,8 +84,9 @@ _lookup_entry_by_vmsi(struct vm *vm, uint16_t vbdf, int32_t index)
&& (entry->vm == vm)
&& (entry->virt_bdf == vbdf)
&& (entry->ptdev_intr_info.msi.msix_entry_index
== index))
== index)) {
return entry;
}
}
return NULL;
@@ -114,8 +117,9 @@ _lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
if ((entry->type == PTDEV_INTR_INTX)
&& (entry->vm == vm)
&& (entry->ptdev_intr_info.intx.virt_pin == vpin)
&& (entry->ptdev_intr_info.intx.vpin_src == vpin_src))
&& (entry->ptdev_intr_info.intx.vpin_src == vpin_src)) {
return entry;
}
}
return NULL;
@@ -151,13 +155,15 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
/* VPIN_IOAPIC src means we have vioapic enabled */
vioapic_get_rte(vm, entry->ptdev_intr_info.intx.virt_pin, &rte);
if ((rte & IOAPIC_RTE_TRGRMOD) == IOAPIC_RTE_TRGRLVL)
if ((rte & IOAPIC_RTE_TRGRMOD) == IOAPIC_RTE_TRGRLVL) {
trigger_lvl = true;
}
if (trigger_lvl)
if (trigger_lvl) {
update_irq_handler(phys_irq, common_dev_handler_level);
else
} else {
update_irq_handler(phys_irq, common_handler_edge);
}
}
/* update irq handler for PIC */
if ((entry->type == PTDEV_INTR_INTX) && (phys_irq < NR_LEGACY_IRQ)
@@ -167,20 +173,22 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
/* VPIN_PIC src means we have vpic enabled */
vpic_get_irq_trigger(vm,
entry->ptdev_intr_info.intx.virt_pin, &trigger);
if (trigger == LEVEL_TRIGGER)
if (trigger == LEVEL_TRIGGER) {
update_irq_handler(phys_irq, common_dev_handler_level);
else
} else {
update_irq_handler(phys_irq, common_handler_edge);
}
}
}
static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
{
/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4U)
if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4U) {
return true;
else
} else {
return false;
}
}
static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
@@ -200,8 +208,9 @@ static void ptdev_build_physical_msi(struct vm *vm, struct ptdev_msi_info *info,
/* get physical delivery mode */
delmode = info->vmsi_data & APIC_DELMODE_MASK;
if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO)
if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO) {
delmode = APIC_DELMODE_LOWPRIO;
}
/* update physical delivery mode & vector */
info->pmsi_data = info->vmsi_data;
@@ -244,8 +253,9 @@ static uint64_t ptdev_build_physical_rte(struct vm *vm,
/* physical delivery mode */
delmode = low & IOAPIC_RTE_DELMOD;
if ((delmode != IOAPIC_RTE_DELFIXED) &&
(delmode != IOAPIC_RTE_DELLOPRI))
(delmode != IOAPIC_RTE_DELLOPRI)) {
delmode = IOAPIC_RTE_DELLOPRI;
}
/* update physical delivery mode, dest mode(logical) & vector */
low &= ~(IOAPIC_RTE_DESTMOD |
@@ -271,8 +281,9 @@ static uint64_t ptdev_build_physical_rte(struct vm *vm,
rte &= ~IOAPIC_RTE_TRGRMOD;
vpic_get_irq_trigger(vm,
entry->ptdev_intr_info.intx.virt_pin, &trigger);
if (trigger == LEVEL_TRIGGER)
if (trigger == LEVEL_TRIGGER) {
rte |= IOAPIC_RTE_TRGRLVL;
}
dev_dbg(ACRN_DBG_IRQ, "IOAPIC RTE = 0x%x:%x(P) -> 0x%x:%x(P)",
physical_rte >> 32, (uint32_t)physical_rte,
@@ -339,12 +350,14 @@ remove_msix_remapping(struct vm *vm, uint16_t virt_bdf, int msix_entry_index)
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
if (entry == NULL)
if (entry == NULL) {
goto END;
}
if (is_entry_active(entry))
if (is_entry_active(entry)) {
/* TODO: disable MSIX device when HV can do so in the future */
ptdev_deactivate_entry(entry);
}
dev_dbg(ACRN_DBG_IRQ,
"VM%d MSIX remove vector mapping vbdf-pbdf:0x%x-0x%x idx=%d",
@@ -420,13 +433,15 @@ static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vintx(vm, virt_pin, vpin_src);
if (entry == NULL)
if (entry == NULL) {
goto END;
}
if (is_entry_active(entry)) {
phys_irq = dev_to_irq(entry->node);
if (!irq_is_gsi(phys_irq))
if (!irq_is_gsi(phys_irq)) {
goto END;
}
/* disable interrupt */
GSI_MASK_IRQ(phys_irq);
@@ -459,15 +474,17 @@ static void ptdev_intr_handle_irq(struct vm *vm,
/* VPIN_IOAPIC src means we have vioapic enabled */
vioapic_get_rte(vm, entry->ptdev_intr_info.intx.virt_pin, &rte);
if ((rte & IOAPIC_RTE_TRGRMOD) == IOAPIC_RTE_TRGRLVL)
if ((rte & IOAPIC_RTE_TRGRMOD) == IOAPIC_RTE_TRGRLVL) {
trigger_lvl = true;
}
if (trigger_lvl)
if (trigger_lvl) {
vioapic_assert_irq(vm,
entry->ptdev_intr_info.intx.virt_pin);
else
} else {
vioapic_pulse_irq(vm,
entry->ptdev_intr_info.intx.virt_pin);
}
dev_dbg(ACRN_DBG_PTIRQ,
"dev-assign: irq=0x%x assert vr: 0x%x vRTE=0x%x",
@@ -482,12 +499,13 @@ static void ptdev_intr_handle_irq(struct vm *vm,
/* VPIN_PIC src means we have vpic enabled */
vpic_get_irq_trigger(vm,
entry->ptdev_intr_info.intx.virt_pin, &trigger);
if (trigger == LEVEL_TRIGGER)
if (trigger == LEVEL_TRIGGER) {
vpic_assert_irq(vm,
entry->ptdev_intr_info.intx.virt_pin);
else
} else {
vpic_pulse_irq(vm,
entry->ptdev_intr_info.intx.virt_pin);
}
break;
}
default:
@@ -501,8 +519,9 @@ void ptdev_softirq(__unused uint16_t cpu_id)
struct ptdev_remapping_info *entry = ptdev_dequeue_softirq();
struct vm *vm;
if (entry == NULL)
if (entry == NULL) {
break;
}
/* skip any inactive entry */
if (!is_entry_active(entry)) {
@@ -514,9 +533,9 @@ void ptdev_softirq(__unused uint16_t cpu_id)
vm = entry->vm;
/* handle real request */
if (entry->type == PTDEV_INTR_INTX)
if (entry->type == PTDEV_INTR_INTX) {
ptdev_intr_handle_irq(vm, entry);
else {
} else {
/* TODO: msi destmode check required */
vlapic_intr_msi(vm,
entry->ptdev_intr_info.msi.vmsi_addr,
@@ -542,13 +561,15 @@ void ptdev_intx_ack(struct vm *vm, int virt_pin,
int phys_pin;
entry = lookup_entry_by_vintx(vm, virt_pin, vpin_src);
if (entry == NULL)
if (entry == NULL) {
return;
}
phys_pin = entry->ptdev_intr_info.intx.phys_pin;
phys_irq = pin_to_irq(phys_pin);
if (!irq_is_gsi(phys_irq))
if (!irq_is_gsi(phys_irq)) {
return;
}
/* NOTE: only Level trigger will process EOI/ACK and if we got here
* means we have this vioapic or vpic or both enabled
@@ -651,12 +672,14 @@ static bool vpin_masked(struct vm *vm, uint8_t virt_pin,
uint64_t rte;
vioapic_get_rte(vm, virt_pin, &rte);
if ((rte & IOAPIC_RTE_INTMASK) == IOAPIC_RTE_INTMSET)
if ((rte & IOAPIC_RTE_INTMASK) == IOAPIC_RTE_INTMSET) {
return true;
else
} else {
return false;
} else
}
} else {
return vpic_is_pin_mask(vm->vpic, virt_pin);
}
}
static void activate_physical_ioapic(struct vm *vm,
@@ -705,8 +728,9 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
*/
/* no remap for hypervisor owned intx */
if (ptdev_hv_owned_intx(vm, info))
if (ptdev_hv_owned_intx(vm, info)) {
goto END;
}
/* query if we have virt to phys mapping */
entry = lookup_entry_by_vintx(vm, info->virt_pin, info->vpin_src);
@@ -726,22 +750,25 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
pic_ioapic_pin_map[info->virt_pin],
pic_pin ? PTDEV_VPIN_IOAPIC
: PTDEV_VPIN_PIC);
if (entry != NULL)
if (entry != NULL) {
need_switch_vpin_src = true;
}
}
/* entry could be updated by above switch check */
if (entry == NULL) {
/* allocate entry during first unmask */
if (vpin_masked(vm, info->virt_pin,
info->vpin_src))
info->vpin_src)) {
goto END;
}
info->phys_pin = info->virt_pin;
/* fix vPIC pin to correct native IOAPIC pin */
if (pic_pin)
if (pic_pin) {
info->phys_pin =
pic_ioapic_pin_map[info->virt_pin];
}
entry = add_intx_remapping(vm, info->virt_pin,
info->phys_pin, pic_pin);
@@ -762,14 +789,16 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
/* no need update if vpin is masked && entry is not active */
if (!is_entry_active(entry) &&
vpin_masked(vm, info->virt_pin, info->vpin_src))
vpin_masked(vm, info->virt_pin, info->vpin_src)) {
goto END;
}
/* phys_pin from physical IOAPIC */
phys_pin = entry->ptdev_intr_info.intx.phys_pin;
phys_irq = pin_to_irq(phys_pin);
if (!irq_is_gsi(phys_irq))
if (!irq_is_gsi(phys_irq)) {
goto END;
}
/* if the vpin source needs switching, make sure the entry is deactivated */
if (need_switch_vpin_src) {
@@ -852,8 +881,9 @@ int ptdev_add_intx_remapping(struct vm *vm,
}
entry = add_intx_remapping(vm, virt_pin, phys_pin, pic_pin);
if (is_entry_invalid(entry))
if (is_entry_invalid(entry)) {
return -ENODEV;
}
return 0;
}
@@ -881,8 +911,9 @@ int ptdev_add_msix_remapping(struct vm *vm, uint16_t virt_bdf,
for (i = 0; i < vector_count; i++) {
entry = add_msix_remapping(vm, virt_bdf, phys_bdf, i);
if (is_entry_invalid(entry))
if (is_entry_invalid(entry)) {
return -ENODEV;
}
}
return 0;
@@ -914,10 +945,11 @@ static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
*dest = (entry->ptdev_intr_info.msi.pmsi_addr & 0xFF000U)
>> 12;
if ((entry->ptdev_intr_info.msi.pmsi_data &
APIC_TRIGMOD_LEVEL) != 0U)
APIC_TRIGMOD_LEVEL) != 0U) {
*lvl_tm = true;
else
} else {
*lvl_tm = false;
}
*pin = IRQ_INVALID;
*vpin = -1;
*bdf = entry->phys_bdf;
@@ -928,16 +960,18 @@ static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
uint64_t rte = 0;
if (entry->ptdev_intr_info.intx.vpin_src
== PTDEV_VPIN_IOAPIC)
== PTDEV_VPIN_IOAPIC) {
(void)strcpy_s(type, 16, "IOAPIC");
else
} else {
(void)strcpy_s(type, 16, "PIC");
}
ioapic_get_rte(phys_irq, &rte);
*dest = ((rte >> 32) & IOAPIC_RTE_DEST) >> 24;
if ((rte & IOAPIC_RTE_TRGRLVL) != 0U)
if ((rte & IOAPIC_RTE_TRGRLVL) != 0U) {
*lvl_tm = true;
else
} else {
*lvl_tm = false;
}
*pin = entry->ptdev_intr_info.intx.phys_pin;
*vpin = entry->ptdev_intr_info.intx.virt_pin;
*bdf = 0;
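
Why the rule matters: unbraced bodies invite dangling-else misbinding, the
hazard lurking in nested conditionals like vpin_masked() above. A minimal
stand-alone illustration (hypothetical code, not part of this commit):

#include <stdbool.h>
#include <stdio.h>

/* The else is indented as if it pairs with the outer if,
 * but the compiler binds it to the nearest unbraced if. */
static void dangling(bool outer, bool inner)
{
	if (outer)
		if (inner)
			printf("outer && inner\n");
	else	/* actually pairs with if (inner) */
		printf("runs when outer && !inner, not when !outer\n");
}

/* Braced per rules 11S/12S: the pairing is explicit. */
static void braced(bool outer, bool inner)
{
	if (outer) {
		if (inner) {
			printf("outer && inner\n");
		}
	} else {
		printf("!outer\n");
	}
}

int main(void)
{
	dangling(true, false);	/* prints the misbound else branch */
	braced(true, false);	/* prints nothing */
	return 0;
}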

---- File 2 of 5 ----

@@ -69,8 +69,9 @@ inline bool cpu_has_cap(uint32_t bit)
uint32_t feat_idx = bit >> 5U;
uint32_t feat_bit = bit & 0x1fU;
if (feat_idx >= FEATURE_WORDS)
if (feat_idx >= FEATURE_WORDS) {
return false;
}
return ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U);
}
@@ -82,8 +83,9 @@ static inline bool get_monitor_cap(void)
* in hypervisor, but still expose it to the guests and
* let them handle it correctly
*/
if (boot_cpu_data.family != 0x6U || boot_cpu_data.model != 0x5cU)
if (boot_cpu_data.family != 0x6U || boot_cpu_data.model != 0x5cU) {
return true;
}
}
return false;
@@ -107,13 +109,15 @@ static void get_cpu_capabilities(void)
&boot_cpu_data.cpuid_leaves[FEAT_1_ECX],
&boot_cpu_data.cpuid_leaves[FEAT_1_EDX]);
family = (eax >> 8U) & 0xffU;
if (family == 0xFU)
if (family == 0xFU) {
family += (eax >> 20U) & 0xffU;
}
boot_cpu_data.family = (uint8_t)family;
model = (eax >> 4U) & 0xfU;
if (family >= 0x06U)
if (family >= 0x06U) {
model += ((eax >> 16U) & 0xfU) << 4U;
}
boot_cpu_data.model = (uint8_t)model;
@@ -126,10 +130,11 @@ static void get_cpu_capabilities(void)
&boot_cpu_data.extended_cpuid_level,
&unused, &unused, &unused);
if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_FUNCTION_1)
if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_FUNCTION_1) {
cpuid(CPUID_EXTEND_FUNCTION_1, &unused, &unused,
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_ECX],
&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_EDX]);
}
if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_ADDRESS_SIZE) {
cpuid(CPUID_EXTEND_ADDRESS_SIZE, &eax,
@@ -230,8 +235,9 @@ static int hardware_detect_support(void)
}
ret = check_vmx_mmu_cap();
if (ret != 0)
if (ret != 0) {
return ret;
}
pr_acrnlog("hardware support HV");
return 0;
@@ -304,8 +310,9 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum cpu_state state)
}
/* If cpu is dead, decrement CPU up count */
if (state == CPU_STATE_DEAD)
if (state == CPU_STATE_DEAD) {
up_count--;
}
/* Set state for the specified CPU */
per_cpu(state, pcpu_id) = state;
@@ -433,8 +440,9 @@ void bsp_boot_init(void)
__bitmap_set(BOOT_CPU_ID, &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0)
if ((misc_en & TURBO_MODE_DISABLE) == 0) {
msr_write(MSR_IA32_MISC_ENABLE, misc_en | TURBO_MODE_DISABLE);
}
/* Get CPU capabilities thru CPUID, including the physical address bit
* limit which is required for initializing paging.
@@ -550,8 +558,9 @@ static void bsp_boot_post(void)
console_setup_timer();
/* Start initializing the VM for this CPU */
if (hv_main(BOOT_CPU_ID) != 0)
if (hv_main(BOOT_CPU_ID) != 0) {
panic("failed to start VM for bsp\n");
}
/* Control should not come here */
cpu_dead(BOOT_CPU_ID);
@@ -583,8 +592,9 @@ void cpu_secondary_init(void)
__bitmap_set(get_cpu_id(), &pcpu_active_bitmap);
misc_en = msr_read(MSR_IA32_MISC_ENABLE);
if ((misc_en & TURBO_MODE_DISABLE) == 0)
if ((misc_en & TURBO_MODE_DISABLE) == 0) {
msr_write(MSR_IA32_MISC_ENABLE, misc_en | TURBO_MODE_DISABLE);
}
/* Switch to run-time stack */
rsp = (uint64_t)(&get_cpu_var(stack)[CONFIG_STACK_SIZE - 1]);
@@ -623,8 +633,9 @@ static void cpu_secondary_post(void)
pcpu_sync_sleep(&pcpu_sync, 0UL);
ret = hv_main(get_cpu_id());
if (ret != 0)
if (ret != 0) {
panic("hv_main ret = %d\n", ret);
}
/* Control will only come here for secondary CPUs not configured for
* use or if an error occurs in hv_main
@@ -637,8 +648,9 @@ static uint16_t get_cpu_id_from_lapic_id(uint8_t lapic_id)
uint16_t i;
for (i = 0U; i < phys_cpu_num; i++) {
if (per_cpu(lapic_id, i) == lapic_id)
if (per_cpu(lapic_id, i) == lapic_id) {
return i;
}
}
return INVALID_CPU_ID;
@@ -697,8 +709,9 @@ void stop_cpus()
timeout = CONFIG_CPU_UP_TIMEOUT * 1000U;
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
if (get_cpu_id() == pcpu_id) /* avoid offline itself */
if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
continue;
}
make_pcpu_offline(pcpu_id);
}
@@ -833,19 +846,22 @@ static void vapic_cap_detect(void)
}
features |= VAPIC_FEATURE_VIRT_ACCESS;
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS))
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS)) {
features |= VAPIC_FEATURE_VIRT_REG;
}
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC))
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC)) {
features |= VAPIC_FEATURE_VX2APIC_MODE;
}
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VIRQ)) {
features |= VAPIC_FEATURE_INTR_DELIVERY;
msr_val = msr_read(MSR_IA32_VMX_PINBASED_CTLS);
if (is_ctrl_setting_allowed(msr_val,
VMX_PINBASED_CTLS_POST_IRQ))
VMX_PINBASED_CTLS_POST_IRQ)) {
features |= VAPIC_FEATURE_POST_INTR;
}
}
cpu_caps.vapic_features = features;
@@ -880,9 +896,10 @@ static void cpu_xsave_init(void)
cpuid(CPUID_FEATURES, &unused, &unused, &ecx, &unused);
/* if set, update it */
if ((ecx & CPUID_ECX_OSXSAVE) != 0U)
if ((ecx & CPUID_ECX_OSXSAVE) != 0U) {
boot_cpu_data.cpuid_leaves[FEAT_1_ECX] |=
CPUID_ECX_OSXSAVE;
}
}
}
}
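
An aside on the get_cpu_capabilities() hunks above: they implement the usual
CPUID display family/model decode, where the extended family is added only
when the base family is 0xF and the extended model applies from family 6 up.
A stand-alone sketch of the same computation, assuming a GCC/Clang toolchain
that provides <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (__get_cpuid(1U, &eax, &ebx, &ecx, &edx) == 0) {
		return 1;	/* CPUID leaf 1 not available */
	}

	family = (eax >> 8U) & 0xffU;
	if (family == 0xfU) {
		family += (eax >> 20U) & 0xffU;	/* extended family */
	}

	model = (eax >> 4U) & 0xfU;
	if (family >= 0x06U) {
		model += ((eax >> 16U) & 0xfU) << 4U;	/* extended model */
	}

	printf("family 0x%x, model 0x%x\n", family, model);
	return 0;
}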

---- File 3 of 5 ----

@@ -15,31 +15,37 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
nr = vm->vcpuid_entry_nr;
half = nr / 2U;
if (vm->vcpuid_entries[half].leaf < leaf)
if (vm->vcpuid_entries[half].leaf < leaf) {
i = half;
}
for (; i < nr; i++) {
struct vcpuid_entry *tmp = &vm->vcpuid_entries[i];
if (tmp->leaf < leaf)
if (tmp->leaf < leaf) {
continue;
}
if (tmp->leaf == leaf) {
if ((tmp->flags & CPUID_CHECK_SUBLEAF) != 0U &&
(tmp->subleaf != subleaf))
(tmp->subleaf != subleaf)) {
continue;
}
entry = tmp;
break;
} else if (tmp->leaf > leaf)
} else if (tmp->leaf > leaf) {
break;
}
}
if (entry == NULL) {
uint32_t limit;
if ((leaf & 0x80000000U) != 0U)
if ((leaf & 0x80000000U) != 0U) {
limit = vm->vcpuid_xlevel;
else
}
else {
limit = vm->vcpuid_level;
}
if (leaf > limit) {
/* Intel documentation states that invalid EAX input
@@ -163,15 +169,17 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, 0U, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
limit = entry.eax;
vm->vcpuid_level = limit;
for (i = 1U; i <= limit; i++) {
/* cpuid 1/0xb is percpu related */
if (i == 1U || i == 0xbU)
if (i == 1U || i == 0xbU) {
continue;
}
switch (i) {
case 0x02U:
@@ -181,16 +189,18 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, i, 0U,
CPUID_CHECK_SUBLEAF, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
times = entry.eax & 0xffUL;
for (j = 1U; j < times; j++) {
init_vcpuid_entry(vm, i, j,
CPUID_CHECK_SUBLEAF, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
}
break;
}
@@ -198,52 +208,61 @@ int set_vcpuid_entries(struct vm *vm)
case 0x04U:
case 0x0dU:
for (j = 0U; ; j++) {
if (i == 0x0dU && j == 64U)
if (i == 0x0dU && j == 64U) {
break;
}
init_vcpuid_entry(vm, i, j,
CPUID_CHECK_SUBLEAF, &entry);
if (i == 0x04U && entry.eax == 0U)
if (i == 0x04U && entry.eax == 0U) {
break;
if (i == 0x0dU && entry.eax == 0U)
}
if (i == 0x0dU && entry.eax == 0U) {
continue;
}
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
}
break;
default:
init_vcpuid_entry(vm, i, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
break;
}
}
init_vcpuid_entry(vm, 0x40000000U, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
init_vcpuid_entry(vm, 0x40000010U, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
init_vcpuid_entry(vm, 0x80000000U, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
limit = entry.eax;
vm->vcpuid_xlevel = limit;
for (i = 0x80000001U; i <= limit; i++) {
init_vcpuid_entry(vm, i, 0U, 0U, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result != 0)
if (result != 0) {
return result;
}
}
return 0;
@@ -292,10 +311,11 @@ void guest_cpuid(struct vcpu *vcpu,
#endif
/* Patching X2APIC, X2APIC mode is disabled by default. */
if (x2apic_enabled)
if (x2apic_enabled) {
*ecx |= CPUID_ECX_x2APIC;
else
} else {
*ecx &= ~CPUID_ECX_x2APIC;
}
/* mask pcid */
*ecx &= ~CPUID_ECX_PCID;
@@ -304,16 +324,18 @@ void guest_cpuid(struct vcpu *vcpu,
*ecx &= ~CPUID_ECX_VMX;
/*no xsave support for guest if it is not enabled on host*/
if ((*ecx & CPUID_ECX_OSXSAVE) == 0U)
if ((*ecx & CPUID_ECX_OSXSAVE) == 0U) {
*ecx &= ~CPUID_ECX_XSAVE;
}
*ecx &= ~CPUID_ECX_OSXSAVE;
if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
uint64_t cr4;
/*read guest CR4*/
cr4 = exec_vmread(VMX_GUEST_CR4);
if ((cr4 & CR4_OSXSAVE) != 0UL)
if ((cr4 & CR4_OSXSAVE) != 0UL) {
*ecx |= CPUID_ECX_OSXSAVE;
}
}
break;
}
@@ -325,8 +347,9 @@ void guest_cpuid(struct vcpu *vcpu,
*ebx = 0U;
*ecx = 0U;
*edx = 0U;
} else
} else {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
}
break;
case 0x0dU:
@ -335,8 +358,9 @@ void guest_cpuid(struct vcpu *vcpu,
*ebx = 0U;
*ecx = 0U;
*edx = 0U;
} else
} else {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
}
break;
default:
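
A note on the sub-leaf handling in set_vcpuid_entries() above: leaves 0x2,
0x4 and 0x0d depend on ECX, and leaf 4 enumeration stops at the first
sub-leaf whose EAX reads zero. A hedged stand-alone equivalent of that
termination test (GCC/Clang <cpuid.h>; not ACRN code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int subleaf;

	for (subleaf = 0U; ; subleaf++) {
		if (__get_cpuid_count(0x04U, subleaf,
				&eax, &ebx, &ecx, &edx) == 0) {
			break;	/* leaf 4 not supported at all */
		}
		/* Mirrors "if (i == 0x04U && entry.eax == 0U) break;" */
		if (eax == 0U) {
			break;
		}
		printf("cache sub-leaf %u: type %u, level %u\n",
			subleaf, eax & 0x1fU, (eax >> 5U) & 0x7U);
	}
	return 0;
}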

---- File 4 of 5 ----

@@ -24,8 +24,9 @@ static uint64_t find_next_table(uint32_t table_offset, void *table_base)
/* If bit 7 is set, entry is not a subtable. */
if ((table_entry & IA32E_PDPTE_PS_BIT) != 0U
|| (table_entry & IA32E_PDE_PS_BIT) != 0U)
|| (table_entry & IA32E_PDE_PS_BIT) != 0U) {
return sub_table_addr;
}
/* Set table present bits to any of the read/write/execute bits */
table_present = (IA32E_EPT_R_BIT | IA32E_EPT_W_BIT | IA32E_EPT_X_BIT);
@@ -60,8 +61,9 @@ void free_ept_mem(void *pml4_addr)
for (pml4_index = 0U; pml4_index < IA32E_NUM_ENTRIES; pml4_index++) {
/* Walk from the PML4 table to the PDPT table */
pdpt_addr = HPA2HVA(find_next_table(pml4_index, pml4_addr));
if (pdpt_addr == NULL)
if (pdpt_addr == NULL) {
continue;
}
for (pdpt_index = 0U; pdpt_index < IA32E_NUM_ENTRIES;
pdpt_index++) {
@@ -69,8 +71,9 @@ void free_ept_mem(void *pml4_addr)
pde_addr = HPA2HVA(find_next_table(pdpt_index,
pdpt_addr));
if (pde_addr == NULL)
if (pde_addr == NULL) {
continue;
}
for (pde_index = 0U; pde_index < IA32E_NUM_ENTRIES;
pde_index++) {
@@ -79,12 +82,14 @@ void free_ept_mem(void *pml4_addr)
pde_addr));
/* Free page table entry table */
if (pte_addr != NULL)
if (pte_addr != NULL) {
free_paging_struct(pte_addr);
}
}
/* Free page directory entry table */
if (pde_addr != NULL)
if (pde_addr != NULL) {
free_paging_struct(pde_addr);
}
}
free_paging_struct(pdpt_addr);
}
@@ -130,8 +135,9 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
vm->attr.boot_idx, gpa);
}
if (size != NULL)
if (size != NULL) {
*size = pg_size;
}
return hpa;
}
@@ -240,9 +246,10 @@ int register_mmio_emulation_handler(struct vm *vm,
* should unmap it. But UOS will not, so we shouldn't
* need to unmap it.
*/
if (is_vm0(vm))
if (is_vm0(vm)) {
ept_mmap(vm, start, start, end - start,
MAP_UNMAP, 0);
}
/* Return success */
status = 0;
@@ -286,8 +293,9 @@ int dm_emulate_mmio_post(struct vcpu *vcpu)
/* VHM emulation data already copy to req, mark to free slot now */
req_buf->req_queue[cur].valid = false;
if (req_buf->req_queue[cur].processed == REQ_STATE_SUCCESS)
if (req_buf->req_queue[cur].processed == REQ_STATE_SUCCESS) {
vcpu->mmio.mmio_status = MMIO_TRANS_VALID;
}
else {
vcpu->mmio.mmio_status = MMIO_TRANS_INVALID;
goto out;
@@ -297,8 +305,9 @@ int dm_emulate_mmio_post(struct vcpu *vcpu)
vcpu->mmio.value = vcpu->req.reqs.mmio_request.value;
/* Emulate instruction and update vcpu register set */
ret = emulate_instruction(vcpu);
if (ret != 0)
if (ret != 0) {
goto out;
}
}
out:
@@ -311,16 +320,19 @@ static int dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual)
if (vcpu->mmio.read_write == HV_MEM_IO_WRITE) {
status = emulate_instruction(vcpu);
if (status != 0)
if (status != 0) {
return status;
}
vcpu->req.reqs.mmio_request.value = vcpu->mmio.value;
/* XXX: write access while EPT perm RX -> WP */
if ((exit_qual & 0x38UL) == 0x28UL)
if ((exit_qual & 0x38UL) == 0x28UL) {
vcpu->req.type = REQ_WP;
}
}
if (vcpu->req.type == 0U)
if (vcpu->req.type == 0U) {
vcpu->req.type = REQ_MMIO;
}
vcpu->req.reqs.mmio_request.direction = vcpu->mmio.read_write;
vcpu->req.reqs.mmio_request.address = (long)vcpu->mmio.paddr;
vcpu->req.reqs.mmio_request.size = vcpu->mmio.access_size;
@@ -371,22 +383,25 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
mmio->paddr = gpa;
ret = decode_instruction(vcpu);
if (ret > 0)
if (ret > 0) {
mmio->access_size = ret;
}
else if (ret == -EFAULT) {
pr_info("page fault happen during decode_instruction");
status = 0;
goto out;
} else
}
else {
goto out;
}
list_for_each(pos, &vcpu->vm->mmio_list) {
mmio_handler = list_entry(pos, struct mem_io_node, list);
if ((mmio->paddr + mmio->access_size <=
mmio_handler->range_start) ||
(mmio->paddr >= mmio_handler->range_end))
(mmio->paddr >= mmio_handler->range_end)) {
continue;
}
else if (!((mmio->paddr >= mmio_handler->range_start) &&
(mmio->paddr + mmio->access_size <=
mmio_handler->range_end))) {
@@ -396,8 +411,9 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
}
if (mmio->read_write == HV_MEM_IO_WRITE) {
if (emulate_instruction(vcpu) != 0)
if (emulate_instruction(vcpu) != 0) {
goto out;
}
}
/* Call generic memory emulation handler
@@ -408,8 +424,9 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
hv_emulate_mmio(vcpu, mmio, mmio_handler);
if (mmio->read_write == HV_MEM_IO_READ) {
/* Emulate instruction and update vcpu register set */
if (emulate_instruction(vcpu) != 0)
if (emulate_instruction(vcpu) != 0) {
goto out;
}
}
status = 0;
@@ -427,8 +444,9 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
*/
(void)memset(&vcpu->req, 0, sizeof(struct vhm_request));
if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0)
if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0) {
goto out;
}
status = acrn_insert_request_wait(vcpu, &vcpu->req);
}
@@ -489,16 +507,18 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
* to force snooping of PCIe devices if the page
* is cachable
*/
if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED)
if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
prot |= IA32E_EPT_SNOOP_CTRL;
}
map_mem(&map_params, (void *)hpa,
(void *)gpa, size, prot);
} else if (type == MAP_UNMAP) {
unmap_mem(&map_params, (void *)hpa, (void *)gpa,
size, prot);
} else
} else {
ASSERT(false, "unknown map type");
}
foreach_vcpu(i, vm, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
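
On the handler dispatch in ept_violation_vmexit_handler() above: accesses
with no overlap are skipped, partial overlaps are treated as errors, and
only accesses fully inside a registered range are emulated. The interval
logic, as a stand-alone sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

enum mmio_match { MMIO_NO_OVERLAP, MMIO_PARTIAL, MMIO_CONTAINED };

static enum mmio_match classify(uint64_t paddr, uint64_t size,
		uint64_t range_start, uint64_t range_end)
{
	if ((paddr + size <= range_start) || (paddr >= range_end)) {
		return MMIO_NO_OVERLAP;	/* "continue" in the loop above */
	}
	if ((paddr >= range_start) && (paddr + size <= range_end)) {
		return MMIO_CONTAINED;	/* dispatched to this handler */
	}
	return MMIO_PARTIAL;	/* straddles a boundary: rejected */
}

int main(void)
{
	/* 4-byte access at 0x1000 vs. a handler covering [0x1000, 0x2000) */
	printf("%d\n", (int)classify(0x1000U, 4U, 0x1000U, 0x2000U));
	return 0;
}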

---- File 5 of 5 ----

@@ -41,8 +41,9 @@ inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (vcpu->vcpu_id == vcpu_id)
if (vcpu->vcpu_id == vcpu_id) {
return vcpu;
}
}
return NULL;
@@ -54,8 +55,9 @@ inline struct vcpu *vcpu_from_pid(struct vm *vm, uint16_t pcpu_id)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (vcpu->pcpu_id == pcpu_id)
if (vcpu->pcpu_id == pcpu_id) {
return vcpu;
}
}
return NULL;
@@ -67,8 +69,9 @@ inline struct vcpu *get_primary_vcpu(struct vm *vm)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (is_vcpu_bsp(vcpu))
if (is_vcpu_bsp(vcpu)) {
return vcpu;
}
}
return NULL;
@@ -97,8 +100,9 @@ inline bool vm_lapic_disabled(struct vm *vm)
struct vcpu *vcpu;
foreach_vcpu(i, vm, vcpu) {
if (vlapic_enabled(vcpu->arch_vcpu.vlapic))
if (vlapic_enabled(vcpu->arch_vcpu.vlapic)) {
return false;
}
}
return true;
@@ -112,16 +116,20 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
cpu_mode = get_vcpu_mode(vcpu);
if (cpu_mode == CPU_MODE_REAL)
if (cpu_mode == CPU_MODE_REAL) {
return PAGING_MODE_0_LEVEL;
}
else if (cpu_mode == CPU_MODE_PROTECTED) {
if ((cur_context->cr4 & CR4_PAE) != 0U)
if ((cur_context->cr4 & CR4_PAE) != 0U) {
return PAGING_MODE_3_LEVEL;
else if ((cur_context->cr0 & CR0_PG) != 0U)
}
else if ((cur_context->cr0 & CR0_PG) != 0U) {
return PAGING_MODE_2_LEVEL;
}
return PAGING_MODE_0_LEVEL;
} else /* compatibility or 64bit mode */
} else { /* compatibility or 64bit mode */
return PAGING_MODE_4_LEVEL;
}
}
/* TODO: Add code to check for Reserved bits, SMAP and PKE when doing translation
@@ -138,8 +146,9 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
int ret = 0;
int fault = 0;
if (pw_info->level < 1)
if (pw_info->level < 1) {
return -EINVAL;
}
addr = pw_info->top_entry;
i = pw_info->level;
@@ -157,11 +166,12 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
index = (gva >> shift) & ((1UL << pw_info->width) - 1UL);
page_size = 1UL << shift;
if (pw_info->width == 10U)
if (pw_info->width == 10U) {
/* 32bit entry */
entry = *((uint32_t *)(base + 4U * index));
else
} else {
entry = *((uint64_t *)(base + 8U * index));
}
/* check if the entry present */
if ((entry & MMU_32BIT_PDE_P) == 0U) {
@@ -172,21 +182,25 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
if (pw_info->is_write_access && ((entry & MMU_32BIT_PDE_RW) == 0U)) {
/* Case1: Supermode and wp is 1
* Case2: Usermode */
if (!(!pw_info->is_user_mode && !pw_info->wp))
if (!(!pw_info->is_user_mode && !pw_info->wp)) {
fault = 1;
}
}
/* check for nx, since for 32-bit paging, the XD bit is
* reserved(0), use the same logic as PAE/4-level paging */
if (pw_info->is_inst_fetch && pw_info->nxe &&
((entry & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE) != 0U))
((entry & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE) != 0U)) {
fault = 1;
}
/* check for U/S */
if (((entry & MMU_32BIT_PDE_US) == 0U) && pw_info->is_user_mode)
if (((entry & MMU_32BIT_PDE_US) == 0U) && pw_info->is_user_mode) {
fault = 1;
}
if (pw_info->pse && (i > 0 && ((entry & MMU_32BIT_PDE_PS) != 0U)))
if (pw_info->pse && (i > 0 && ((entry & MMU_32BIT_PDE_PS) != 0U))) {
break;
}
addr = entry;
}
@@ -263,8 +277,9 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
struct page_walk_info pw_info;
int ret = 0;
if ((gpa == NULL) || (err_code == NULL))
if ((gpa == NULL) || (err_code == NULL)) {
return -EINVAL;
}
*gpa = 0UL;
pw_info.top_entry = cur_context->cr3;
@@ -290,12 +305,14 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
pw_info.pse = ((cur_context->cr4 & CR4_PSE) != 0UL);
pw_info.nxe = false;
ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
} else
} else {
*gpa = gva;
}
if (ret == -EFAULT) {
if (pw_info.is_user_mode)
if (pw_info.is_user_mode) {
*err_code |= PAGE_FAULT_US_FLAG;
}
}
return ret;
@@ -314,8 +331,9 @@ static inline uint32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
return 0;
}
if (fix_pg_size != 0U)
if (fix_pg_size != 0U) {
pg_size = fix_pg_size;
}
offset_in_pg = (uint32_t)gpa & (pg_size - 1U);
len = (size > (pg_size - offset_in_pg)) ?
@@ -323,10 +341,11 @@ static inline uint32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
g_ptr = HPA2HVA(hpa);
if (cp_from_vm)
if (cp_from_vm) {
(void)memcpy_s(h_ptr, len, g_ptr, len);
else
} else {
(void)memcpy_s(g_ptr, len, h_ptr, len);
}
return len;
}
@@ -343,8 +362,9 @@ static inline int copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
while (size > 0U) {
len = _copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
if (len == 0U)
if (len == 0U) {
return -EINVAL;
}
gpa += len;
h_ptr += len;
@@ -380,8 +400,10 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
len = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
PAGE_SIZE_4K, cp_from_vm);
if (len == 0U)
if (len == 0U) {
return -EINVAL;
}
gva += len;
h_ptr += len;
@@ -456,8 +478,9 @@ void init_e820(void)
mmap[i].baseaddr, mmap[i].length);
}
}
} else
} else {
ASSERT(false, "no multiboot info found");
}
}
@@ -474,8 +497,9 @@ void obtain_e820_mem_info(void)
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
if (e820_mem.mem_bottom > entry->baseaddr)
if (e820_mem.mem_bottom > entry->baseaddr) {
e820_mem.mem_bottom = entry->baseaddr;
}
if (entry->baseaddr + entry->length
> e820_mem.mem_top) {
@@ -595,9 +619,10 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
/* update ram entries to WB attr */
for (i = 0U; i < e820_entries; i++) {
entry = &e820[i];
if (entry->type == E820_TYPE_RAM)
if (entry->type == E820_TYPE_RAM) {
ept_mmap(vm, entry->baseaddr, entry->baseaddr,
entry->length, MAP_MEM, attr_wb);
}
}
dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
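
Finally, the page-walk index extraction in _gva2gpa_common() above,
index = (gva >> shift) & ((1UL << width) - 1UL), covers both 32-bit paging
(width 10) and PAE/4-level paging (width 9). A stand-alone sketch for plain
4-level paging, with shifts 39/30/21/12 (illustrative, not ACRN code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gva = 0x00007f1234567abcULL;	/* example address */
	int level;

	/* Walk from PML4 (level 4) down to the page table (level 1). */
	for (level = 3; level >= 0; level--) {
		uint32_t shift = 12U + (9U * (uint32_t)level);
		uint64_t index = (gva >> shift) & ((1ULL << 9U) - 1ULL);

		printf("level %d index: %" PRIu64 "\n", level + 1, index);
	}
	printf("page offset: 0x%" PRIx64 "\n", gva & 0xfffULL);
	return 0;
}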