hv: fix 'Function return value potentially unused'

MISRA C requires that when the return value of a function call is
assigned to a local variable, that value shall be used on every
execution path.

This patch moves each such assignment to the point just before the
variable is used.
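
To illustrate the rule and the fix, here is a minimal sketch of the
pattern applied throughout this patch; the names foo() and bar_*()
are hypothetical and do not come from the hypervisor sources:

    #include <stdbool.h>
    #include <stdint.h>

    uint32_t foo(void); /* hypothetical helper returning a value */

    /* Non-compliant: 'val' is assigned on entry, but the early-return
     * path below never uses it. */
    uint32_t bar_noncompliant(bool cond)
    {
            uint32_t val = foo();

            if (!cond) {
                    return 0U; /* 'val' is unused on this path */
            }

            return val + 1U;
    }

    /* Compliant: the assignment is moved right before the use, so the
     * value is consumed on every path where the call is made. */
    uint32_t bar_compliant(bool cond)
    {
            uint32_t val;

            if (!cond) {
                    return 0U;
            }

            val = foo();
            return val + 1U;
    }

Note that moving the assignment also means the call itself no longer
happens on the early-return path. The patch implicitly assumes this is
safe for the relocated calls (for example gpa2hpa() and vcpu_vlapic()),
since their return values, not their side effects, are what the
callers need.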

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Shiqing Gao, 2018-11-29 15:04:38 +08:00 (committed by wenlingz)
commit db3c5746f3, parent e0260b4496
6 changed files with 30 additions and 33 deletions


@@ -116,16 +116,14 @@ static void ptdev_build_physical_msi(struct acrn_vm *vm, struct ptdev_msi_info *
 }
 
 static union ioapic_rte
-ptdev_build_physical_rte(struct acrn_vm *vm,
-		struct ptdev_remapping_info *entry)
+ptdev_build_physical_rte(struct acrn_vm *vm, struct ptdev_remapping_info *entry)
 {
 	union ioapic_rte rte;
 	uint32_t phys_irq = entry->allocated_pirq;
-	uint32_t vector = irq_to_vector(phys_irq);
 	union source_id *virt_sid = &entry->virt_sid;
 
 	if (virt_sid->intx_id.src == PTDEV_VPIN_IOAPIC) {
-		uint64_t vdmask, pdmask, delmode, dest_mask;
+		uint64_t vdmask, pdmask, delmode, dest_mask, vector;
 		uint32_t dest;
 		union ioapic_rte virt_rte;
 		bool phys;
@@ -164,9 +162,9 @@ ptdev_build_physical_rte(struct acrn_vm *vm,
 		}
 
 		/* update physical delivery mode, dest mode(logical) & vector */
-		rte.full &= ~(IOAPIC_RTE_DESTMOD |
-				IOAPIC_RTE_DELMOD | IOAPIC_RTE_INTVEC);
-		rte.full |= IOAPIC_RTE_DESTLOG | delmode | (uint64_t)vector;
+		vector = (uint64_t)irq_to_vector(phys_irq);
+		rte.full &= ~(IOAPIC_RTE_DESTMOD | IOAPIC_RTE_DELMOD | IOAPIC_RTE_INTVEC);
+		rte.full |= IOAPIC_RTE_DESTLOG | delmode | vector;
 		dest_mask = calculate_logical_dest_mask(pdmask);
 
 		/* update physical dest field */


@@ -899,7 +899,6 @@ static int get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
 	enum vm_cpu_mode cpu_mode;
 	uint64_t val, gpa;
 
-	val = vm_get_register(vcpu, CPU_REG_RDI);
 	vm_get_seg_desc(CPU_REG_ES, &desc);
 	cpu_mode = get_vcpu_mode(vcpu);
@@ -917,8 +916,8 @@ static int get_gva_di_check(struct acrn_vcpu *vcpu, struct instr_emul_vie *vie,
 		}
 	}
 
-	if (vie_calculate_gla(cpu_mode, CPU_REG_ES, &desc, val, addrsize, gva)
-			!= 0) {
+	val = vm_get_register(vcpu, CPU_REG_RDI);
+	if (vie_calculate_gla(cpu_mode, CPU_REG_ES, &desc, val, addrsize, gva) != 0) {
 		goto exception_inject;
 	}


@@ -664,7 +664,6 @@ vlapic_lvt_write_handler(struct acrn_vlapic *vlapic, uint32_t offset)
 	lapic = &(vlapic->apic_page);
 	lvtptr = vlapic_get_lvtptr(vlapic, offset);
 	val = *lvtptr;
-	idx = lvt_off_to_idx(offset);
 
 	if ((lapic->svr.v & APIC_SVR_ENABLE) == 0U) {
 		val |= APIC_LVT_M;
@@ -719,6 +718,7 @@ vlapic_lvt_write_handler(struct acrn_vlapic *vlapic, uint32_t offset)
 	}
 	*lvtptr = val;
+	idx = lvt_off_to_idx(offset);
 	atomic_store32(&vlapic->lvt_last[idx], val);
 }


@@ -64,11 +64,11 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 	uint64_t sworld_pml4e;
 	uint64_t gpa;
 	/* Check the HPA of parameter gpa_orig when invoking check_continuos_hpa */
-	uint64_t hpa = gpa2hpa(vm, gpa_orig);
+	uint64_t hpa;
 	uint64_t table_present = EPT_RWX;
 	uint64_t pdpte, *dest_pdpte_p, *src_pdpte_p;
 	void *sub_table_addr, *pml4_base;
-	struct acrn_vm *vm0 = get_vm_from_vmid(0U);
+	struct acrn_vm *vm0;
 	uint16_t i;
 
 	if ((vm->sworld_control.flag.supported == 0UL)
@@ -86,9 +86,10 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 		return;
 	}
 
+	hpa = gpa2hpa(vm, gpa_orig);
+
 	/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
-	ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-			gpa_orig, size);
+	ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);
 
 	/* Copy PDPT entries from Normal world to Secure world
 	 * Secure world can access Normal World's memory,
@@ -105,7 +106,8 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 	/* The trusty memory is remapped to guest physical address
 	 * of gpa_rebased to gpa_rebased + size
 	 */
-	sub_table_addr = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base + TRUSTY_PML4_PAGE_NUM(TRUSTY_EPT_REBASE_GPA);
+	sub_table_addr = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base +
+			TRUSTY_PML4_PAGE_NUM(TRUSTY_EPT_REBASE_GPA);
 	(void)memset(sub_table_addr, 0U, CPU_PAGE_SIZE);
 	sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
 	set_pgentry((uint64_t *)pml4_base, sworld_pml4e);
@@ -128,29 +130,26 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
 		dest_pdpte_p++;
 	}
 
-	/* Map [gpa_rebased, gpa_rebased + size) to secure ept mapping
-	 */
-	ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp,
-			hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
+	/* Map [gpa_rebased, gpa_rebased + size) to secure ept mapping */
+	ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
 
 	/* Get the gpa address in SOS */
 	gpa = vm0_hpa2gpa(hpa);
 
 	/* Unmap trusty memory space from sos ept mapping*/
-	ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp,
-			gpa, size);
+	vm0 = get_vm_from_vmid(0U);
+	ept_mr_del(vm0, (uint64_t *)vm0->arch_vm.nworld_eptp, gpa, size);
 
-	/* Backup secure world info, will be used when
-	 * destroy secure world and suspend UOS */
+	/* Backup secure world info, will be used when destroy secure world and suspend UOS */
 	vm->sworld_control.sworld_memory.base_gpa_in_sos = gpa;
 	vm->sworld_control.sworld_memory.base_gpa_in_uos = gpa_orig;
 	vm->sworld_control.sworld_memory.base_hpa = hpa;
 	vm->sworld_control.sworld_memory.length = size;
 }
 
 void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 {
-	struct acrn_vm *vm0 = get_vm_from_vmid(0U);
+	struct acrn_vm *vm0;
 	uint64_t hpa = vm->sworld_control.sworld_memory.base_hpa;
 	uint64_t gpa_sos = vm->sworld_control.sworld_memory.base_gpa_in_sos;
 	uint64_t gpa_uos = vm->sworld_control.sworld_memory.base_gpa_in_uos;
@@ -172,12 +171,11 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
 		vm->arch_vm.sworld_eptp = NULL;
 
 		/* restore memory to SOS ept mapping */
-		ept_mr_add(vm0, vm0->arch_vm.nworld_eptp,
-				hpa, gpa_sos, size, EPT_RWX | EPT_WB);
+		vm0 = get_vm_from_vmid(0U);
+		ept_mr_add(vm0, vm0->arch_vm.nworld_eptp, hpa, gpa_sos, size, EPT_RWX | EPT_WB);
 
 		/* Restore memory to guest normal world */
-		ept_mr_add(vm, vm->arch_vm.nworld_eptp,
-				hpa, gpa_uos, size, EPT_RWX | EPT_WB);
+		ept_mr_add(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
 	}


@@ -397,7 +397,7 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	uint32_t error_code;
 	struct acrn_vcpu_arch * arch = &vcpu->arch;
 	uint64_t *pending_req_bits = &arch->pending_req;
-	struct acrn_vlapic *vlapic = vcpu_vlapic(vcpu);
+	struct acrn_vlapic *vlapic;
 
 	if (bitmap_test_and_clear_lock(ACRN_REQUEST_TRP_FAULT,
 				pending_req_bits)) {
@@ -450,8 +450,8 @@ int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	 * needed. And then try to handle vmcs event injection.
 	 */
 	if (is_apicv_intr_delivery_supported() &&
-		bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT,
-			pending_req_bits)) {
+		bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
+		vlapic = vcpu_vlapic(vcpu);
 		vlapic_apicv_inject_pir(vlapic);
 	}


@@ -365,7 +365,9 @@ static void dmar_enable_translation(struct dmar_drhd_rt *dmar_unit)
 		iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd);
 
 		/* 32-bit register */
 		dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_TES, false, &status);
+#if DBG_IOMMU
 		status = iommu_read32(dmar_unit, DMAR_GSTS_REG);
+#endif
 	}
 
 	spinlock_release(&(dmar_unit->lock));