hv: dev: fix "Procedure has more than one exit point"
The IEC 61508 and ISO 26262 standards highly recommend the single-exit rule, so reduce the number of "return" statements per function. Fix the violations that match the cases listed below:
1. The function has two return statements.
2. The first return statement returns an error code after checking whether a variable is valid.
Fix these violations by restructuring the code into "if ... else" form.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 414860fb89 (parent ba44417d96)
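
For readers skimming the diff below, here is a minimal, self-contained sketch of the two transformations this commit applies. All names in it (struct example_dev, dev_ready_two_exits, dev_ready_single_exit, flush_if_needed_single_exit) are hypothetical and only illustrate the pattern; they are not taken from the ACRN sources. Value-returning functions gain a local "ret" and a single return at the end; void functions instead invert the guard condition so the body sits in one if block.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical type used only for this sketch. */
struct example_dev {
    bool ready;
    bool dirty;
};

/* Before: two exit points -- the kind of code the coding checkers flag. */
static bool dev_ready_two_exits(const struct example_dev *dev)
{
    if (dev == NULL) {
        return false;
    }

    return dev->ready;
}

/* After: the early return is folded into an if/else and the result is
 * carried in a local variable, so the function has a single return. */
static bool dev_ready_single_exit(const struct example_dev *dev)
{
    bool ret;

    if (dev == NULL) {
        ret = false;
    } else {
        ret = dev->ready;
    }

    return ret;
}

/* Void functions in this commit use the second variant: the guard that used
 * to trigger an early return is inverted, and the body is wrapped in a single
 * if block (compare iommu_flush_cache and fault_record_analysis below). */
static void flush_if_needed_single_exit(struct example_dev *dev)
{
    if (dev->dirty) {
        printf("flushing\n");
        dev->dirty = false;
    }
}

int main(void)
{
    struct example_dev d = { .ready = true, .dirty = true };

    flush_if_needed_single_exit(&d);

    /* Both forms of the bool helper return the same value. */
    return (dev_ready_two_exits(&d) == dev_ready_single_exit(&d)) ? 0 : 1;
}

Either form keeps the original result and error semantics; only the number of exit points changes, which is what the referenced checkers count.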
@@ -55,12 +55,16 @@ ptirq_lookup_entry_by_vpin(struct acrn_vm *vm, uint8_t virt_pin, bool pic_pin)
 #ifdef CONFIG_COM_IRQ
 static bool ptdev_hv_owned_intx(const struct acrn_vm *vm, const union source_id *virt_sid)
 {
+    bool ret;
+
     /* vm0 vuart pin is owned by hypervisor under debug version */
     if (is_vm0(vm) && (virt_sid->intx_id.pin == CONFIG_COM_IRQ)) {
-        return true;
+        ret = true;
     } else {
-        return false;
+        ret = false;
     }
+
+    return ret;
 }
 #endif /* CONFIG_COM_IRQ */
 
@@ -496,10 +500,7 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
     bool pic_pin = (vpin_src == PTDEV_VPIN_PIC);
 
     entry = ptirq_lookup_entry_by_vpin(vm, virt_pin, pic_pin);
-    if (entry == NULL) {
-        return;
-    }
-
-    phys_irq = entry->allocated_pirq;
+    if (entry != NULL) {
+        phys_irq = entry->allocated_pirq;
 
         /* NOTE: only Level trigger will process EOI/ACK and if we got here
@@ -527,6 +528,7 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
         dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x acked vr: 0x%x",
             phys_irq, irq_to_vector(phys_irq));
         gsi_unmask_irq(phys_irq);
+    }
 }
 
 /* Main entry for PCI device assignment with MSI and MSI-X
@@ -266,29 +266,35 @@ static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
     vcpu_retain_rip(vcpu);
 }
 
-static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
+static int32_t vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
 {
     uint32_t vector = vcpu->arch.exception_info.exception;
+    int32_t ret;
 
     if (vector == IDT_MC || vector == IDT_BP || vector == IDT_DB) {
         vcpu_inject_exception(vcpu, vector);
-        return 1;
+        ret = 1;
+    } else {
+        ret = 0;
     }
 
-    return 0;
+    return ret;
 }
 
-static int vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
+static int32_t vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
 {
     uint32_t vector = vcpu->arch.exception_info.exception;
+    int32_t ret;
 
     /* high priority exception already be injected */
     if (vector <= NR_MAX_VECTOR) {
         vcpu_inject_exception(vcpu, vector);
-        return 1;
+        ret = 1;
+    } else {
+        ret = 0;
     }
 
-    return 0;
+    return ret;
 }
 
 /* Inject external interrupt to guest */
@@ -357,10 +363,11 @@ int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
     return 0;
 }
 
-int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
+int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
 {
     uint32_t intr_info;
     struct intr_excp_ctx ctx;
+    int32_t ret;
 
     intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
     if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
@@ -368,9 +375,8 @@ int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
         != VMX_INT_TYPE_EXT_INT)) {
         pr_err("Invalid VM exit interrupt info:%x", intr_info);
         vcpu_retain_rip(vcpu);
-        return -EINVAL;
-    }
-
+        ret = -EINVAL;
+    } else {
         ctx.vector = intr_info & 0xFFU;
         ctx.rip = vcpu_get_rip(vcpu);
         ctx.rflags = vcpu_get_rflags(vcpu);
@@ -385,8 +391,10 @@ int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
         vcpu_retain_rip(vcpu);
 
         TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0UL);
+        ret = 0;
+    }
 
-    return 0;
+    return ret;
 }
 
 int acrn_handle_pending_request(struct acrn_vcpu *vcpu)
@@ -144,11 +144,15 @@ static inline uint8_t* get_ctx_table(uint32_t dmar_index, uint8_t bus_no)
 
 bool iommu_snoop_supported(struct acrn_vm *vm)
 {
+    bool ret;
+
     if (vm->iommu == NULL || vm->iommu->iommu_snoop) {
-        return true;
+        ret = true;
+    } else {
+        ret = false;
     }
 
-    return false;
+    return ret;
 }
 
 static struct dmar_drhd_rt dmar_drhd_units[CONFIG_MAX_IOMMU_NUM];
@@ -267,13 +271,11 @@ static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
     uint32_t i;
 
     /* if vtd support page-walk coherency, no need to flush cacheline */
-    if (iommu_ecap_c(dmar_unit->ecap) != 0U) {
-        return;
-    }
-
+    if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
         for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
             clflush((char *)p + i);
         }
+    }
 }
 
 #if DBG_IOMMU
@@ -478,16 +480,14 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_unit)
 {
     uint32_t status;
 
-    if (iommu_cap_rwbf(dmar_unit->cap) == 0U) {
-        return;
-    }
-
+    if (iommu_cap_rwbf(dmar_unit->cap) != 0U) {
         spinlock_obtain(&(dmar_unit->lock));
         iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd | DMA_GCMD_WBF);
 
         /* read lower 32 bits to check */
         dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_WBFS, true, &status);
         spinlock_release(&(dmar_unit->lock));
+    }
 }
 
 /*
@@ -687,10 +687,7 @@ static void fault_status_analysis(uint32_t status)
 
 static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 {
-    if (dma_frcd_up_f(high)) {
-        return;
-    }
-
+    if (!dma_frcd_up_f(high)) {
         /* currently skip PASID related parsing */
         pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
             (dma_frcd_up_t(high) != 0U) ? "Read/Atomic" : "Write", dma_frcd_up_fr(high),
@@ -700,6 +697,7 @@ static void fault_record_analysis(__unused uint64_t low, uint64_t high)
         pr_info("Address Type: 0x%x", dma_frcd_up_at(high));
     }
 #endif
+    }
 }
 
 static void dmar_fault_handler(uint32_t irq, void *data)
@@ -1011,9 +1009,8 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
 
     if (translation_table == 0UL) {
         pr_err("translation table is NULL");
-        return NULL;
-    }
-
+        domain = NULL;
+    } else {
         /*
          * A hypercall is called to create an iommu domain for a valid VM,
          * and hv code limit the VM number to CONFIG_MAX_VM_NUM.
@@ -1029,6 +1026,7 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
 
         dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
             vmid_to_domainid(domain->vm_id), domain->vm_id, domain->trans_table_ptr);
+    }
 
     return domain;
 }
@@ -1105,14 +1103,15 @@ void resume_iommu(void)
 
 int init_iommu(void)
 {
-    int ret = 0;
+    int ret;
 
     ret = register_hrhd_units();
     if (ret != 0) {
         return ret;
-    }
-
-    do_action_for_iommus(dmar_prepare);
+    } else {
+        do_action_for_iommus(dmar_prepare);
+        ret = 0;
+    }
 
     return ret;
 }