hv: hypercall: fix complicated violations of "Procedure has more than one exit point"
Fix the complicated violations with the methods listed below:
1. Use "if ... else ..." format instead of multiple returns.
2. Add a status variable at the proper position (for example "bool bdf_valid", which can be found in this patch).

V1->V2: merge branches.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
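Illustrative sketch (not part of the patch; the function and helper names below are hypothetical): the rule named in the subject, whose wording matches MISRA C's single-exit rule, is fixed by folding early returns into an if/else chain and carrying the result in a status variable to one exit point.

/* Before: two exit points. */
int32_t hcall_foo(const struct acrn_vm *vm)
{
    if (vm == NULL) {
        return -EINVAL;    /* exit point 1 */
    }
    return do_foo(vm);     /* exit point 2 */
}

/* After: one exit point; a status flag records intermediate failures. */
int32_t hcall_foo(const struct acrn_vm *vm)
{
    int32_t ret;
    bool arg_valid = true;

    if (vm == NULL) {
        arg_valid = false;
        ret = -EINVAL;
    }
    if (arg_valid) {
        ret = do_foo(vm);
    }

    return ret;            /* the only exit point */
}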
parent f680ed5d6a
commit d6a2268282
@@ -469,7 +469,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 	/* make sure we have set req_buf */
 	if ((target_vm == NULL) || (target_vm->sw.io_shared_page == NULL)) {
-		pr_err("%s, invalid parameter\n", __func__);
+		pr_err("%p %s, invalid parameter\n", target_vm, __func__);
 		ret = -EINVAL;
 	} else {
 		dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
@@ -584,41 +584,38 @@ int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param)
 	struct vm_memory_region mr;
 	struct acrn_vm *target_vm;
 	uint32_t idx;
+	int32_t ret = 0;
 
 	(void)memset((void *)&regions, 0U, sizeof(regions));
 
 	if (copy_from_gpa(vm, &regions, param, sizeof(regions)) != 0) {
 		pr_err("%s: Unable copy param from vm\n", __func__);
-		return -EFAULT;
-	}
-
-	target_vm = get_vm_from_vmid(regions.vmid);
-	if (target_vm == NULL) {
-		return -EINVAL;
-	}
-
-	if (is_vm0(target_vm)) {
-		pr_err("%s: Targeting to service vm", __func__);
-		return -EFAULT;
-	}
-
-	idx = 0U;
-	while (idx < regions.mr_num) {
-		int32_t ret;
-
-		if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
-			pr_err("%s: Copy mr entry fail from vm\n", __func__);
-			return -EFAULT;
-		}
-
-		ret = set_vm_memory_region(vm, target_vm, &mr);
-		if (ret < 0) {
-			return ret;
-		}
-		idx++;
-	}
-	return 0;
+		ret = -EFAULT;
+	} else {
+		target_vm = get_vm_from_vmid(regions.vmid);
+		if ((target_vm == NULL) || is_vm0(target_vm)) {
+			pr_err("%p %s:target_vm is invalid or Targeting to service vm", target_vm, __func__);
+			ret = -EFAULT;
+		} else {
+			idx = 0U;
+			while (idx < regions.mr_num) {
+				if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
+					pr_err("%s: Copy mr entry fail from vm\n", __func__);
+					ret = -EFAULT;
+					break;
+				}
+
+				ret = set_vm_memory_region(vm, target_vm, &mr);
+				if (ret < 0) {
+					break;
+				}
+				idx++;
+			}
+		}
+	}
+
+	return ret;
 }
 
 /**
@@ -708,29 +705,26 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
  */
 int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 {
-	int32_t ret = 0;
+	int32_t ret;
 	struct vm_gpa2hpa v_gpa2hpa;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (target_vm == NULL) {
-		return -1;
-	}
-
 	(void)memset((void *)&v_gpa2hpa, 0U, sizeof(v_gpa2hpa));
-	if (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
-		pr_err("HCALL gpa2hpa: Unable copy param from vm\n");
-		return -1;
-	}
-	v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
-	if (v_gpa2hpa.hpa == INVALID_HPA) {
-		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
-			__func__, target_vm->vm_id, v_gpa2hpa.gpa);
-		return -EINVAL;
-	}
-	if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
+	if ((target_vm == NULL) || copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
+		pr_err("target_vm is invalid or HCALL gpa2hpa: Unable copy param from vm\n");
+		ret = -1;
+	} else {
+		v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa);
+		if (v_gpa2hpa.hpa == INVALID_HPA) {
+			pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+				__func__, target_vm->vm_id, v_gpa2hpa.gpa);
+			ret = -EINVAL;
+		} else if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
+			pr_err("%s: Unable copy param to vm\n", __func__);
+			ret = -1;
+		} else {
+			ret = 0;
+		}
 	}
 
 	return ret;
@@ -753,40 +747,47 @@ int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	int32_t ret;
 	uint16_t bdf;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
+	bool bdf_valid = true;
+	bool iommu_valid = true;
 
-	if (target_vm == NULL) {
-		pr_err("%s, vm is null\n", __func__);
-		return -EINVAL;
-	}
-
-	if (param < 0x10000UL) {
-		bdf = (uint16_t) param;
-	} else {
-		if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
-			pr_err("%s: Unable copy param from vm %d\n",
-				__func__, vm->vm_id);
-			return -EIO;
-		}
-	}
-
-	/* create a iommu domain for target VM if not created */
-	if (target_vm->iommu == NULL) {
-		if (target_vm->arch_vm.nworld_eptp == NULL) {
-			pr_err("%s, EPT of VM not set!\n",
-				__func__, target_vm->vm_id);
-			return -EPERM;
-		}
-		/* TODO: how to get vm's address width? */
-		target_vm->iommu = create_iommu_domain(vmid,
-			hva2hpa(target_vm->arch_vm.nworld_eptp), 48U);
-		if (target_vm->iommu == NULL) {
-			return -ENODEV;
-		}
-
-	}
-	ret = assign_iommu_device(target_vm->iommu,
-		(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
+	if (target_vm != NULL) {
+		if (param < 0x10000UL) {
+			bdf = (uint16_t) param;
+		} else {
+			if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
+				pr_err("%s: Unable copy param from vm %d\n",
+					__func__, vm->vm_id);
+				bdf_valid = false;
+				ret = -EIO;
+			}
+		}
+
+		/* create a iommu domain for target VM if not created */
+		if (bdf_valid && target_vm->iommu == NULL) {
+			if (target_vm->arch_vm.nworld_eptp == NULL) {
+				pr_err("%s, EPT of VM not set!\n",
+					__func__, target_vm->vm_id);
+				iommu_valid = false;
+				ret = -EPERM;
+			} else {
+				/* TODO: how to get vm's address width? */
+				target_vm->iommu = create_iommu_domain(vmid,
+					hva2hpa(target_vm->arch_vm.nworld_eptp), 48U);
+				if (target_vm->iommu == NULL) {
+					iommu_valid = false;
+					ret = -ENODEV;
+				}
+			}
+		}
+		if (bdf_valid && iommu_valid) {
+			ret = assign_iommu_device(target_vm->iommu,
+				(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
+		}
+	} else {
+		pr_err("%s, vm is null\n", __func__);
+		ret = -EINVAL;
+	}
 
 	return ret;
 }
 
@@ -806,22 +807,27 @@ int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 {
 	int32_t ret = 0;
 	uint16_t bdf;
+	bool bdf_valid = true;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	if (target_vm == NULL) {
-		return -1;
-	}
-
-	if (param < 0x10000UL) {
-		bdf = (uint16_t) param;
+		ret = -1;
 	} else {
-		if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
-			pr_err("%s: Unable copy param to vm\n", __func__);
-			return -1;
+		if (param < 0x10000UL) {
+			bdf = (uint16_t) param;
+		} else {
+			if (copy_from_gpa(vm, &bdf, param, sizeof(bdf)) != 0) {
+				pr_err("%s: Unable copy param to vm\n", __func__);
+				bdf_valid = false;
+				ret = -1;
+			}
+		}
+
+		if (bdf_valid) {
+			ret = unassign_iommu_device(target_vm->iommu,
+				(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
 		}
 	}
-	ret = unassign_iommu_device(target_vm->iommu,
-		(uint8_t)(bdf >> 8U), (uint8_t)(bdf & 0xffU));
 
 	return ret;
 }
@@ -843,32 +849,29 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa
 	struct hc_ptdev_irq irq;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (target_vm == NULL) {
-		return -1;
-	}
-
 	(void)memset((void *)&irq, 0U, sizeof(irq));
-
-	if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
+	if (target_vm == NULL) {
+		ret = -1;
+	} else if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
-	}
-
-	/* Inform vPCI about the interupt info changes */
+		ret = -1;
+	} else {
+		/* Inform vPCI about the interupt info changes */
 #ifndef CONFIG_PARTITION_MODE
-	vpci_set_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
+		vpci_set_ptdev_intr_info(target_vm, irq.virt_bdf, irq.phys_bdf);
 #endif
 
-	if (irq.type == IRQ_INTX) {
-		ret = ptirq_add_intx_remapping(target_vm, irq.is.intx.virt_pin,
-				irq.is.intx.phys_pin, irq.is.intx.pic_pin);
-	} else if ((irq.type == IRQ_MSI) || (irq.type == IRQ_MSIX)) {
-		ret = ptirq_add_msix_remapping(target_vm,
-				irq.virt_bdf, irq.phys_bdf,
-				irq.is.msix.vector_cnt);
-	} else {
-		pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
-		ret = -1;
+		if (irq.type == IRQ_INTX) {
+			ret = ptirq_add_intx_remapping(target_vm, irq.is.intx.virt_pin,
+					irq.is.intx.phys_pin, irq.is.intx.pic_pin);
+		} else if ((irq.type == IRQ_MSI) || (irq.type == IRQ_MSIX)) {
+			ret = ptirq_add_msix_remapping(target_vm,
+					irq.virt_bdf, irq.phys_bdf,
+					irq.is.msix.vector_cnt);
+		} else {
+			pr_err("%s: Invalid irq type: %u\n", __func__, irq.type);
+			ret = -1;
+		}
 	}
 
 	return ret;