diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c index 94ddbffa5..923cf834f 100644 --- a/hypervisor/arch/x86/guest/vm.c +++ b/hypervisor/arch/x86/guest/vm.c @@ -93,10 +93,7 @@ bool is_postlaunched_vm(const struct acrn_vm *vm) return (get_vm_config(vm->vm_id)->load_order == POST_LAUNCHED_VM); } -bool is_valid_postlaunched_vmid(uint16_t vm_id) -{ - return ((vm_id < CONFIG_MAX_VM_NUM) && is_postlaunched_vm(get_vm_from_vmid(vm_id))); -} + /** * @pre vm != NULL * @pre vm->vmid < CONFIG_MAX_VM_NUM diff --git a/hypervisor/arch/x86/guest/vmcall.c b/hypervisor/arch/x86/guest/vmcall.c index 97ea61821..38465c079 100644 --- a/hypervisor/arch/x86/guest/vmcall.c +++ b/hypervisor/arch/x86/guest/vmcall.c @@ -14,195 +14,152 @@ #include #include -static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu) +struct hc_dispatch { + /* handler(struct acrn_vm *sos_vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2) */ + int32_t (*handler)(struct acrn_vm *, struct acrn_vm *, uint64_t, uint64_t); +}; + +/* VM Dispatch table for Exit condition handling */ +static const struct hc_dispatch hc_dispatch_table[] = { + [HC_IDX(HC_GET_API_VERSION)] = { + .handler = hcall_get_api_version}, + [HC_IDX(HC_SOS_OFFLINE_CPU)] = { + .handler = hcall_sos_offline_cpu}, + [HC_IDX(HC_SET_CALLBACK_VECTOR)] = { + .handler = hcall_set_callback_vector}, + [HC_IDX(HC_GET_PLATFORM_INFO)] = { + .handler = hcall_get_platform_info}, + [HC_IDX(HC_CREATE_VM)] = { + .handler = hcall_create_vm}, + [HC_IDX(HC_DESTROY_VM)] = { + .handler = hcall_destroy_vm}, + [HC_IDX(HC_START_VM)] = { + .handler = hcall_start_vm}, + [HC_IDX(HC_RESET_VM)] = { + .handler = hcall_reset_vm}, + [HC_IDX(HC_PAUSE_VM)] = { + .handler = hcall_pause_vm}, + [HC_IDX(HC_SET_VCPU_REGS)] = { + .handler = hcall_set_vcpu_regs}, + [HC_IDX(HC_CREATE_VCPU)] = { + .handler = hcall_create_vcpu}, + [HC_IDX(HC_SET_IRQLINE)] = { + .handler = hcall_set_irqline}, + [HC_IDX(HC_INJECT_MSI)] = { + .handler = 
hcall_inject_msi}, + [HC_IDX(HC_SET_IOREQ_BUFFER)] = { + .handler = hcall_set_ioreq_buffer}, + [HC_IDX(HC_NOTIFY_REQUEST_FINISH)] = { + .handler = hcall_notify_ioreq_finish}, + [HC_IDX(HC_VM_SET_MEMORY_REGIONS)] = { + .handler = hcall_set_vm_memory_regions}, + [HC_IDX(HC_VM_WRITE_PROTECT_PAGE)] = { + .handler = hcall_write_protect_page}, + [HC_IDX(HC_VM_GPA2HPA)] = { + .handler = hcall_gpa_to_hpa}, + [HC_IDX(HC_ASSIGN_PCIDEV)] = { + .handler = hcall_assign_pcidev}, + [HC_IDX(HC_DEASSIGN_PCIDEV)] = { + .handler = hcall_deassign_pcidev}, + [HC_IDX(HC_ASSIGN_MMIODEV)] = { + .handler = hcall_assign_mmiodev}, + [HC_IDX(HC_DEASSIGN_MMIODEV)] = { + .handler = hcall_deassign_mmiodev}, + [HC_IDX(HC_SET_PTDEV_INTR_INFO)] = { + .handler = hcall_set_ptdev_intr_info}, + [HC_IDX(HC_RESET_PTDEV_INTR_INFO)] = { + .handler = hcall_reset_ptdev_intr_info}, + [HC_IDX(HC_PM_GET_CPU_STATE)] = { + .handler = hcall_get_cpu_pm_state}, + [HC_IDX(HC_VM_INTR_MONITOR)] = { + .handler = hcall_vm_intr_monitor}, + [HC_IDX(HC_SETUP_SBUF)] = { + .handler = hcall_setup_sbuf}, + [HC_IDX(HC_SETUP_HV_NPK_LOG)] = { + .handler = hcall_setup_hv_npk_log}, + [HC_IDX(HC_PROFILING_OPS)] = { + .handler = hcall_profiling_ops}, + [HC_IDX(HC_GET_HW_INFO)] = { + .handler = hcall_get_hw_info} +}; + +struct acrn_vm *parse_target_vm(struct acrn_vm *sos_vm, uint64_t hcall_id, uint64_t param1, __unused uint64_t param2) { - struct acrn_vm *sos_vm = vcpu->vm; - /* hypercall ID from guest*/ - uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8); - /* hypercall param1 from guest*/ - uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI); - /* hypercall param2 from guest*/ - uint64_t param2 = vcpu_get_gpreg(vcpu, CPU_REG_RSI); - /* hypercall param1 is a relative vm id from SOS view */ - uint16_t relative_vm_id = (uint16_t)param1; - uint16_t vm_id = rel_vmid_2_vmid(sos_vm->vm_id, relative_vm_id); - int32_t ret = -1; - - switch (hypcall_id) { - case HC_SOS_OFFLINE_CPU: - ret = hcall_sos_offline_cpu(sos_vm, param1); - break; - 
case HC_GET_API_VERSION: - ret = hcall_get_api_version(sos_vm, param1); - break; - - case HC_GET_PLATFORM_INFO: - ret = hcall_get_platform_info(sos_vm, param1); - break; - - case HC_SET_CALLBACK_VECTOR: - ret = hcall_set_callback_vector(sos_vm, param1); - - break; + struct acrn_vm *target_vm = NULL; + uint16_t vm_id = ACRN_INVALID_VMID; + struct acrn_create_vm cv; + struct set_regions regions; + uint16_t relative_vm_id; + switch (hcall_id) { case HC_CREATE_VM: - ret = hcall_create_vm(sos_vm, param1); - break; - - case HC_DESTROY_VM: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_destroy_vm(vm_id); - } - break; - - case HC_START_VM: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_start_vm(vm_id); - } - break; - - case HC_RESET_VM: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_reset_vm(vm_id); - } - break; - - case HC_PAUSE_VM: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_pause_vm(vm_id); - } - break; - - case HC_CREATE_VCPU: - ret = 0; - break; - - case HC_SET_VCPU_REGS: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_set_vcpu_regs(sos_vm, vm_id, param2); - } - break; - - case HC_SET_IRQLINE: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_set_irqline(sos_vm, vm_id, - (struct acrn_irqline_ops *)&param2); - } - break; - - case HC_INJECT_MSI: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_inject_msi(sos_vm, vm_id, param2); - } - break; - - case HC_SET_IOREQ_BUFFER: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = 
hcall_set_ioreq_buffer(sos_vm, vm_id, param2); - } - break; - - case HC_NOTIFY_REQUEST_FINISH: - /* param1: relative vmid to sos, vm_id: absolute vmid - * param2: vcpu_id */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_notify_ioreq_finish(vm_id, - (uint16_t)param2); - } - break; - - case HC_VM_SET_MEMORY_REGIONS: - ret = hcall_set_vm_memory_regions(sos_vm, param1); - break; - - case HC_VM_WRITE_PROTECT_PAGE: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_write_protect_page(sos_vm, vm_id, param2); - } - break; - - /* - * Don't do MSI remapping and make the pmsi_data equal to vmsi_data - * This is a temporary solution before this hypercall is removed from SOS - */ - case HC_VM_PCI_MSIX_REMAP: - ret = 0; - break; - - case HC_VM_GPA2HPA: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if ((vm_id < CONFIG_MAX_VM_NUM) && !is_prelaunched_vm(get_vm_from_vmid(vm_id))) { - ret = hcall_gpa_to_hpa(sos_vm, vm_id, param2); - } - break; - - case HC_ASSIGN_PCIDEV: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_assign_pcidev(sos_vm, vm_id, param2); - } - break; - - case HC_DEASSIGN_PCIDEV: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_deassign_pcidev(sos_vm, vm_id, param2); - } - break; - - case HC_ASSIGN_MMIODEV: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_assign_mmiodev(sos_vm, vm_id, param2); - } - break; - - case HC_DEASSIGN_MMIODEV: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_deassign_mmiodev(sos_vm, vm_id, param2); - } - break; - - case HC_SET_PTDEV_INTR_INFO: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_set_ptdev_intr_info(sos_vm, 
vm_id, param2); - } - break; - - case HC_RESET_PTDEV_INTR_INFO: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_reset_ptdev_intr_info(sos_vm, vm_id, param2); + if (copy_from_gpa(sos_vm, &cv, param1, sizeof(cv)) == 0) { + vm_id = get_vmid_by_uuid(&cv.uuid[0]); } break; case HC_PM_GET_CPU_STATE: - ret = hcall_get_cpu_pm_state(sos_vm, param1, param2); + vm_id = rel_vmid_2_vmid(sos_vm->vm_id, (uint16_t)((param1 & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT)); break; - case HC_VM_INTR_MONITOR: - /* param1: relative vmid to sos, vm_id: absolute vmid */ - if (is_valid_postlaunched_vmid(vm_id)) { - ret = hcall_vm_intr_monitor(sos_vm, vm_id, param2); + case HC_VM_SET_MEMORY_REGIONS: + if (copy_from_gpa(sos_vm, &regions, param1, sizeof(regions)) == 0) { + /* the vmid in regions is a relative vm id, need to convert to absolute vm id */ + vm_id = rel_vmid_2_vmid(sos_vm->vm_id, regions.vmid); } break; - + case HC_GET_API_VERSION: + case HC_SOS_OFFLINE_CPU: + case HC_SET_CALLBACK_VECTOR: + case HC_GET_PLATFORM_INFO: + case HC_SETUP_SBUF: + case HC_SETUP_HV_NPK_LOG: + case HC_PROFILING_OPS: + case HC_GET_HW_INFO: + target_vm = sos_vm; + break; default: - ret = hcall_debug(sos_vm, param1, param2, hypcall_id); + relative_vm_id = (uint16_t)param1; + vm_id = rel_vmid_2_vmid(sos_vm->vm_id, relative_vm_id); break; } + if ((target_vm == NULL) && (vm_id < CONFIG_MAX_VM_NUM)) { + target_vm = get_vm_from_vmid(vm_id); + if (hcall_id == HC_CREATE_VM) { + target_vm->vm_id = vm_id; + } + } + + return target_vm; +} + +static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu) +{ + int32_t ret = -EINVAL; + struct hc_dispatch *dispatch = NULL; + struct acrn_vm *sos_vm = vcpu->vm; + /* hypercall ID from guest*/ + uint64_t hcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8); + /* hypercall param1 from guest*/ + uint64_t param1 = vcpu_get_gpreg(vcpu, CPU_REG_RDI); + /* hypercall param2 from guest*/ + uint64_t param2 = vcpu_get_gpreg(vcpu, 
CPU_REG_RSI); + struct acrn_vm *target_vm = parse_target_vm(sos_vm, hcall_id, param1, param2); + + if (((target_vm == sos_vm) + || (((target_vm != NULL) && (target_vm != sos_vm) && is_postlaunched_vm(target_vm)))) + && (HC_IDX(hcall_id) < ARRAY_SIZE(hc_dispatch_table))) { + + /* Calculate dispatch table entry */ + dispatch = (struct hc_dispatch *)(hc_dispatch_table + HC_IDX(hcall_id)); + if (dispatch->handler != NULL) { + get_vm_lock(target_vm); + ret = dispatch->handler(sos_vm, target_vm, param1, param2); + put_vm_lock(target_vm); + } + + } return ret; } @@ -219,7 +176,6 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu) uint64_t hypcall_id = vcpu_get_gpreg(vcpu, CPU_REG_R8); if (!is_hypercall_from_ring0()) { - pr_err("hypercall 0x%lx is only allowed from RING-0!\n", hypcall_id); vcpu_inject_gp(vcpu, 0U); ret = -EACCES; } else if (hypcall_id == HC_WORLD_SWITCH) { @@ -235,7 +191,6 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu) /* Dispatch the hypercall handler */ ret = dispatch_sos_hypercall(vcpu); } else { - pr_err("hypercall 0x%lx is only allowed from SOS_VM!\n", hypcall_id); vcpu_inject_ud(vcpu); ret = -ENODEV; } @@ -243,6 +198,9 @@ int32_t vmcall_vmexit_handler(struct acrn_vcpu *vcpu) if ((ret != -EACCES) && (ret != -ENODEV)) { vcpu_set_gpreg(vcpu, CPU_REG_RAX, (uint64_t)ret); } + if (ret < 0) { + pr_err("ret=%d hypercall=0x%lx failed in %s\n", ret, hypcall_id, __func__); + } TRACE_2L(TRACE_VMEXIT_VMCALL, vm->vm_id, hypcall_id); return 0; diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c index 284316da0..b2e295858 100644 --- a/hypervisor/common/hypercall.c +++ b/hypervisor/common/hypercall.c @@ -45,18 +45,19 @@ bool is_hypercall_from_ring0(void) * The function offline specific vcpu from SOS. 
* * @param vm Pointer to VM data structure - * @param lapicid lapic id of the vcpu which wants to offline + * @param param1 lapic id of the vcpu which wants to offline * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid) +int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct acrn_vcpu *vcpu; uint16_t i; int32_t ret = 0; + uint64_t lapicid = param1; - get_vm_lock(vm); pr_info("sos offline cpu with lapicid %ld", lapicid); foreach_vcpu(i, vm, vcpu) { @@ -70,7 +71,6 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid) offline_vcpu(vcpu); } } - put_vm_lock(vm); return ret; } @@ -81,20 +81,21 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid) * The function only return api version information when VM is SOS_VM. * * @param vm Pointer to VM data structure - * @param param guest physical memory address. The api version returned + * @param param1 guest physical memory address. The api version returned * will be copied to this gpa * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param) +int32_t hcall_get_api_version(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct hc_api_version version; version.major_version = HV_API_MAJOR_VERSION; version.minor_version = HV_API_MINOR_VERSION; - return copy_to_gpa(vm, &version, param, sizeof(version)); + return copy_to_gpa(vm, &version, param1, sizeof(version)); } /** @@ -104,19 +105,20 @@ int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param) * for the current platform. * * @param vm Pointer to VM data structure. - * @param param GPA pointer to struct hc_platform_info. + * @param param1 GPA pointer to struct hc_platform_info. 
* * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non zero in case of error. */ -int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param) +int32_t hcall_get_platform_info(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct hc_platform_info pi = { 0 }; uint32_t entry_size = sizeof(struct acrn_vm_config); int32_t ret; /* to get the vm_config_info pointer */ - ret = copy_from_gpa(vm, &pi, param, sizeof(pi)); + ret = copy_from_gpa(vm, &pi, param1, sizeof(pi)); if (ret == 0) { pi.cpu_num = get_pcpu_nums(); pi.version = 0x100; /* version 1.0; byte[1:0] = major:minor version */ @@ -131,7 +133,7 @@ int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param) } if (ret == 0) { - ret = copy_to_gpa(vm, &pi, param, sizeof(pi)); + ret = copy_to_gpa(vm, &pi, param1, sizeof(pi)); } } @@ -146,67 +148,64 @@ int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param) * support later. * * @param vm Pointer to VM data structure - * @param param guest physical memory address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 guest physical memory address. This gpa points to * struct acrn_create_vm * * @pre Pointer vm shall point to SOS_VM, vm_config != NULL * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param) +int32_t hcall_create_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, __unused uint64_t param2) { - uint16_t vm_id; + uint16_t vmid = target_vm->vm_id; int32_t ret = -1; - struct acrn_vm *target_vm = NULL; + struct acrn_vm *tgt_vm = NULL; struct acrn_create_vm cv; struct acrn_vm_config* vm_config = NULL; - if (copy_from_gpa(vm, &cv, param, sizeof(cv)) == 0) { - vm_id = get_vmid_by_uuid(&cv.uuid[0]); - if ((vm_id > vm->vm_id) && (vm_id < CONFIG_MAX_VM_NUM)) { - get_vm_lock(get_vm_from_vmid(vm_id)); - if (is_poweroff_vm(get_vm_from_vmid(vm_id))) { + if (copy_from_gpa(vm, &cv, param1, sizeof(cv)) == 0) { + if (is_poweroff_vm(get_vm_from_vmid(vmid))) { - vm_config = get_vm_config(vm_id); + vm_config = get_vm_config(vmid); - /* Filter out the bits should not set by DM and then assign it to guest_flags */ - vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK); + /* Filter out the bits should not set by DM and then assign it to guest_flags */ + vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK); - /* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */ - if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) { - /* By default launch VM with all the configured pCPUs */ - uint64_t pcpu_bitmap = vm_config->cpu_affinity; + /* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */ + if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) { + /* By default launch VM with all the configured pCPUs */ + uint64_t pcpu_bitmap = vm_config->cpu_affinity; - if (cv.cpu_affinity != 0UL) { - /* overwrite the statically configured CPU affinity */ - pcpu_bitmap = cv.cpu_affinity; - } - - /* - * GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH - * set in guest_flags - */ - if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL) - && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) { - 
pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags); - } else { - if (create_vm(vm_id, pcpu_bitmap, vm_config, &target_vm) == 0) { - /* return a relative vm_id from SOS view */ - cv.vmid = vmid_2_rel_vmid(vm->vm_id, vm_id); - cv.vcpu_num = target_vm->hw.created_vcpus; - } else { - dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed"); - cv.vmid = ACRN_INVALID_VMID; - } - - ret = copy_to_gpa(vm, &cv, param, sizeof(cv)); - } - } else { - pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).", - vm_id, cv.cpu_affinity); + if (cv.cpu_affinity != 0UL) { + /* overwrite the statically configured CPU affinity */ + pcpu_bitmap = cv.cpu_affinity; } + + /* + * GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH + * set in guest_flags + */ + if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL) + && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) { + pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags); + } else { + if (create_vm(vmid, pcpu_bitmap, vm_config, &tgt_vm) == 0) { + /* return a relative vm_id from SOS view */ + cv.vmid = vmid_2_rel_vmid(vm->vm_id, vmid); + cv.vcpu_num = tgt_vm->hw.created_vcpus; + } else { + dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed"); + cv.vmid = ACRN_INVALID_VMID; + } + + ret = copy_to_gpa(vm, &cv, param1, sizeof(cv)); + } + } else { + pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).", + vmid, cv.cpu_affinity); } - put_vm_lock(get_vm_from_vmid(vm_id)); } + } return ret; @@ -218,21 +217,19 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param) * Destroy a virtual machine, it will pause target VM then shutdown it. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param target_vm Pointer to target VM data structure * * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_destroy_vm(uint16_t vmid) +int32_t hcall_destroy_vm(__unused struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) { int32_t ret = -1; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); - get_vm_lock(target_vm); if (is_paused_vm(target_vm)) { /* TODO: check target_vm guest_flags */ ret = shutdown_vm(target_vm); } - put_vm_lock(target_vm); return ret; } @@ -243,22 +240,20 @@ int32_t hcall_destroy_vm(uint16_t vmid) * The function will return -1 if the target VM does not exist or the * IOReq buffer page for the VM is not ready. * - * @param vmid ID of the VM + * @param target_vm Pointer to target VM data structure * * @return 0 on success, non-zero on error. */ -int32_t hcall_start_vm(uint16_t vmid) +int32_t hcall_start_vm(__unused struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) { int32_t ret = -1; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); - get_vm_lock(target_vm); if ((is_created_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) { /* TODO: check target_vm guest_flags */ start_vm(target_vm); ret = 0; } - put_vm_lock(target_vm); return ret; } @@ -270,22 +265,20 @@ int32_t hcall_start_vm(uint16_t vmid) * will return 0 directly for success. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param target_vm Pointer to target VM data structure * * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_pause_vm(uint16_t vmid) +int32_t hcall_pause_vm(__unused struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) { - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); int32_t ret = -1; - get_vm_lock(target_vm); if (!is_poweroff_vm(target_vm)) { /* TODO: check target_vm guest_flags */ pause_vm(target_vm); ret = 0; } - put_vm_lock(target_vm); return ret; } @@ -297,21 +290,19 @@ int32_t hcall_pause_vm(uint16_t vmid) * each vcpu state and do some initialization for guest. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param target_vm Pointer to target VM data structure * * @return 0 on success, non-zero on error. */ -int32_t hcall_reset_vm(uint16_t vmid) +int32_t hcall_reset_vm(__unused struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) { - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); int32_t ret = -1; - get_vm_lock(target_vm); if (is_paused_vm(target_vm)) { /* TODO: check target_vm guest_flags */ ret = reset_vm(target_vm); } - put_vm_lock(target_vm); return ret; } @@ -323,24 +314,22 @@ int32_t hcall_reset_vm(uint16_t vmid) * The function will return -1 if the targat VM or BSP doesn't exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to * struct acrn_vcpu_regs * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); struct acrn_set_vcpu_regs vcpu_regs; struct acrn_vcpu *vcpu; int32_t ret = -1; - get_vm_lock(target_vm); /* Only allow setup init ctx while target_vm is inactive */ - if ((!is_poweroff_vm(target_vm)) && (param != 0U) && (target_vm->state != VM_RUNNING)) { - if (copy_from_gpa(vm, &vcpu_regs, param, sizeof(vcpu_regs)) != 0) { + if ((!is_poweroff_vm(target_vm)) && (param2 != 0U) && (target_vm->state != VM_RUNNING)) { + if (copy_from_gpa(vm, &vcpu_regs, param2, sizeof(vcpu_regs)) != 0) { } else if (vcpu_regs.vcpu_id >= MAX_VCPUS_PER_VM) { pr_err("%s: invalid vcpu_id for set_vcpu_regs\n", __func__); } else { @@ -351,11 +340,16 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param) } } } - put_vm_lock(target_vm); return ret; } +int32_t hcall_create_vcpu(__unused struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) +{ + return 0; +} + /** * @brief set or clear IRQ line * @@ -364,18 +358,17 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param ops request command for IRQ set or clear + * @param target_vm Pointer to target VM data structure + * @param param2 info for irqline * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid, - const struct acrn_irqline_ops *ops) +int32_t hcall_set_irqline(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { uint32_t irq_pic; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); int32_t ret = -1; + struct acrn_irqline_ops *ops = (struct acrn_irqline_ops *)&param2; if (!is_poweroff_vm(target_vm)) { if (ops->gsi < get_vm_gsicount(vm)) { @@ -455,21 +448,20 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to struct acrn_msi_entry + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to struct acrn_msi_entry * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_inject_msi(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -1; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); if (!is_poweroff_vm(target_vm)) { struct acrn_msi_entry msi; - if (copy_from_gpa(vm, &msi, param, sizeof(msi)) == 0) { + if (copy_from_gpa(vm, &msi, param2, sizeof(msi)) == 0) { /* For target cpu with lapic pt, send ipi instead of injection via vlapic */ if (is_lapic_pt_configured(target_vm)) { enum vm_vlapic_mode vlapic_mode = check_vm_vlapic_mode(target_vm); @@ -509,27 +501,25 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. 
This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to * struct acrn_set_ioreq_buffer * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { uint64_t hpa; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); uint16_t i; int32_t ret = -1; - get_vm_lock(target_vm); if (is_created_vm(target_vm)) { struct acrn_set_ioreq_buffer iobuf; - if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) == 0) { + if (copy_from_gpa(vm, &iobuf, param2, sizeof(iobuf)) == 0) { dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p", - vmid, iobuf.req_buf); + target_vm->vm_id, iobuf.req_buf); hpa = gpa2hpa(vm, iobuf.req_buf); if (hpa == INVALID_HPA) { @@ -545,7 +535,6 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param } } } - put_vm_lock(target_vm); return ret; } @@ -556,21 +545,22 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param * Notify the requestor VCPU for the completion of an ioreq. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM - * @param vcpu_id vcpu ID of the requestor + * @param target_vm Pointer to target VM data structure + * @param param2 vcpu ID of the requestor * * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id) +int32_t hcall_notify_ioreq_finish(__unused struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, uint64_t param2) { struct acrn_vcpu *vcpu; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); int32_t ret = -1; + uint16_t vcpu_id = (uint16_t)param2; /* make sure we have set req_buf */ if ((!is_poweroff_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) { dev_dbg(DBG_LEVEL_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d", - vmid, vcpu_id); + target_vm->vm_id, vcpu_id); if (vcpu_id >= target_vm->hw.created_vcpus) { pr_err("%s, failed to get VCPU %d context from VM %d\n", @@ -685,28 +675,24 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm, * @brief setup ept memory mapping for multi regions * * @param vm Pointer to VM data structure - * @param param guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 guest physical address. This gpa points to * struct set_memmaps * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param) +int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct set_regions regions; struct vm_memory_region mr; - struct acrn_vm *target_vm = NULL; uint32_t idx; int32_t ret = -1; - if (copy_from_gpa(vm, &regions, param, sizeof(regions)) == 0) { - /* the vmid in regions is a relative vm id, need to convert to absolute vm id */ - uint16_t target_vmid = rel_vmid_2_vmid(vm->vm_id, regions.vmid); + if (copy_from_gpa(vm, &regions, param1, sizeof(regions)) == 0) { - if (target_vmid < CONFIG_MAX_VM_NUM) { - target_vm = get_vm_from_vmid(target_vmid); - } - if ((target_vm != NULL) && !is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) { + if (!is_poweroff_vm(target_vm)) { idx = 0U; while (idx < regions.mr_num) { if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) { @@ -774,17 +760,18 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp) * @brief change guest memory page write permission * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param wp_gpa guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to * struct wp_data * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_gpa) +int32_t hcall_write_protect_page(struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, uint64_t param2) { - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); int32_t ret = -1; + uint64_t wp_gpa = param2; if (!is_poweroff_vm(target_vm)) { struct wp_data wp; @@ -806,27 +793,26 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_ * The function will return -1 if the target VM does not exist. 
* * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to struct vm_gpa2hpa + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to struct vm_gpa2hpa * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -1; struct vm_gpa2hpa v_gpa2hpa; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); (void)memset((void *)&v_gpa2hpa, 0U, sizeof(v_gpa2hpa)); if (!is_poweroff_vm(target_vm) && - (copy_from_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) == 0)) { + (copy_from_gpa(vm, &v_gpa2hpa, param2, sizeof(v_gpa2hpa)) == 0)) { v_gpa2hpa.hpa = gpa2hpa(target_vm, v_gpa2hpa.gpa); if (v_gpa2hpa.hpa == INVALID_HPA) { pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.", __func__, target_vm->vm_id, v_gpa2hpa.gpa); } else { - ret = copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)); + ret = copy_to_gpa(vm, &v_gpa2hpa, param2, sizeof(v_gpa2hpa)); } } else { pr_err("target_vm is invalid or HCALL gpa2hpa: Unable copy param from vm\n"); @@ -839,22 +825,21 @@ int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * @brief Assign one PCI dev to a VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * acrn_assign_pcidev including assign PCI device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_assign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_assign_pcidev(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -EINVAL; struct acrn_assign_pcidev pcidev; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); /* We should only assign a device to a post-launched VM at creating time for safety, not runtime or other cases*/ if (is_created_vm(target_vm)) { - if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) { + if (copy_from_gpa(vm, &pcidev, param2, sizeof(pcidev)) == 0) { ret = vpci_assign_pcidev(target_vm, &pcidev); } } else { @@ -868,22 +853,21 @@ int32_t hcall_assign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * @brief Deassign one PCI dev from a VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * acrn_assign_pcidev including deassign PCI device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_deassign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_deassign_pcidev(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -EINVAL; struct acrn_assign_pcidev pcidev; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); /* We should only de-assign a device from a post-launched VM at creating/shutdown/reset time */ if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) { - if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) { + if (copy_from_gpa(vm, &pcidev, param2, sizeof(pcidev)) == 0) { ret = vpci_deassign_pcidev(target_vm, &pcidev); } } else { @@ -897,22 +881,21 @@ int32_t hcall_deassign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * @brief Assign one MMIO dev to a VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * acrn_mmiodev including assign MMIO device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_assign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_assign_mmiodev(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -EINVAL; struct acrn_mmiodev mmiodev; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); /* We should only assign a device to a post-launched VM at creating time for safety, not runtime or other cases*/ if (is_created_vm(target_vm)) { - if (copy_from_gpa(vm, &mmiodev, param, sizeof(mmiodev)) == 0) { + if (copy_from_gpa(vm, &mmiodev, param2, sizeof(mmiodev)) == 0) { ret = deassign_mmio_dev(vm, &mmiodev); if (ret == 0) { ret = assign_mmio_dev(target_vm, &mmiodev); @@ -929,22 +912,21 @@ int32_t hcall_assign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * @brief Deassign one MMIO dev from a VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * acrn_mmiodev including deassign MMIO device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t ret = -EINVAL; struct acrn_mmiodev mmiodev; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); /* We should only de-assign a device from a post-launched VM at creating/shutdown/reset time */ if ((is_paused_vm(target_vm) || is_created_vm(target_vm))) { - if (copy_from_gpa(vm, &mmiodev, param, sizeof(mmiodev)) == 0) { + if (copy_from_gpa(vm, &mmiodev, param2, sizeof(mmiodev)) == 0) { ret = deassign_mmio_dev(target_vm, &mmiodev); if (ret == 0) { ret = assign_mmio_dev(vm, &mmiodev); @@ -961,22 +943,22 @@ int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param * @brief Set interrupt mapping info of ptdev. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * hc_ptdev_irq including intr remapping info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, uint64_t param2) { int32_t ret = -1; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); if (!is_poweroff_vm(target_vm)) { struct hc_ptdev_irq irq; - if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) { + if (copy_from_gpa(vm, &irq, param2, sizeof(irq)) == 0) { if (irq.type == IRQ_INTX) { struct pci_vdev *vdev; union pci_bdf bdf = {.value = irq.virt_bdf}; @@ -991,11 +973,11 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa * phys_pin to phys_gsi */ if ((vdev != NULL) && (vdev->pdev->bdf.value == irq.phys_bdf)) { - if ((((!irq.intx.pic_pin) && (irq.intx.virt_pin < get_vm_gsicount(target_vm))) || - ((irq.intx.pic_pin) && (irq.intx.virt_pin < vpic_pincount()))) && - is_gsi_valid(irq.intx.phys_pin)) { + if ((((!irq.intx.pic_pin) && (irq.intx.virt_pin < get_vm_gsicount(target_vm))) + || ((irq.intx.pic_pin) && (irq.intx.virt_pin < vpic_pincount()))) + && is_gsi_valid(irq.intx.phys_pin)) { ret = ptirq_add_intx_remapping(target_vm, irq.intx.virt_pin, - irq.intx.phys_pin, irq.intx.pic_pin); + irq.intx.phys_pin, irq.intx.pic_pin); } else { pr_err("%s: Invalid phys pin or virt pin\n", __func__); } @@ -1012,23 +994,22 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa * @brief Clear interrupt mapping info of ptdev. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * hc_ptdev_irq including intr remapping info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t -hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_reset_ptdev_intr_info(struct acrn_vm *vm, struct acrn_vm *target_vm, + __unused uint64_t param1, uint64_t param2) { int32_t ret = -1; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); if (!is_poweroff_vm(target_vm)) { struct hc_ptdev_irq irq; - if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) { + if (copy_from_gpa(vm, &irq, param2, sizeof(irq)) == 0) { if (irq.type == IRQ_INTX) { struct pci_vdev *vdev; union pci_bdf bdf = {.value = irq.virt_bdf}; @@ -1064,28 +1045,23 @@ hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * @brief Get VCPU Power state. * * @param vm pointer to VM data structure - * @param cmd cmd to show get which VCPU power state data - * @param param VCPU power state data + * @param target_vm Pointer to target VM data structure + * @param param1 cmd to show get which VCPU power state data + * @param param2 VCPU power state data * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param) +int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2) { - uint16_t target_vm_id; - struct acrn_vm *target_vm = NULL; int32_t ret = -1; + uint64_t cmd = param1; - /* the vmid in cmd is a relative vm id, need to convert to absolute vm id */ - target_vm_id = rel_vmid_2_vmid(vm->vm_id, (uint16_t)((cmd & PMCMD_VMID_MASK) >> PMCMD_VMID_SHIFT)); - if (target_vm_id < CONFIG_MAX_VM_NUM) { - target_vm = get_vm_from_vmid(target_vm_id); - } - if ((target_vm != NULL) && (!is_poweroff_vm(target_vm)) && (is_postlaunched_vm(target_vm))) { + if (!is_poweroff_vm(target_vm)) { switch (cmd & PMCMD_TYPE_MASK) { case PMCMD_GET_PX_CNT: { if (target_vm->pm.px_cnt != 0U) { - ret = copy_to_gpa(vm, &(target_vm->pm.px_cnt), param, sizeof(target_vm->pm.px_cnt)); + ret = copy_to_gpa(vm, &(target_vm->pm.px_cnt), param2, sizeof(target_vm->pm.px_cnt)); } break; } @@ -1107,12 +1083,12 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param) } px_data = target_vm->pm.px_data + pn; - ret = copy_to_gpa(vm, px_data, param, sizeof(struct cpu_px_data)); + ret = copy_to_gpa(vm, px_data, param2, sizeof(struct cpu_px_data)); break; } case PMCMD_GET_CX_CNT: { if (target_vm->pm.cx_cnt != 0U) { - ret = copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param, sizeof(target_vm->pm.cx_cnt)); + ret = copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param2, sizeof(target_vm->pm.cx_cnt)); } break; } @@ -1131,7 +1107,7 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param) } cx_data = target_vm->pm.cx_data + cx_idx; - ret = copy_to_gpa(vm, cx_data, param, sizeof(struct cpu_cx_data)); + ret = copy_to_gpa(vm, cx_data, param2, sizeof(struct cpu_cx_data)); break; } default: @@ -1147,23 +1123,22 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param) * @brief Get VCPU a VM's interrupt count data. 
* * @param vm pointer to VM data structure - * @param vmid id of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param2 guest physical address. This gpa points to data structure of * acrn_intr_monitor * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param) +int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, struct acrn_vm *target_vm, __unused uint64_t param1, uint64_t param2) { int32_t status = -EINVAL; struct acrn_intr_monitor *intr_hdr; uint64_t hpa; - struct acrn_vm *target_vm = get_vm_from_vmid(vmid); if (!is_poweroff_vm(target_vm)) { /* the param for this hypercall is page aligned */ - hpa = gpa2hpa(vm, param); + hpa = gpa2hpa(vm, param2); if (hpa != INVALID_HPA) { intr_hdr = (struct acrn_intr_monitor *)hpa2hva(hpa); stac(); @@ -1202,20 +1177,21 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param) * to notify the SOS kernel. * * @param vm Pointer to VM data structure - * @param param the expected notifier vector from guest + * @param param1 the expected notifier vector from guest * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_callback_vector(__unused const struct acrn_vm *vm, uint64_t param) +int32_t hcall_set_callback_vector(__unused struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { int32_t ret; - if ((param > NR_MAX_VECTOR) || (param < VECTOR_DYNAMIC_START)) { + if ((param1 > NR_MAX_VECTOR) || (param1 < VECTOR_DYNAMIC_START)) { pr_err("%s: Invalid passed vector\n", __func__); ret = -EINVAL; } else { - set_vhm_notification_vector((uint32_t)param); + set_vhm_notification_vector((uint32_t)param1); ret = 0; } diff --git a/hypervisor/debug/hypercall.c b/hypervisor/debug/hypercall.c index 8dcdc41fa..9a5271a15 100644 --- a/hypervisor/debug/hypercall.c +++ b/hypervisor/debug/hypercall.c @@ -18,44 +18,45 @@ * @brief Execute profiling operation * * @param vm Pointer to VM data structure - * @param cmd profiling command to be executed - * @param cmd profiling command to be executed - * @param param guest physical address. This gpa points to + * @param param1 profiling command to be executed + * @param param2 guest physical address. This gpa points to * data structure required by each command * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -static int32_t hcall_profiling_ops(struct acrn_vm *vm, uint64_t cmd, uint64_t param) +int32_t hcall_profiling_ops(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, uint64_t param1, uint64_t param2) { int32_t ret; + uint64_t cmd = param1; + switch (cmd) { case PROFILING_MSR_OPS: - ret = profiling_msr_ops_all_cpus(vm, param); + ret = profiling_msr_ops_all_cpus(vm, param2); break; case PROFILING_GET_VMINFO: - ret = profiling_vm_list_info(vm, param); + ret = profiling_vm_list_info(vm, param2); break; case PROFILING_GET_VERSION: - ret = profiling_get_version_info(vm, param); + ret = profiling_get_version_info(vm, param2); break; case PROFILING_GET_CONTROL_SWITCH: - ret = profiling_get_control(vm, param); + ret = profiling_get_control(vm, param2); break; case PROFILING_SET_CONTROL_SWITCH: - ret = profiling_set_control(vm, param); + ret = profiling_set_control(vm, param2); break; case PROFILING_CONFIG_PMI: - ret = profiling_configure_pmi(vm, param); + ret = profiling_configure_pmi(vm, param2); break; case PROFILING_CONFIG_VMSWITCH: - ret = profiling_configure_vmsw(vm, param); + ret = profiling_configure_vmsw(vm, param2); break; case PROFILING_GET_PCPUID: - ret = profiling_get_pcpu_id(vm, param); + ret = profiling_get_pcpu_id(vm, param2); break; case PROFILING_GET_STATUS: - ret = profiling_get_status_info(vm, param); + ret = profiling_get_status_info(vm, param2); break; default: pr_err("%s: invalid profiling command %lu\n", __func__, cmd); @@ -70,18 +71,19 @@ static int32_t hcall_profiling_ops(struct acrn_vm *vm, uint64_t cmd, uint64_t pa * @brief Setup a share buffer for a VM. * * @param vm Pointer to VM data structure - * @param param guest physical address. This gpa points to + * @param param1 guest physical address. This gpa points to * struct sbuf_setup_param * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -static int32_t hcall_setup_sbuf(struct acrn_vm *vm, uint64_t param) +int32_t hcall_setup_sbuf(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct sbuf_setup_param ssp; uint64_t *hva; - if (copy_from_gpa(vm, &ssp, param, sizeof(ssp)) != 0) { + if (copy_from_gpa(vm, &ssp, param1, sizeof(ssp)) != 0) { return -1; } @@ -98,23 +100,24 @@ static int32_t hcall_setup_sbuf(struct acrn_vm *vm, uint64_t param) * @brief Setup the hypervisor NPK log. * * @param vm Pointer to VM data structure - * @param param guest physical address. This gpa points to + * @param param1 guest physical address. This gpa points to * struct hv_npk_log_param * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -static int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, uint64_t param) +int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct hv_npk_log_param npk_param; - if (copy_from_gpa(vm, &npk_param, param, sizeof(npk_param)) != 0) { + if (copy_from_gpa(vm, &npk_param, param1, sizeof(npk_param)) != 0) { return -1; } npk_log_setup(&npk_param); - return copy_to_gpa(vm, &npk_param, param, sizeof(npk_param)); + return copy_to_gpa(vm, &npk_param, param1, sizeof(npk_param)); } /** @@ -124,59 +127,18 @@ static int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, uint64_t param) * @param param Guest physical address pointing to struct acrn_hw_info * * @pre vm shall point to SOS_VM - * @pre param shall be a valid physical address + * @pre param1 shall be a valid physical address * * @retval 0 on success * @retval -1 in case of error */ -static int32_t hcall_get_hw_info(struct acrn_vm *vm, uint64_t param) +int32_t hcall_get_hw_info(struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + uint64_t param1, __unused uint64_t param2) { struct acrn_hw_info hw_info; (void)memset((void *)&hw_info, 0U, sizeof(hw_info)); hw_info.cpu_num = 
get_pcpu_nums(); - return copy_to_gpa(vm, &hw_info, param, sizeof(hw_info)); -} - -/** - * @brief Setup hypervisor debug infrastructure, such as share buffer, NPK log and profiling. - * - * @param vm Pointer to VM data structure - * @param param1 hypercall param1 from guest - * @param param2 hypercall param2 from guest - * @param hypcall_id hypercall ID from guest - * - * @pre Pointer vm shall point to SOS_VM - * @return 0 on success, non-zero on error. - */ -int32_t hcall_debug(struct acrn_vm *vm, uint64_t param1, uint64_t param2, uint64_t hypcall_id) -{ - int32_t ret; - - /* Dispatch the debug hypercall handler */ - switch (hypcall_id) { - case HC_SETUP_SBUF: - ret = hcall_setup_sbuf(vm, param1); - break; - - case HC_SETUP_HV_NPK_LOG: - ret = hcall_setup_hv_npk_log(vm, param1); - break; - - case HC_PROFILING_OPS: - ret = hcall_profiling_ops(vm, param1, param2); - break; - - case HC_GET_HW_INFO: - ret = hcall_get_hw_info(vm, param1); - break; - - default: - pr_err("op %d: Invalid hypercall\n", hypcall_id); - ret = -EPERM; - break; - } - - return ret; + return copy_to_gpa(vm, &hw_info, param1, sizeof(hw_info)); } diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h index 23f9e49b8..2c6662fc5 100644 --- a/hypervisor/include/arch/x86/guest/vm.h +++ b/hypervisor/include/arch/x86/guest/vm.h @@ -225,7 +225,6 @@ bool is_created_vm(const struct acrn_vm *vm); bool is_paused_vm(const struct acrn_vm *vm); bool is_sos_vm(const struct acrn_vm *vm); bool is_postlaunched_vm(const struct acrn_vm *vm); -bool is_valid_postlaunched_vmid(uint16_t vm_id); bool is_prelaunched_vm(const struct acrn_vm *vm); uint16_t get_vmid_by_uuid(const uint8_t *uuid); struct acrn_vm *get_vm_from_vmid(uint16_t vm_id); diff --git a/hypervisor/include/common/hypercall.h b/hypervisor/include/common/hypercall.h index 34ff6d6eb..46686fb9c 100644 --- a/hypervisor/include/common/hypercall.h +++ b/hypervisor/include/common/hypercall.h @@ -28,12 +28,14 @@ bool 
is_hypercall_from_ring0(void); * The function offline specific vcpu from SOS. * * @param vm Pointer to VM data structure - * @param lapicid lapic id of the vcpu which wants to offline + * @param target_vm not used + * @param param1 lapic id of the vcpu which wants to offline + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid); +int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Get hypervisor api version @@ -41,14 +43,15 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid); * The function only return api version information when VM is SOS_VM. * * @param vm Pointer to VM data structure - * @param param guest physical memory address. The api version returned + * @param target_vm not used + * @param param1 guest physical memory address. The api version returned * will be copied to this gpa + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param); - +int32_t hcall_get_api_version(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Get basic platform information. @@ -57,12 +60,14 @@ int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param); * for the current platform. * * @param vm Pointer to VM data structure. - * @param param GPA pointer to struct hc_platform_info. + * @param target_vm not used + * @param param1 GPA pointer to struct hc_platform_info. + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, -1 in case of error. 
*/ -int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param); +int32_t hcall_get_platform_info(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief create virtual machine @@ -72,13 +77,15 @@ int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param); * support later. * * @param vm Pointer to VM data structure - * @param param guest physical memory address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 guest physical memory address. This gpa points to * struct acrn_create_vm + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param); +int32_t hcall_create_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief destroy virtual machine @@ -86,11 +93,14 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param); * Destroy a virtual machine, it will pause target VM then shutdown it. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param vm not used + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 not used * * @return 0 on success, non-zero on error. */ -int32_t hcall_destroy_vm(uint16_t vmid); +int32_t hcall_destroy_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief reset virtual machine @@ -100,11 +110,14 @@ int32_t hcall_destroy_vm(uint16_t vmid); * each vcpu state and do some initialization for guest. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param vm not used + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 not used * * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_reset_vm(uint16_t vmid); +int32_t hcall_reset_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief start virtual machine @@ -113,11 +126,14 @@ int32_t hcall_reset_vm(uint16_t vmid); * The function will return -1 if the target VM does not exist or the * IOReq buffer page for the VM is not ready. * - * @param vmid ID of the VM + * @param vm not used + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 not used * * @return 0 on success, non-zero on error. */ -int32_t hcall_start_vm(uint16_t vmid); +int32_t hcall_start_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief pause virtual machine @@ -126,11 +142,14 @@ int32_t hcall_start_vm(uint16_t vmid); * will return 0 directly for success. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM + * @param vm not used + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 not used * * @return 0 on success, non-zero on error. */ -int32_t hcall_pause_vm(uint16_t vmid); +int32_t hcall_pause_vm(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief set vcpu regs @@ -140,13 +159,14 @@ int32_t hcall_pause_vm(uint16_t vmid); * The function will return -1 if the targat VM or BSP doesn't exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to * struct acrn_vcpu_regs * * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief set or clear IRQ line @@ -156,14 +176,15 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param); * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param ops request command for IRQ set or clear + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 info for irqline * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid, - const struct acrn_irqline_ops *ops); +int32_t hcall_set_irqline(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); + /** * @brief inject MSI interrupt * @@ -171,13 +192,14 @@ int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid, * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to struct acrn_msi_entry + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to struct acrn_msi_entry * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_inject_msi(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief set ioreq shared buffer @@ -186,14 +208,15 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param); * The function will return -1 if the target VM does not exist. 
* * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to * struct acrn_set_ioreq_buffer * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief notify request done @@ -201,37 +224,42 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param * Notify the requestor VCPU for the completion of an ioreq. * The function will return -1 if the target VM does not exist. * - * @param vmid ID of the VM - * @param vcpu_id vcpu ID of the requestor + * @param vm not used + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 vcpu ID of the requestor * * @return 0 on success, non-zero on error. */ -int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id); +int32_t hcall_notify_ioreq_finish(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief setup ept memory mapping for multi regions * * @param vm Pointer to VM data structure - * @param param guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 guest physical address. This gpa points to * struct set_memmaps + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param); +int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief change guest memory page write permission * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param wp_gpa guest physical address. This gpa points to + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to * struct wp_data * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_gpa); +int32_t hcall_write_protect_page(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief translate guest physical address to host physical address @@ -240,131 +268,125 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_ * The function will return -1 if the target VM does not exist. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to struct vm_gpa2hpa + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to struct vm_gpa2hpa * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Assign one PCI dev to VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. 
This gpa points to data structure of * acrn_assign_pcidev including assign PCI device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_assign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_assign_pcidev(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Deassign one PCI dev to VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * acrn_assign_pcidev including deassign PCI device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_deassign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_deassign_pcidev(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Assign one MMIO dev to VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * acrn_mmiodev including assign MMIO device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_assign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_assign_mmiodev(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Deassign one MMIO dev to VM. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. 
This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * acrn_mmiodev including deassign MMIO device info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_deassign_mmiodev(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Set interrupt mapping info of ptdev. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * hc_ptdev_irq including intr remapping info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Clear interrupt mapping info of ptdev. * * @param vm Pointer to VM data structure - * @param vmid ID of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * hc_ptdev_irq including intr remapping info * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, - uint64_t param); - -/** - * @brief Setup hypervisor debug infrastructure, such as share buffer, NPK log and profiling. 
- * - * @param vm Pointer to VM data structure - * @param param1 hypercall param1 from guest - * @param param2 hypercall param2 from guest - * @param hypcall_id hypercall ID from guest - * - * @pre Pointer vm shall point to SOS_VM - * @return 0 on success, non-zero on error. - */ -int32_t hcall_debug(struct acrn_vm *vm, uint64_t param1, uint64_t param2, uint64_t hypcall_id); +int32_t hcall_reset_ptdev_intr_info(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Get VCPU Power state. * * @param vm pointer to VM data structure - * @param cmd cmd to show get which VCPU power state data - * @param param VCPU power state data + * @param target_vm Pointer to target VM data structure + * @param param1 cmd to specify which VCPU power state data to get + * @param param2 VCPU power state data * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ - -int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param); +int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @brief Get VCPU a VM's interrupt count data. * * @param vm pointer to VM data structure - * @param vmid id of the VM - * @param param guest physical address. This gpa points to data structure of + * @param target_vm Pointer to target VM data structure + * @param param1 not used + * @param param2 guest physical address. This gpa points to data structure of * acrn_intr_monitor * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. 
*/ -int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param); +int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @defgroup trusty_hypercall Trusty Hypercalls @@ -429,14 +451,75 @@ int32_t hcall_save_restore_sworld_ctx(struct acrn_vcpu *vcpu); * not called, the hypervisor will use the default notifier vector(0xF7) * to notify the SOS kernel. * - * @param vm Pointer to VM data structure - * @param param the expected notifier vector from guest + * @param vm not used + * @param target_vm not used + * @param param1 the expected notifier vector from guest + * @param param2 not used * * @pre Pointer vm shall point to SOS_VM * @return 0 on success, non-zero on error. */ -int32_t hcall_set_callback_vector(__unused const struct acrn_vm *vm, uint64_t param); +int32_t hcall_set_callback_vector(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); +/** + * @brief Setup a share buffer for a VM. + * + * @param vm Pointer to VM data structure + * @param target_vm not used + * @param param1 guest physical address. This gpa points to + * struct sbuf_setup_param + * @param param2 not used + * + * @pre Pointer vm shall point to SOS_VM + * @return 0 on success, non-zero on error. + */ +int32_t hcall_setup_sbuf(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); + +/** + * @brief Setup the hypervisor NPK log. + * + * @param vm Pointer to VM data structure + * @param target_vm not used + * @param param1 guest physical address. This gpa points to + * struct hv_npk_log_param + * @param param2 not used + * + * @pre Pointer vm shall point to SOS_VM + * @return 0 on success, non-zero on error. 
+ */ +int32_t hcall_setup_hv_npk_log(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); + +/** + * @brief Get hardware related info + * + * @param vm Pointer to vm data structure + * @param target_vm not used + * @param param1 Guest physical address pointing to struct acrn_hw_info + * @param param2 not used + * + * @pre vm shall point to SOS_VM + * @pre param1 shall be a valid physical address + * + * @retval 0 on success + * @retval -1 in case of error + */ +int32_t hcall_get_hw_info(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); + +/** + * @brief Execute profiling operation + * + * @param vm Pointer to VM data structure + * @param target_vm not used + * @param param1 profiling command to be executed + * @param param2 guest physical address. This gpa points to + * data structure required by each command + * + * @pre Pointer vm shall point to SOS_VM + * @return 0 on success, non-zero on error. + */ +int32_t hcall_profiling_ops(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); + +int32_t hcall_create_vcpu(struct acrn_vm *vm, struct acrn_vm *target_vm, uint64_t param1, uint64_t param2); /** * @} */ diff --git a/hypervisor/include/public/acrn_hv_defs.h b/hypervisor/include/public/acrn_hv_defs.h index f48285fe5..33101b633 100644 --- a/hypervisor/include/public/acrn_hv_defs.h +++ b/hypervisor/include/public/acrn_hv_defs.h @@ -20,6 +20,7 @@ */ #define BASE_HC_ID(x, y) (((x)<<24U)|(y)) +#define HC_IDX(id) ((id)&(0xFFUL)) #define HC_ID 0x80UL diff --git a/hypervisor/release/hypercall.c b/hypervisor/release/hypercall.c index 836088c37..b7eb841b6 100644 --- a/hypervisor/release/hypercall.c +++ b/hypervisor/release/hypercall.c @@ -8,8 +8,26 @@ #include #include -int32_t hcall_debug(__unused struct acrn_vm *vm, __unused uint64_t param1, __unused uint64_t param2, - __unused uint64_t hypcall_id) +int32_t hcall_setup_sbuf(__unused struct acrn_vm *vm, __unused struct acrn_vm 
*target_vm, + __unused uint64_t param1, __unused uint64_t param2) +{ + return -EPERM; +} + +int32_t hcall_setup_hv_npk_log(__unused struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) +{ + return -EPERM; +} + +int32_t hcall_get_hw_info(__unused struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) +{ + return -EPERM; +} + +int32_t hcall_profiling_ops(__unused struct acrn_vm *vm, __unused struct acrn_vm *target_vm, + __unused uint64_t param1, __unused uint64_t param2) { return -EPERM; }