diff --git a/hypervisor/arch/x86/guest/assign.c b/hypervisor/arch/x86/guest/assign.c index 2de2a34a4..7f9c590af 100644 --- a/hypervisor/arch/x86/guest/assign.c +++ b/hypervisor/arch/x86/guest/assign.c @@ -636,24 +636,49 @@ int32_t ptirq_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bd spinlock_release(&ptdev_lock); if (entry != NULL) { + ret = 0; if (is_entry_active(entry) && (info->vmsi_data.full == 0U)) { /* handle destroy case */ info->pmsi_data.full = 0U; } else { /* build physical config MSI, update to info->pmsi_xxx */ - if (is_lapic_pt_enabled(vm)) { - /* for vm with lapic-pt, keep vector from guest */ - ptirq_build_physical_msi(vm, info, entry, (uint32_t)info->vmsi_data.bits.vector); + if (is_lapic_pt_configured(vm)) { + enum vm_vlapic_state vlapic_state = check_vm_vlapic_state(vm); + if (vlapic_state == VM_VLAPIC_X2APIC) { + /* + * All the vCPUs are in x2APIC mode and LAPIC is Pass-through + * Use guest vector to program the interrupt source + */ + ptirq_build_physical_msi(vm, info, entry, (uint32_t)info->vmsi_data.bits.vector); + } else if (vlapic_state == VM_VLAPIC_XAPIC) { + /* + * All the vCPUs are in xAPIC mode and LAPIC is emulated + * Use host vector to program the interrupt source + */ + ptirq_build_physical_msi(vm, info, entry, irq_to_vector(entry->allocated_pirq)); + } else if (vlapic_state == VM_VLAPIC_TRANSITION) { + /* + * vCPUs are in middle of transition, so do not program interrupt source + * TODO: Devices programmed during transition do not work after transition + * as device is not programmed with interrupt info. Need to implement a + * method to get interrupts working after transition. 
+ */ + ret = -EFAULT; + } else { + /* Do nothing for VM_VLAPIC_DISABLED */ + ret = -EFAULT; + } } else { ptirq_build_physical_msi(vm, info, entry, irq_to_vector(entry->allocated_pirq)); } - entry->msi = *info; - dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d", - pci_bus(virt_bdf), pci_slot(virt_bdf), pci_func(virt_bdf), entry_nr, - info->vmsi_data.bits.vector, irq_to_vector(entry->allocated_pirq), entry->vm->vm_id); + if (ret == 0) { + entry->msi = *info; + dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d", + pci_bus(virt_bdf), pci_slot(virt_bdf), pci_func(virt_bdf), entry_nr, + info->vmsi_data.bits.vector, irq_to_vector(entry->allocated_pirq), entry->vm->vm_id); + } } - ret = 0; } return ret; diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index b916a8649..4531e0913 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -595,7 +595,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state) if (atomic_load32(&vcpu->running) == 1U) { remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id); - if (is_lapic_pt_enabled(vcpu->vm)) { + if (is_lapic_pt_enabled(vcpu)) { make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT); } else { make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI); @@ -738,3 +738,17 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask) return dmask; } + +/* + * @brief Check if vCPU uses LAPIC in x2APIC mode and the VM, vCPU belongs to, is configured for + * LAPIC Pass-through + * + * @pre vcpu != NULL + * + * @return true, if vCPU LAPIC is in x2APIC mode and VM, vCPU belongs to, is configured for + * LAPIC Pass-through + */ +bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu) +{ + return ((is_x2apic_enabled(vcpu_vlapic(vcpu))) && (is_lapic_pt_configured(vcpu->vm))); +} diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 43a554b88..08e739e62 100644 --- 
a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -2042,6 +2042,9 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val) if ((phys == false) || (shorthand != APIC_DEST_DESTFLD)) { pr_err("Logical destination mode or shorthands \ not supported in ICR forpartition mode\n"); + /* + * TODO: To support logical destination and shorthand modes + */ } else { vcpu_id = vm_apicid2vcpu_id(vm, vapic_id); if ((vcpu_id < vm->hw.created_vcpus) && (vm->hw.vcpu_array[vcpu_id].state != VCPU_OFFLINE)) { @@ -2056,11 +2059,13 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val) break; default: /* convert the dest from virtual apic_id to physical apic_id */ - papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id); - dev_dbg(ACRN_DBG_LAPICPT, - "%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx", - __func__, vapic_id, papic_id, icr_low); - msr_write(MSR_IA32_EXT_APIC_ICR, (((uint64_t)papic_id) << 32U) | icr_low); + if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) { + papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id); + dev_dbg(ACRN_DBG_LAPICPT, + "%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx", + __func__, vapic_id, papic_id, icr_low); + msr_write(MSR_IA32_EXT_APIC_ICR, (((uint64_t)papic_id) << 32U) | icr_low); + } break; } ret = 0; @@ -2602,14 +2607,3 @@ void vlapic_set_apicv_ops(void) apicv_ops = &apicv_basic_ops; } } - -/** - * @pre vm != NULL - * @pre vm->vmid < CONFIG_MAX_VM_NUM - */ -bool is_lapic_pt_enabled(struct acrn_vm *vm) -{ - struct acrn_vcpu *vcpu = vcpu_from_vid(vm, 0U); - - return ((is_x2apic_enabled(vcpu_vlapic(vcpu))) && (is_lapic_pt_configured(vm))); -} diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c index aa24020bb..4bf41b7e4 100644 --- a/hypervisor/arch/x86/guest/vm.c +++ b/hypervisor/arch/x86/guest/vm.c @@ -559,7 +559,7 @@ int32_t shutdown_vm(struct acrn_vm *vm) reset_vcpu(vcpu); offline_vcpu(vcpu); - if (is_lapic_pt_enabled(vm)) { + if (is_lapic_pt_enabled(vcpu)) { 
bitmap_set_nolock(vcpu->pcpu_id, &mask); make_pcpu_offline(vcpu->pcpu_id); } @@ -567,7 +567,7 @@ int32_t shutdown_vm(struct acrn_vm *vm) wait_pcpus_offline(mask); - if (is_lapic_pt_enabled(vm) && !start_pcpus(mask)) { + if (is_lapic_pt_configured(vm) && !start_pcpus(mask)) { pr_fatal("Failed to start all cpus in mask(0x%llx)", mask); ret = -ETIMEDOUT; } @@ -841,3 +841,16 @@ void update_vm_vlapic_state(struct acrn_vm *vm) vm->arch_vm.vlapic_state = vlapic_state; spinlock_release(&vm->vm_lock); } + +/* + * @brief Check state of vLAPICs of a VM + * + * @pre vm != NULL + */ +enum vm_vlapic_state check_vm_vlapic_state(const struct acrn_vm *vm) +{ + enum vm_vlapic_state vlapic_state; + + vlapic_state = vm->arch_vm.vlapic_state; + return vlapic_state; +} diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c index 67e8c0cc7..2d1db9c2e 100644 --- a/hypervisor/common/hv_main.c +++ b/hypervisor/common/hv_main.c @@ -27,7 +27,7 @@ void vcpu_thread(struct sched_object *obj) init_vmcs(vcpu); } - if (!is_lapic_pt_enabled(vcpu->vm)) { + if (!is_lapic_pt_enabled(vcpu)) { /* handle pending softirq when irq enable*/ do_softirq(); CPU_IRQ_DISABLE(); @@ -64,7 +64,7 @@ void vcpu_thread(struct sched_object *obj) profiling_pre_vmexit_handler(vcpu); - if (!is_lapic_pt_enabled(vcpu->vm)) { + if (!is_lapic_pt_enabled(vcpu)) { CPU_IRQ_ENABLE(); } /* Dispatch handler */ diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c index 2a3f41fef..22f215511 100644 --- a/hypervisor/common/hypercall.c +++ b/hypervisor/common/hypercall.c @@ -484,9 +484,27 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param) pr_err("%s: Unable copy param to vm\n", __func__); } else { /* For target cpu with lapic pt, send ipi instead of injection via vlapic */ - if (is_lapic_pt_enabled(target_vm)) { - inject_msi_lapic_pt(target_vm, &msi); - ret = 0; + if (is_lapic_pt_configured(target_vm)) { + enum vm_vlapic_state vlapic_state = check_vm_vlapic_state(target_vm); + if 
(vlapic_state == VM_VLAPIC_X2APIC) { + /* + * All the vCPUs of VM are in x2APIC mode and LAPIC is PT + * Inject the vMSI as an IPI directly to VM + */ + inject_msi_lapic_pt(target_vm, &msi); + ret = 0; + } else if (vlapic_state == VM_VLAPIC_XAPIC) { + /* + * All the vCPUs of VM are in xAPIC and use vLAPIC + * Inject using vLAPIC + */ + ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data); + } else { + /* + * For cases VM_VLAPIC_DISABLED and VM_VLAPIC_TRANSITION + * Silently drop interrupt + */ + } } else { ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data); } diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h index 18032916c..2d81ed94c 100644 --- a/hypervisor/include/arch/x86/guest/vcpu.h +++ b/hypervisor/include/arch/x86/guest/vcpu.h @@ -747,7 +747,7 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id); * @return The physical destination CPU mask */ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask); - +bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu); /** * @} */ diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h index af235ba6a..cf8cdb0a5 100644 --- a/hypervisor/include/arch/x86/guest/vlapic.h +++ b/hypervisor/include/arch/x86/guest/vlapic.h @@ -210,7 +210,6 @@ void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast, uint32_t dest, bool phys, bool lowprio); void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast, uint32_t dest, bool phys); -bool is_lapic_pt_enabled(struct acrn_vm *vm); bool is_x2apic_enabled(const struct acrn_vlapic *vlapic); bool is_xapic_enabled(const struct acrn_vlapic *vlapic); /** diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h index a1486e68e..ba8ad4ff4 100644 --- a/hypervisor/include/arch/x86/guest/vm.h +++ b/hypervisor/include/arch/x86/guest/vm.h @@ -227,7 +227,7 @@ bool is_rt_vm(const struct acrn_vm *vm); 
bool is_highest_severity_vm(const struct acrn_vm *vm); bool vm_hide_mtrr(const struct acrn_vm *vm); void update_vm_vlapic_state(struct acrn_vm *vm); - +enum vm_vlapic_state check_vm_vlapic_state(const struct acrn_vm *vm); #endif /* !ASSEMBLER */ #endif /* VM_H_ */