Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: LAPIC pass-thru support for partition mode of ACRN
ACRN, in partition mode, supports LAPIC pass-thru to guests. The guest needs to
use the x2APIC mode of the LAPIC for pass-thru to be enabled. ACRN also requires
the user to set lapic_pt to true in the vm_desc for the VM. The Interrupt
Command Register (ICR) is the only APIC register that is intercepted. Reference
code in partition/vm_description.c enables LAPIC pass-thru for vm2.

Tracked-On: #1626
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Reviewed-by: Xu Anthony <anthony.xu@intel.com>
commit 48ae379b4b (parent ff56b6f62d)
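As background for the diff below, a minimal, hypothetical guest-side sketch (not
part of this patch): a guest kernel typically enables x2APIC mode, which the
commit message says is a precondition for LAPIC pass-thru, by setting the EXTD
bit in the IA32_APIC_BASE MSR in addition to the global enable bit. The helper
names here are illustrative.

/*
 * Hypothetical guest-side sketch: switch the local APIC into x2APIC mode.
 * MSR 0x1B is IA32_APIC_BASE; bit 11 is the xAPIC global enable and bit 10
 * (EXTD) selects x2APIC mode.
 */
#include <stdint.h>

#define MSR_IA32_APIC_BASE	0x1BU
#define APIC_BASE_ENABLE	(1UL << 11U)	/* xAPIC global enable */
#define APIC_BASE_EXTD		(1UL << 10U)	/* x2APIC mode enable */

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32U) | lo;
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
	asm volatile ("wrmsr" : : "c"(msr), "a"((uint32_t)val),
			"d"((uint32_t)(val >> 32U)));
}

static void guest_enable_x2apic(void)
{
	uint64_t base = rdmsr(MSR_IA32_APIC_BASE);

	/* xAPIC -> x2APIC: keep EN set and additionally set EXTD */
	wrmsr(MSR_IA32_APIC_BASE, base | APIC_BASE_ENABLE | APIC_BASE_EXTD);
}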
@@ -360,6 +360,9 @@ void guest_cpuid(struct vcpu *vcpu,

	case 0x0bU:
		/* Patching X2APIC */
+#ifdef CONFIG_PARTITION_MODE
+		cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
+#else
		if (is_vm0(vcpu->vm)) {
			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
		} else {
@@ -388,6 +391,7 @@ void guest_cpuid(struct vcpu *vcpu,
				break;
			}
		}
+#endif
		break;

	case 0x0dU:
@@ -1146,6 +1146,52 @@ vlapic_get_cr8(const struct acrn_vlapic *vlapic)
	return (uint64_t)(tpr >> 4U);
 }

+static void
+vlapic_process_init_sipi(struct vcpu* target_vcpu, uint32_t mode,
+				uint32_t icr_low, uint16_t vcpu_id)
+{
+	if (mode == APIC_DELMODE_INIT) {
+		if ((icr_low & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT) {
+			return;
+		}
+
+		dev_dbg(ACRN_DBG_LAPIC,
+			"Sending INIT from VCPU %hu to %hu",
+			target_vcpu->vcpu_id, vcpu_id);
+
+		/* put target vcpu to INIT state and wait for SIPI */
+		pause_vcpu(target_vcpu, VCPU_PAUSED);
+		reset_vcpu(target_vcpu);
+		/* new cpu model only need one SIPI to kick AP run,
+		 * the second SIPI will be ignored as it move out of
+		 * wait-for-SIPI state.
+		 */
+		target_vcpu->arch_vcpu.nr_sipi = 1U;
+	} else if (mode == APIC_DELMODE_STARTUP) {
+		/* Ignore SIPIs in any state other than wait-for-SIPI */
+		if ((target_vcpu->state != VCPU_INIT) ||
+			(target_vcpu->arch_vcpu.nr_sipi == 0U)) {
+			return;
+		}
+
+		dev_dbg(ACRN_DBG_LAPIC,
+			"Sending SIPI from VCPU %hu to %hu with vector %u",
+			target_vcpu->vcpu_id, vcpu_id,
+			(icr_low & APIC_VECTOR_MASK));
+
+		target_vcpu->arch_vcpu.nr_sipi--;
+		if (target_vcpu->arch_vcpu.nr_sipi > 0U) {
+			return;
+		}
+
+		pr_err("Start Secondary VCPU%hu for VM[%d]...",
+			target_vcpu->vcpu_id,
+			target_vcpu->vm->vm_id);
+		set_ap_entry(target_vcpu, (icr_low & APIC_VECTOR_MASK) << 12U);
+		schedule_vcpu(target_vcpu);
+	}
+}
+
 static int
 vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)
 {
@@ -1217,52 +1263,18 @@ vlapic_icrlo_write_handler(struct acrn_vlapic *vlapic)

		if (mode == APIC_DELMODE_FIXED) {
			vlapic_set_intr(target_vcpu, vec,
						LAPIC_TRIG_EDGE);
			dev_dbg(ACRN_DBG_LAPIC,
				"vlapic sending ipi %u to vcpu_id %hu",
				vec, vcpu_id);
		} else if (mode == APIC_DELMODE_NMI) {
			vcpu_inject_nmi(target_vcpu);
			dev_dbg(ACRN_DBG_LAPIC,
				"vlapic send ipi nmi to vcpu_id %hu", vcpu_id);
		} else if (mode == APIC_DELMODE_INIT) {
-			if ((icr_low & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT) {
-				continue;
-			}
-
-			dev_dbg(ACRN_DBG_LAPIC,
-				"Sending INIT from VCPU %hu to %hu",
-				vlapic->vcpu->vcpu_id, vcpu_id);
-
-			/* put target vcpu to INIT state and wait for SIPI */
-			pause_vcpu(target_vcpu, VCPU_PAUSED);
-			reset_vcpu(target_vcpu);
-			/* new cpu model only need one SIPI to kick AP run,
-			 * the second SIPI will be ignored as it move out of
-			 * wait-for-SIPI state.
-			 */
-			target_vcpu->arch_vcpu.nr_sipi = 1U;
+			vlapic_process_init_sipi(target_vcpu, mode, icr_low, vcpu_id);
		} else if (mode == APIC_DELMODE_STARTUP) {
-			/* Ignore SIPIs in any state other than wait-for-SIPI */
-			if ((target_vcpu->state != VCPU_INIT) ||
-				(target_vcpu->arch_vcpu.nr_sipi == 0U)) {
-				continue;
-			}
-
-			dev_dbg(ACRN_DBG_LAPIC,
-				"Sending SIPI from VCPU %hu to %hu with vector %u",
-				vlapic->vcpu->vcpu_id, vcpu_id, vec);
-
-			target_vcpu->arch_vcpu.nr_sipi--;
-			if (target_vcpu->arch_vcpu.nr_sipi > 0U) {
-				continue;
-			}
-
-			pr_err("Start Secondary VCPU%hu for VM[%d]...",
-				target_vcpu->vcpu_id,
-				target_vcpu->vm->vm_id);
-			set_ap_entry(target_vcpu, vec << 12U);
-			schedule_vcpu(target_vcpu);
+			vlapic_process_init_sipi(target_vcpu, mode, icr_low, vcpu_id);
		} else if (mode == APIC_DELMODE_SMI) {
			pr_info("vlapic: SMI IPI do not support\n");
		} else {
@@ -2002,7 +2014,58 @@ static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
	return (((msr - 0x800U) & 0x3FFU) << 4U);
 }

-static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write, uint64_t *val)
+#ifdef CONFIG_PARTITION_MODE
+/*
+ * If x2apic is pass-thru to guests, we have to special case the following
+ * 1. INIT Delivery mode
+ * 2. SIPI Delivery mode
+ * For all other cases, send IPI on the wire.
+ * No shorthand and Physical destination mode are only supported.
+ */
+
+static int
+vlapic_x2apic_pt_icr_access(struct vm *vm, uint64_t val)
+{
+	uint64_t apic_id = (uint32_t) (val >> 32U);
+	uint32_t icr_low = val;
+	uint32_t mode = icr_low & APIC_DELMODE_MASK;
+	uint16_t vcpu_id;
+	struct vcpu *target_vcpu;
+	bool phys;
+	uint32_t shorthand;
+
+	phys = ((icr_low & APIC_DESTMODE_LOG) == 0UL);
+	shorthand = icr_low & APIC_DEST_MASK;
+
+	if ((phys == false) || (shorthand != APIC_DEST_DESTFLD)) {
+		pr_err("Logical destination mode or shorthands \
+				not supported in ICR forpartition mode\n");
+		return -1;
+	}
+
+	vcpu_id = vm_apicid2vcpu_id(vm, apic_id);
+	target_vcpu = vcpu_from_vid(vm, vcpu_id);
+
+	if (target_vcpu == NULL) {
+		return 0;
+	}
+	switch (mode) {
+	case APIC_DELMODE_INIT:
+		vlapic_process_init_sipi(target_vcpu, mode, icr_low, vcpu_id);
+		break;
+	case APIC_DELMODE_STARTUP:
+		vlapic_process_init_sipi(target_vcpu, mode, icr_low, vcpu_id);
+		break;
+	default:
+		msr_write(MSR_IA32_EXT_APIC_ICR, (apic_id << 32U) | icr_low);
+		break;
+	}
+	return 0;
+}
+#endif
+
+static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write,
+	uint64_t *val)
 {
	struct acrn_vlapic *vlapic;
	uint32_t offset;
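A short note on the value that vlapic_x2apic_pt_icr_access splits apart. In
x2APIC mode the ICR is a single 64-bit MSR write: the destination APIC ID
lives in bits 63:32 and the command (vector, delivery mode, level, shorthand)
in bits 31:0, which is why the handler derives apic_id and icr_low from the
same value. The helper below is purely illustrative and is not part of the
patch.

/*
 * Illustrative helper (assumption, not ACRN code): decompose a 64-bit
 * x2APIC ICR write into its destination and command halves.
 */
#include <stdint.h>

struct x2apic_icr {
	uint32_t dest_apic_id;	/* bits 63:32 of the written ICR value */
	uint32_t icr_low;	/* vector, delivery mode, level, shorthand */
};

static inline struct x2apic_icr split_x2apic_icr(uint64_t val)
{
	struct x2apic_icr icr;

	icr.dest_apic_id = (uint32_t)(val >> 32U);
	icr.icr_low = (uint32_t)val;
	return icr;
}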
@@ -2014,6 +2077,14 @@ static int vlapic_x2apic_access(struct vcpu *vcpu, uint32_t msr, bool write, uin
	 */
	vlapic = vcpu_vlapic(vcpu);
	if (is_x2apic_enabled(vlapic)) {
+#ifdef CONFIG_PARTITION_MODE
+		if (vcpu->vm->vm_desc->lapic_pt) {
+			if (msr == MSR_IA32_EXT_APIC_ICR) {
+				error = vlapic_x2apic_pt_icr_access(vcpu->vm, *val);
+			}
+			return error;
+		}
+#endif
		offset = x2apic_msr_to_regoff(msr);
		if (write) {
			if (!is_x2apic_read_only_msr(msr)) {
@@ -446,3 +446,17 @@ void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu)
	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_EOI, READ);
	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_SELF_IPI, READ);
 }
+
+void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu)
+{
+	uint32_t msr;
+	uint8_t *msr_bitmap;
+
+	msr_bitmap = vcpu->vm->arch_vm.msr_bitmap;
+	for (msr = MSR_IA32_EXT_XAPICID;
+			msr <= MSR_IA32_EXT_APIC_SELF_IPI; msr++) {
+		enable_msr_interception(msr_bitmap, msr, DISABLE);
+	}
+	enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_ICR, WRITE);
+	enable_msr_interception(msr_bitmap, MSR_IA32_TSC_DEADLINE, DISABLE);
+}
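update_msr_bitmap_x2apic_passthru opens the entire x2APIC MSR range to the
guest and leaves only ICR writes intercepted, matching the commit message.
The mechanism underneath is the VMX MSR bitmap: a 4 KiB page whose read-low,
read-high, write-low and write-high quarters hold one bit per MSR, and a set
bit forces a VM exit on that access. The sketch below is an assumption about
how such a helper could look, not ACRN's enable_msr_interception.

/*
 * Illustrative sketch (assumption): toggle read/write intercept bits for a
 * low-range MSR (0x0 - 0x1FFF) in a VMX MSR bitmap page. Clearing the bits
 * for the x2APIC MSRs, as the patch does for everything except ICR writes,
 * lets the guest access its LAPIC without exiting.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_BITMAP_READ_LOW	0x000U	/* reads of MSRs 0x0 - 0x1FFF  */
#define MSR_BITMAP_WRITE_LOW	0x800U	/* writes of MSRs 0x0 - 0x1FFF */

static void set_msr_intercept(uint8_t *bitmap, uint32_t msr,
		bool on_read, bool on_write)
{
	uint32_t byte = (msr & 0x1FFFU) >> 3U;
	uint8_t bit = (uint8_t)(1U << (msr & 0x7U));

	if (on_read) {
		bitmap[MSR_BITMAP_READ_LOW + byte] |= bit;
	} else {
		bitmap[MSR_BITMAP_READ_LOW + byte] &= (uint8_t)~bit;
	}
	if (on_write) {
		bitmap[MSR_BITMAP_WRITE_LOW + byte] |= bit;
	} else {
		bitmap[MSR_BITMAP_WRITE_LOW + byte] &= (uint8_t)~bit;
	}
}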
@@ -27,6 +27,7 @@ static uint64_t cr4_always_on_mask;
 static uint64_t cr4_always_off_mask;

 void update_msr_bitmap_x2apic_apicv(struct vcpu *vcpu);
+void update_msr_bitmap_x2apic_passthru(struct vcpu *vcpu);

 bool is_vmx_disabled(void)
 {
@@ -1055,6 +1056,7 @@ void init_vmcs(struct vcpu *vcpu)
	init_exit_ctrl();
 }

+#ifndef CONFIG_PARTITION_MODE
 void switch_apicv_mode_x2apic(struct vcpu *vcpu)
 {
	uint32_t value32;
|
|||||||
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
|
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
|
||||||
update_msr_bitmap_x2apic_apicv(vcpu);
|
update_msr_bitmap_x2apic_apicv(vcpu);
|
||||||
}
|
}
|
||||||
|
#else
|
||||||
|
void switch_apicv_mode_x2apic(struct vcpu *vcpu)
|
||||||
|
{
|
||||||
|
uint32_t value32;
|
||||||
|
if(vcpu->vm->vm_desc->lapic_pt) {
|
||||||
|
/*
|
||||||
|
* Disable external interrupt exiting and irq ack
|
||||||
|
* Disable posted interrupt processing
|
||||||
|
* update x2apic msr bitmap for pass-thru
|
||||||
|
* enable inteception only for ICR
|
||||||
|
* disable pre-emption for TSC DEADLINE MSR
|
||||||
|
* Disable Register Virtualization and virtual interrupt delivery
|
||||||
|
* Disable "use TPR shadow"
|
||||||
|
*/
|
||||||
|
|
||||||
|
value32 = exec_vmread32(VMX_PIN_VM_EXEC_CONTROLS);
|
||||||
|
value32 &= ~VMX_PINBASED_CTLS_IRQ_EXIT;
|
||||||
|
if (is_apicv_posted_intr_supported()) {
|
||||||
|
value32 &= ~VMX_PINBASED_CTLS_POST_IRQ;
|
||||||
|
}
|
||||||
|
exec_vmwrite32(VMX_PIN_VM_EXEC_CONTROLS, value32);
|
||||||
|
|
||||||
|
value32 = exec_vmread32(VMX_EXIT_CONTROLS);
|
||||||
|
value32 &= ~VMX_EXIT_CTLS_ACK_IRQ;
|
||||||
|
exec_vmwrite32(VMX_EXIT_CONTROLS, value32);
|
||||||
|
|
||||||
|
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
|
||||||
|
value32 &= ~VMX_PROCBASED_CTLS_TPR_SHADOW;
|
||||||
|
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
|
||||||
|
|
||||||
|
exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
|
||||||
|
|
||||||
|
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
|
||||||
|
value32 &= ~VMX_PROCBASED_CTLS2_VAPIC_REGS;
|
||||||
|
if (is_apicv_intr_delivery_supported()) {
|
||||||
|
value32 &= ~VMX_PROCBASED_CTLS2_VIRQ;
|
||||||
|
}
|
||||||
|
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
|
||||||
|
|
||||||
|
update_msr_bitmap_x2apic_passthru(vcpu);
|
||||||
|
} else {
|
||||||
|
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
|
||||||
|
value32 &= ~VMX_PROCBASED_CTLS2_VAPIC;
|
||||||
|
value32 |= VMX_PROCBASED_CTLS2_VX2APIC;
|
||||||
|
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
|
||||||
|
update_msr_bitmap_x2apic_apicv(vcpu);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
@@ -193,6 +193,7 @@ struct vm_description {
	bool vm_vuart;
	const char *bootargs;
	struct vpci_vdev_array *vpci_vdev_array;
+	bool lapic_pt;
 #endif
 };

@@ -183,6 +183,7 @@ struct vm_description_array vm_desc_partition = {
				consoleblank=0 tsc=reliable xapic_phys",
		.vpci_vdev_array = &vpci_vdev_array2,
		.mptable = &mptable_vm2,
+		.lapic_pt = true,
	},
	}
 };