hv: enable APICv features based on CPU capability

This patch detects and enables only those APICv features that are actually
supported by the processor, instead of turning on all features by default.

Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Yonghua Huang 2018-03-22 01:17:24 +08:00 committed by Anthony Xu
parent c8dc56e08b
commit 0a5806ea8e
7 changed files with 123 additions and 62 deletions
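
In short, the new vapic_cap_detect() consults the VMX capability MSRs and records only those controls whose allowed-1 bits are set, and callers query individual features through small predicates instead of the old single apicv_supported flag. The condensed sketch below is a readability aid only, not the exact committed code: it reuses the existing is_ctrl_setting_allowed() helper shown unchanged in the diff, the cpu_caps structure, the VAPIC_FEATURE_* bits, the MSR_IA32_VMX_* and VMX_*_CTLS* constants, and the hypervisor's msr_read() routine, all of which appear in the hunks that follow.

/* A control can be enabled only if its allowed-1 bit (in the upper 32 bits
 * of the corresponding IA32_VMX_* capability MSR) is set.
 */
static bool is_ctrl_setting_allowed(uint64_t msr_val, uint32_t ctrl)
{
	return ((((uint32_t)(msr_val >> 32)) & ctrl) == ctrl);
}

static void vapic_cap_detect(void)
{
	uint8_t features = 0;
	uint64_t msr_val;

	/* Without TPR shadow, no other APICv feature is recorded. */
	msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
	if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS_TPR_SHADOW)) {
		cpu_caps.vapic_features = 0;
		return;
	}
	features |= VAPIC_FEATURE_TPR_SHADOW;

	/* Secondary controls: APIC-access page, APIC-register virtualization,
	 * virtual-x2APIC mode and virtual-interrupt delivery.
	 */
	msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
	if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC)) {
		cpu_caps.vapic_features = features;
		return;
	}
	features |= VAPIC_FEATURE_VIRT_ACCESS;

	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS))
		features |= VAPIC_FEATURE_VIRT_REG;
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC))
		features |= VAPIC_FEATURE_VX2APIC_MODE;

	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VIRQ)) {
		features |= VAPIC_FEATURE_INTR_DELIVERY;
		/* Posted interrupts additionally require the pin-based control. */
		msr_val = msr_read(MSR_IA32_VMX_PINBASED_CTLS);
		if (is_ctrl_setting_allowed(msr_val, VMX_PINBASED_CTLS_POST_IRQ))
			features |= VAPIC_FEATURE_POST_INTR;
	}

	cpu_caps.vapic_features = features;
}

Code paths that previously tested is_apicv_enabled() now use the narrowest predicate they need: is_vapic_supported() for the APIC-access page and TPR shadow, is_vapic_virt_reg_supported() for APIC-register virtualization, and is_vapic_intr_delivery_supported() for virtual-interrupt delivery and posted interrupts.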

@@ -64,16 +64,24 @@ DEFINE_CPU_DATA(void *, vcpu);
DEFINE_CPU_DATA(int, state);
/* TODO: add more capability per requirement */
/*APICv features*/
#define VAPIC_FEATURE_VIRT_ACCESS (1 << 0)
#define VAPIC_FEATURE_VIRT_REG (1 << 1)
#define VAPIC_FEATURE_INTR_DELIVERY (1 << 2)
#define VAPIC_FEATURE_TPR_SHADOW (1 << 3)
#define VAPIC_FEATURE_POST_INTR (1 << 4)
#define VAPIC_FEATURE_VX2APIC_MODE (1 << 5)
struct cpu_capability {
bool tsc_adjust_supported;
bool ibrs_ibpb_supported;
bool stibp_supported;
bool apicv_supported;
uint8_t vapic_features;
bool monitor_supported;
};
static struct cpu_capability cpu_caps;
static void apicv_cap_detect(void);
static void vapic_cap_detect(void);
static void monitor_cap_detect(void);
static void cpu_set_logical_id(uint32_t logical_id);
static void print_hv_banner(void);
@@ -309,7 +317,7 @@ void bsp_boot_init(void)
check_cpu_capability();
apicv_cap_detect();
vapic_cap_detect();
monitor_cap_detect();
@@ -597,31 +605,58 @@ static bool is_ctrl_setting_allowed(uint64_t msr_val, uint32_t ctrl)
return ((((uint32_t)(msr_val >> 32)) & ctrl) == ctrl);
}
static void apicv_cap_detect(void)
static void vapic_cap_detect(void)
{
uint64_t val64;
uint32_t ctrl;
bool result;
uint8_t features;
uint64_t msr_val;
ctrl = VMX_PROCBASED_CTLS_TPR_SHADOW;
val64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
features = 0;
result = is_ctrl_setting_allowed(val64, ctrl);
if (result) {
ctrl = VMX_PROCBASED_CTLS2_VAPIC |
VMX_PROCBASED_CTLS2_VAPIC_REGS |
VMX_PROCBASED_CTLS2_VIRQ;
msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS_TPR_SHADOW)) {
cpu_caps.vapic_features = 0;
return;
}
features |= VAPIC_FEATURE_TPR_SHADOW;
val64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
result = is_ctrl_setting_allowed(val64, ctrl);
msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
if (!is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC)) {
cpu_caps.vapic_features = features;
return;
}
features |= VAPIC_FEATURE_VIRT_ACCESS;
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS))
features |= VAPIC_FEATURE_VIRT_REG;
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC))
features |= VAPIC_FEATURE_VX2APIC_MODE;
if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VIRQ)) {
features |= VAPIC_FEATURE_INTR_DELIVERY;
msr_val = msr_read(MSR_IA32_VMX_PINBASED_CTLS);
if (is_ctrl_setting_allowed(msr_val,
VMX_PINBASED_CTLS_POST_IRQ))
features |= VAPIC_FEATURE_POST_INTR;
}
cpu_caps.apicv_supported = result;
cpu_caps.vapic_features = features;
}
bool is_apicv_enabled(void)
bool is_vapic_supported(void)
{
return cpu_caps.apicv_supported;
return ((cpu_caps.vapic_features & VAPIC_FEATURE_VIRT_ACCESS) != 0);
}
bool is_vapic_intr_delivery_supported(void)
{
return ((cpu_caps.vapic_features & VAPIC_FEATURE_INTR_DELIVERY) != 0);
}
bool is_vapic_virt_reg_supported(void)
{
return ((cpu_caps.vapic_features & VAPIC_FEATURE_VIRT_REG) != 0);
}
static void monitor_cap_detect(void)

@@ -2080,39 +2080,42 @@ int vlapic_create(struct vcpu *vcpu)
vlapic->vcpu = vcpu;
vlapic->apic_page = (struct lapic *) apic_page;
if (is_apicv_enabled()) {
vlapic->ops.apicv_set_intr_ready = apicv_set_intr_ready;
vlapic->ops.apicv_pending_intr = apicv_pending_intr;
vlapic->ops.apicv_set_tmr = apicv_set_tmr;
vlapic->ops.apicv_batch_set_tmr = apicv_batch_set_tmr;
if (is_vapic_supported()) {
if (is_vapic_intr_delivery_supported()) {
vlapic->ops.apicv_set_intr_ready =
apicv_set_intr_ready;
vlapic->ops.apicv_pending_intr =
apicv_pending_intr;
vlapic->ops.apicv_set_tmr = apicv_set_tmr;
vlapic->ops.apicv_batch_set_tmr =
apicv_batch_set_tmr;
vlapic->pir_desc = (struct pir_desc *)(&(vlapic->pir));
}
vlapic->pir_desc =
(struct pir_desc *)(&(vlapic->pir));
if (is_vcpu_bsp(vcpu)) {
ept_mmap(vcpu->vm,
apicv_get_apic_access_addr(vcpu->vm),
DEFAULT_APIC_BASE,
CPU_PAGE_SIZE,
MAP_MMIO,
MMU_MEM_ATTR_WRITE |
MMU_MEM_ATTR_READ |
MMU_MEM_ATTR_UNCACHED);
apicv_get_apic_access_addr(vcpu->vm),
DEFAULT_APIC_BASE, CPU_PAGE_SIZE, MAP_MMIO,
MMU_MEM_ATTR_WRITE | MMU_MEM_ATTR_READ |
MMU_MEM_ATTR_UNCACHED);
}
}
vcpu->arch_vcpu.vlapic = vlapic;
vlapic_init(vlapic);
if (!is_apicv_enabled()) {
return register_mmio_emulation_handler(vcpu->vm,
} else {
/*No APICv support*/
if (register_mmio_emulation_handler(vcpu->vm,
vlapic_mmio_access_handler,
(uint64_t)DEFAULT_APIC_BASE,
(uint64_t)DEFAULT_APIC_BASE +
CPU_PAGE_SIZE,
(void *) 0);
(void *) 0))
return -1;
}
vcpu->arch_vcpu.vlapic = vlapic;
vlapic_init(vlapic);
return 0;
}
@@ -2131,7 +2134,7 @@ void vlapic_free(struct vcpu *vcpu)
if (vlapic->last_timer > 0)
cancel_timer(vlapic->last_timer, vcpu->pcpu_id);
if (!is_apicv_enabled()) {
if (!is_vapic_supported()) {
unregister_mmio_emulation_handler(vcpu->vm,
(uint64_t)DEFAULT_APIC_BASE,
(uint64_t)DEFAULT_APIC_BASE + CPU_PAGE_SIZE);
@@ -2451,3 +2454,9 @@ int apicv_write_exit_handler(struct vcpu *vcpu)
return handled;
}
int apic_tpr_below_threshold_exit_handler(__unused struct vcpu *vcpu)
{
pr_err("Unhandled %s,", __func__);
return 0;
}

@@ -141,7 +141,7 @@ static int vcpu_do_pending_event(struct vcpu *vcpu)
int vector = 0;
int ret = 0;
if (is_apicv_enabled()) {
if (is_vapic_intr_delivery_supported()) {
apicv_inject_pir(vlapic);
return 0;
}

@@ -123,7 +123,7 @@ static const struct vm_exit_dispatch dispatch_table[] = {
[VMX_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK] = {
.handler = unhandled_vmexit_handler},
[VMX_EXIT_REASON_TPR_BELOW_THRESHOLD] = {
.handler = unhandled_vmexit_handler},
.handler = apic_tpr_below_threshold_exit_handler},
[VMX_EXIT_REASON_APIC_ACCESS] = {
.handler = apic_access_exit_handler,
.need_exit_qualification = 1},

@@ -901,7 +901,7 @@ static void init_exec_ctrl(struct vcpu *vcpu)
value32 &= ~(VMX_PROCBASED_CTLS_CR3_LOAD |
VMX_PROCBASED_CTLS_CR3_STORE);
if (is_apicv_enabled()) {
if (is_vapic_supported()) {
value32 |= VMX_PROCBASED_CTLS_TPR_SHADOW;
} else {
/* Add CR8 VMExit for vlapic */
@@ -922,17 +922,30 @@ static void init_exec_ctrl(struct vcpu *vcpu)
/* VMX_PROCBASED_CTLS2_RDTSCP | */
VMX_PROCBASED_CTLS2_UNRESTRICT);
if (is_apicv_enabled()) {
value32 |=
(VMX_PROCBASED_CTLS2_VAPIC |
VMX_PROCBASED_CTLS2_VAPIC_REGS |
VMX_PROCBASED_CTLS2_VIRQ);
if (is_vapic_supported()) {
value32 |= VMX_PROCBASED_CTLS2_VAPIC;
if (is_vapic_virt_reg_supported())
value32 |= VMX_PROCBASED_CTLS2_VAPIC_REGS;
if (is_vapic_intr_delivery_supported())
value32 |= VMX_PROCBASED_CTLS2_VIRQ;
else
/*
* This field exists only on processors that support
* the 1-setting of the "use TPR shadow"
* VM-execution control.
*
* Set up TPR threshold for virtual interrupt delivery
* - pg 2904 24.6.8
*/
exec_vmwrite(VMX_TPR_THRESHOLD, 0);
}
exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS2, value32);
pr_dbg("VMX_PROC_VM_EXEC_CONTROLS2: 0x%x ", value32);
if (is_apicv_enabled()) {
if (is_vapic_supported()) {
/*APIC-v, config APIC-access address*/
value64 = apicv_get_apic_access_addr(vcpu->vm);
exec_vmwrite64(VMX_APIC_ACCESS_ADDR_FULL,
@@ -943,10 +956,16 @@ static void init_exec_ctrl(struct vcpu *vcpu)
exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL,
value64);
exec_vmwrite64(VMX_EOI_EXIT0_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT1_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT2_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT3_FULL, -1UL);
if (is_vapic_intr_delivery_supported()) {
/* these fields are supported only on processors
* that support the 1-setting of the "virtual-interrupt
* delivery" VM-execution control
*/
exec_vmwrite64(VMX_EOI_EXIT0_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT1_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT2_FULL, -1UL);
exec_vmwrite64(VMX_EOI_EXIT3_FULL, -1UL);
}
}
/* Check for EPT support */
@@ -989,11 +1008,6 @@ static void init_exec_ctrl(struct vcpu *vcpu)
*/
exec_vmwrite(VMX_CR3_TARGET_COUNT, 0);
/* Set up TPR threshold for virtual interrupt delivery * - pg 2904
* 24.6.8
*/
exec_vmwrite(VMX_TPR_THRESHOLD, 0);
/* Set up IO bitmap register A and B - pg 2902 24.6.4 */
value64 = (int64_t) vm->arch_vm.iobitmap[0];
exec_vmwrite64(VMX_IO_BITMAP_A_FULL, value64);

@@ -225,7 +225,9 @@ int hv_main(int cpu_id);
bool check_tsc_adjust_support(void);
bool check_ibrs_ibpb_support(void);
bool check_stibp_support(void);
bool is_apicv_enabled(void);
bool is_vapic_supported(void);
bool is_vapic_intr_delivery_supported(void);
bool is_vapic_virt_reg_supported(void);
/* Read control register */
#define CPU_CR_READ(cr, result_ptr) \

@@ -128,6 +128,7 @@ void apicv_inject_pir(struct vlapic *vlapic);
int apic_access_exit_handler(struct vcpu *vcpu);
int apicv_write_exit_handler(struct vcpu *vcpu);
int apicv_virtualized_eoi_exit_handler(struct vcpu *vcpu);
int apic_tpr_below_threshold_exit_handler(struct vcpu *vcpu);
void calcvdest(struct vm *vm, uint64_t *dmask, uint32_t dest, bool phys);
#endif /* _VLAPIC_H_ */