acrn-hypervisor/hypervisor/common/hv_main.c
Sainath Grandhi 536c69b9ff hv: distinguish between LAPIC_PASSTHROUGH configured vs enabled
ACRN supports LAPIC emulation for guests using x86 APICv. When the guest
OS/BIOS switches the LAPIC from xAPIC to x2APIC mode of operation, ACRN also
supports switching from LAPIC emulation to LAPIC passthrough to the guest. A
user/developer needs to set GUEST_FLAG_LAPIC_PASSTHROUGH in guest_flags in the
corresponding VM's config for ACRN to enable LAPIC passthrough.
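
For illustration only, a minimal self-contained sketch of such a config entry
follows; the struct layout and the flag's bit position are assumptions, and
only guest_flags / GUEST_FLAG_LAPIC_PASSTHROUGH come from the actual config
interface:

	#include <stdint.h>

	/* Stand-in definitions; the real ones live in ACRN's vm_config
	 * headers. The bit position of the flag is an assumption. */
	#define GUEST_FLAG_LAPIC_PASSTHROUGH	(1UL << 1U)

	struct acrn_vm_config {
		uint64_t guest_flags;	/* GUEST_FLAG_* bits for this VM */
		/* other fields elided */
	};

	/* A VM configured for LAPIC passthrough; ACRN only switches the
	 * vLAPIC from emulation to passthrough once the guest moves its
	 * LAPIC into x2APIC mode. */
	static struct acrn_vm_config demo_vm_config = {
		.guest_flags = GUEST_FLAG_LAPIC_PASSTHROUGH,
	};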

This patch does the following:

1) Fixes a bug in the above-mentioned feature. For a guest configured with
GUEST_FLAG_LAPIC_PASSTHROUGH, virtual interrupts were not delivered during the
period in which the guest was still using the xAPIC mode of the LAPIC. This
can manifest as a guest hang when it does not receive virtual timer interrupts.
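
The fix hinges on the distinction in the commit title. A sketch of the two
predicates follows; the stand-in struct, its field names, and the flag's bit
position are assumptions for illustration (only is_lapic_pt_enabled() appears
in the code below):

	#include <stdbool.h>
	#include <stdint.h>

	#define GUEST_FLAG_LAPIC_PASSTHROUGH	(1UL << 1U)	/* bit assumed */

	/* Stand-in for ACRN's struct acrn_vm; real field names differ. */
	struct acrn_vm {
		uint64_t guest_flags;
		bool lapic_in_x2apic;	/* set when the guest switches modes */
	};

	/* "Configured": the VM's config requests LAPIC passthrough. */
	static inline bool is_lapic_pt_configured(const struct acrn_vm *vm)
	{
		return (vm->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL;
	}

	/* "Enabled": configured AND the guest has entered x2APIC mode.
	 * While the guest is still in xAPIC mode this returns false, so
	 * the loop in vcpu_thread() keeps delivering virtual interrupts. */
	static inline bool is_lapic_pt_enabled(const struct acrn_vm *vm)
	{
		return is_lapic_pt_configured(vm) && vm->lapic_in_x2apic;
	}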

2) ACRN used to expose the physical topology via CPUID leaf 0xB to LAPIC
passthrough VMs. This patch removes that special case and exposes the virtual
topology via CPUID leaf 0xB instead.
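
As a rough sketch of what exposing a "virtual topology" through leaf 0xB
means, the function below reports a flat, one-thread-per-level layout purely
for illustration; ACRN's actual enumeration may differ, and the helper name
and signature are hypothetical:

	#include <stdint.h>

	/* Sketch: answer CPUID leaf 0xB with virtual values. EDX must
	 * carry the vCPU's *virtual* x2APIC ID, not the physical one. */
	static void guest_cpuid_leaf_0b(uint32_t virt_apic_id, uint32_t subleaf,
					uint32_t *eax, uint32_t *ebx,
					uint32_t *ecx, uint32_t *edx)
	{
		if (subleaf == 0U) {
			*eax = 0U;	/* APIC-ID shift to the next level */
			*ebx = 1U;	/* logical processors at this level */
			*ecx = (1U << 8U) | subleaf;	/* level type 1 = SMT */
		} else {
			*eax = 0U;
			*ebx = 0U;	/* 0 => this level does not exist */
			*ecx = subleaf;	/* level type 0 = invalid */
		}
		*edx = virt_apic_id;	/* virtual topology, not physical */
	}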

Tracked-On: #3136
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
2019-05-23 11:15:31 +08:00


/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <vm.h>
#include <vm_reset.h>
#include <vmcs.h>
#include <vmexit.h>
#include <irq.h>
#include <schedule.h>
#include <softirq.h>
#include <profiling.h>
#include <trace.h>
#include <logmsg.h>

void vcpu_thread(struct sched_object *obj)
{
	struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
	uint32_t basic_exit_reason = 0U;
	int32_t ret = 0;

	do {
		/* If the vcpu is not launched yet, init_vmcs must run first */
		if (!vcpu->launched) {
			init_vmcs(vcpu);
		}

		if (!is_lapic_pt_enabled(vcpu->vm)) {
			/* handle pending softirqs while IRQs are enabled */
			do_softirq();
			CPU_IRQ_DISABLE();
			/* catch softirqs raised between the call above and
			 * disabling IRQs */
			do_softirq();
		}

		/* Check and process pending requests (including interrupts) */
		ret = acrn_handle_pending_request(vcpu);
		if (ret < 0) {
			pr_fatal("vcpu handling pending request failed");
			pause_vcpu(vcpu, VCPU_ZOMBIE);
			continue;
		}

		if (need_reschedule(vcpu->pcpu_id)) {
			schedule();
			continue;
		}

		profiling_vmenter_handler(vcpu);

		TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
		ret = run_vcpu(vcpu);
		if (ret != 0) {
			pr_fatal("vcpu resume failed");
			pause_vcpu(vcpu, VCPU_ZOMBIE);
			continue;
		}

		basic_exit_reason = vcpu->arch.exit_reason & 0xFFFFU;
		TRACE_2L(TRACE_VM_EXIT, basic_exit_reason, vcpu_get_rip(vcpu));

		vcpu->arch.nrexits++;

		profiling_pre_vmexit_handler(vcpu);

		/* host IRQs are only toggled when the LAPIC is emulated */
		if (!is_lapic_pt_enabled(vcpu->vm)) {
			CPU_IRQ_ENABLE();
		}

		/* Dispatch handler */
		ret = vmexit_handler(vcpu);
		if (ret < 0) {
			pr_fatal("dispatch VM exit handler failed for reason"
				" %d, ret = %d!", basic_exit_reason, ret);
			vcpu_inject_gp(vcpu, 0U);
			continue;
		}

		profiling_post_vmexit_handler(vcpu);
	} while (1);
}

void default_idle(__unused struct sched_object *obj)
{
	uint16_t pcpu_id = get_pcpu_id();

	while (1) {
		if (need_reschedule(pcpu_id)) {
			schedule();
		} else if (need_offline(pcpu_id) != 0) {
			cpu_dead();
		} else if (need_shutdown_vm(pcpu_id)) {
			shutdown_vm_from_idle(pcpu_id);
		} else {
			CPU_IRQ_ENABLE();
			cpu_do_idle();
			CPU_IRQ_DISABLE();
		}
	}
}