acrn-hypervisor/hypervisor/common/hv_main.c
Yifan Liu 5c9456462b hv && config-tool: Add compilation option to disable all interrupts in HV
This patch adds a compilation option CONFIG_KEEP_IRQ_DISABLED (default 'n')
to the hypervisor and the config-tool. When this option is set to 'y', all
interrupts in hv root mode are permanently disabled.

With this option set to 'y', any interrupt received in root mode is handled
via the external interrupt vmexit taken after the next VM entry. The latency
added by this postponement is negligible. The new configuration is required
by x86 TEE's secure/non-secure interrupt flow support, and keeping IRQs off
in root mode avoids many race conditions.
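
An illustrative sketch of the config-guarded helpers used in the code below,
assuming the existing CPU_IRQ_ENABLE()/CPU_IRQ_DISABLE() helpers; the real
definitions belong to the arch cpu header and may differ from this sketch:

    #ifdef CONFIG_KEEP_IRQ_DISABLED
    /* IRQs are never re-enabled in root mode: both helpers are no-ops */
    #define CPU_IRQ_ENABLE_ON_CONFIG()   do { } while (0)
    #define CPU_IRQ_DISABLE_ON_CONFIG()  do { } while (0)
    #else
    /* default behavior: toggle IRQs around VM entry/exit and idle */
    #define CPU_IRQ_ENABLE_ON_CONFIG()   CPU_IRQ_ENABLE()
    #define CPU_IRQ_DISABLE_ON_CONFIG()  CPU_IRQ_DISABLE()
    #endif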

v5:
Rename CONFIG_ACRN_KEEP_IRQ_DISABLED to CONFIG_KEEP_IRQ_DISABLED

v4:
Change CPU_IRQ_ENABLE/DISABLE to
CPU_IRQ_ENABLE_ON_CONFIG/DISABLE_ON_CONFIG and guard them using
CONFIG_ACRN_KEEP_IRQ_DISABLED

v3:
CONFIG_ACRN_DISABLE_INTERRUPT -> CONFIG_ACRN_KEEP_IRQ_DISABLED
Add more detail to the commit message

Tracked-On: #6571
Signed-off-by: Yifan Liu <yifan1.liu@intel.com>
Reviewed-by: Wang, Yu1 <yu1.wang@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
2021-12-10 09:50:17 +08:00

115 lines
2.7 KiB
C

/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <asm/guest/vm.h>
#include <asm/guest/vm_reset.h>
#include <asm/guest/vmcs.h>
#include <asm/guest/vmexit.h>
#include <asm/guest/virq.h>
#include <schedule.h>
#include <profiling.h>
#include <sprintf.h>
#include <trace.h>
#include <logmsg.h>
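
/*
 * Per-vCPU thread entry: the vCPU run loop, pairing each VM entry
 * (run_vcpu) with the dispatch of the resulting VM exit.  Unless LAPIC
 * pass-through is enabled for this vCPU, IRQs are disabled across the
 * VM entry path via CPU_IRQ_DISABLE_ON_CONFIG(); with
 * CONFIG_KEEP_IRQ_DISABLED=y they stay disabled throughout.
 */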
void vcpu_thread(struct thread_object *obj)
{
struct acrn_vcpu *vcpu = container_of(obj, struct acrn_vcpu, thread_obj);
int32_t ret = 0;
do {
if (!is_lapic_pt_enabled(vcpu)) {
CPU_IRQ_DISABLE_ON_CONFIG();
}
/* Don't open interrupt window between here and vmentry */
if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
schedule();
}
/* Check and process pending requests(including interrupt) */
ret = acrn_handle_pending_request(vcpu);
if (ret < 0) {
pr_fatal("vcpu handling pending request fail");
get_vm_lock(vcpu->vm);
zombie_vcpu(vcpu, VCPU_ZOMBIE);
put_vm_lock(vcpu->vm);
/* Fatal error happened (triple fault). Stop the vcpu running. */
continue;
}
reset_event(&vcpu->events[VCPU_EVENT_VIRTUAL_INTERRUPT]);
profiling_vmenter_handler(vcpu);
TRACE_2L(TRACE_VM_ENTER, 0UL, 0UL);
ret = run_vcpu(vcpu);
if (ret != 0) {
pr_fatal("vcpu resume failed");
get_vm_lock(vcpu->vm);
zombie_vcpu(vcpu, VCPU_ZOMBIE);
put_vm_lock(vcpu->vm);
/* Fatal error happened (resume vcpu failed). Stop the vcpu running. */
continue;
}
TRACE_2L(TRACE_VM_EXIT, vcpu->arch.exit_reason, vcpu_get_rip(vcpu));
profiling_pre_vmexit_handler(vcpu);
if (!is_lapic_pt_enabled(vcpu)) {
CPU_IRQ_ENABLE_ON_CONFIG();
}
/* Dispatch handler */
ret = vmexit_handler(vcpu);
if (ret < 0) {
pr_fatal("dispatch VM exit handler failed for reason"
" %d, ret = %d!", vcpu->arch.exit_reason, ret);
vcpu_inject_gp(vcpu, 0U);
continue;
}
profiling_post_vmexit_handler(vcpu);
} while (1);
}
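
/*
 * Idle thread body: when nothing is runnable on this pCPU, handle
 * reschedule, offline and VM-shutdown requests; otherwise enter the
 * idle state via cpu_do_idle() with IRQs enabled, unless
 * CONFIG_KEEP_IRQ_DISABLED keeps them off.
 */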
void default_idle(__unused struct thread_object *obj)
{
uint16_t pcpu_id = get_pcpu_id();
while (1) {
if (need_reschedule(pcpu_id)) {
schedule();
} else if (need_offline(pcpu_id)) {
cpu_dead();
} else if (need_shutdown_vm(pcpu_id)) {
shutdown_vm_from_idle(pcpu_id);
} else {
CPU_IRQ_ENABLE_ON_CONFIG();
cpu_do_idle();
CPU_IRQ_DISABLE_ON_CONFIG();
}
}
}
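
/*
 * Switch the current pCPU to its per-CPU idle thread.  run_thread()
 * does not return here; cpu_dead() is only a safety net.
 */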
void run_idle_thread(void)
{
uint16_t pcpu_id = get_pcpu_id();
struct thread_object *idle = &per_cpu(idle, pcpu_id);
char idle_name[16];
snprintf(idle_name, 16U, "idle%hu", pcpu_id);
(void)strncpy_s(idle->name, 16U, idle_name, 16U);
idle->pcpu_id = pcpu_id;
idle->thread_entry = default_idle;
idle->switch_out = NULL;
idle->switch_in = NULL;
idle->priority = PRIO_IDLE;
run_thread(idle);
/* Control should not come here */
cpu_dead();
}