acrn-hypervisor/hypervisor/arch/x86/notify.c
dongshen 14fa9c563c hv: define posted interrupt IRQs/vectors
This is a preparation patch for adding support for VT-d PI
related vCPU scheduling.

ACRN does not support vCPU migration; a vCPU always runs on the same
pCPU, so the PI descriptor's NDST (notification destination) field
never changes after startup.

vCPUs of the same VM never share a pCPU, so the maximum number of vCPUs
that can run on a given pCPU is CONFIG_MAX_VM_NUM (at most one per VM).

Allocate a unique Activation Notification Vector (ANV) for each vCPU
running on a given pCPU; the ANVs only need to be unique within a pCPU,
not across all vCPUs. This reduces the number of pre-allocated ANVs for
posted interrupts to CONFIG_MAX_VM_NUM, and enables ACRN to avoid
switching between the active and wake-up vector values in the posted
interrupt descriptor on vCPU scheduling state changes.

A total of CONFIG_MAX_VM_NUM consecutive IRQs/vectors are reserved for
posted-interrupt use.
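
For illustration only, a minimal sketch of the mapping implied above: each
VM gets a fixed offset from the base of the reserved range, so every vCPU
on a given pCPU (at most one per VM) ends up with a distinct ANV. The
helper name and POSTED_INTR_VECTOR as the base vector backing
POSTED_INTR_IRQ are assumptions here, not taken from this patch.

  static inline uint32_t posted_intr_vector_of_vm(uint16_t vm_id)
  {
          /* per-VM offset into the reserved, consecutive vector range */
          return POSTED_INTR_VECTOR + (uint32_t)vm_id;
  }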

vcpu->arch.pid.control.bits.nv is initialized dynamically for each vCPU
(the initialization code will be added in a subsequent patch); other code
shall use vcpu->arch.pid.control.bits.nv instead of a hard-coded
notification vector.
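
As a hedged sketch of where this series is heading (the real code lands in
later patches; the vm_id-based offset, send_single_ipi(), and the body shown
for the renamed trigger helper are assumptions, not taken from this patch):

  /* at vCPU creation: store the per-pCPU-unique ANV in the PI descriptor */
  vcpu->arch.pid.control.bits.nv = POSTED_INTR_VECTOR + vcpu->vm->vm_id;

  /* the notification path then uses the stored ANV, not a fixed vector */
  void apicv_trigger_pi_anv(uint16_t dest_pcpu_id, uint32_t anv)
  {
          send_single_ipi(dest_pcpu_id, anv);
  }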

Rename some functions:
  apicv_post_intr --> apicv_trigger_pi_anv
  posted_intr_notification --> handle_pi_notification
  setup_posted_intr_notification --> setup_pi_notification

Tracked-On: #4506
Signed-off-by: dongshen <dongsheng.x.zhang@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@Intel.com>
2020-04-15 13:47:22 +08:00


/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <types.h>
#include <errno.h>
#include <bits.h>
#include <atomic.h>
#include <irq.h>
#include <cpu.h>
#include <per_cpu.h>
#include <lapic.h>
#include <vm.h>

static uint32_t notification_irq = IRQ_INVALID;

static uint64_t smp_call_mask = 0UL;

/* run in interrupt context */
static void kick_notification(__unused uint32_t irq, __unused void *data)
{
	/* Notification vector is used to kick the target cpu out of non-root mode.
	 * It also serves for smp call.
	 */
	uint16_t pcpu_id = get_pcpu_id();

	if (bitmap_test(pcpu_id, &smp_call_mask)) {
		struct smp_call_info_data *smp_call =
			&per_cpu(smp_call_info, pcpu_id);

		if (smp_call->func != NULL) {
			smp_call->func(smp_call->data);
		}
		bitmap_clear_lock(pcpu_id, &smp_call_mask);
	}
}

void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
{
	uint16_t pcpu_id;
	struct smp_call_info_data *smp_call;

	/* wait for the previous smp call to complete; it may be running on other cpus */
	while (atomic_cmpxchg64(&smp_call_mask, 0UL, mask) != 0UL);
	pcpu_id = ffs64(mask);
	while (pcpu_id < MAX_PCPU_NUM) {
		bitmap_clear_nolock(pcpu_id, &mask);
		if (is_pcpu_active(pcpu_id)) {
			smp_call = &per_cpu(smp_call_info, pcpu_id);
			smp_call->func = func;
			smp_call->data = data;
		} else {
			/* pcpu is not active: report the error and drop it from the call mask */
			pr_err("pcpu_id %d is not active!", pcpu_id);
			bitmap_clear_nolock(pcpu_id, &smp_call_mask);
		}
		pcpu_id = ffs64(mask);
	}
	send_dest_ipi_mask((uint32_t)smp_call_mask, NOTIFY_VCPU_VECTOR);
	/* wait for the current smp call to complete */
	wait_sync_change(&smp_call_mask, 0UL);
}
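
/*
 * Illustrative usage only (not part of this file): a caller sets one bit per
 * target pCPU and passes a callback; smp_call_function() returns once every
 * targeted pCPU has run the callback and cleared its bit in smp_call_mask.
 * The callback below is hypothetical.
 *
 *   static void do_percpu_work(__unused void *data)
 *   {
 *           // per-cpu work runs in kick_notification()'s interrupt context
 *   }
 *
 *   uint64_t mask = (1UL << 1U) | (1UL << 2U);	// target pCPU1 and pCPU2
 *   smp_call_function(mask, do_percpu_work, NULL);
 */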

static int32_t request_notification_irq(irq_action_t func, void *data)
{
	int32_t retval;

	if (notification_irq != IRQ_INVALID) {
		pr_info("%s, Notification vector already allocated on this CPU", __func__);
		retval = -EBUSY;
	} else {
		/* all cpus register the same notification vector */
		retval = request_irq(NOTIFY_VCPU_IRQ, func, data, IRQF_NONE);
		if (retval < 0) {
			pr_err("Failed to add notify isr");
			retval = -ENODEV;
		} else {
			notification_irq = (uint32_t)retval;
		}
	}

	return retval;
}

/*
 * @pre be called only by the BSP initialization process
 */
void setup_notification(void)
{
	/* support IPI notification; SOS_VM will register on all CPUs */
	if (request_notification_irq(kick_notification, NULL) < 0) {
		pr_err("Failed to setup notification");
	}

	dev_dbg(DBG_LEVEL_PTIRQ, "NOTIFY: irq[%d] setup vector %x",
		notification_irq, irq_to_vector(notification_irq));
}

static void handle_pi_notification(__unused uint32_t irq, __unused void *data)
{
}

/*
 * @pre be called only by the BSP initialization process
 */
void setup_pi_notification(void)
{
	uint32_t i;

	/* one notification IRQ/vector per possible vCPU on a pCPU, i.e. per VM */
	for (i = 0U; i < CONFIG_MAX_VM_NUM; i++) {
		if (request_irq(POSTED_INTR_IRQ + i, handle_pi_notification, NULL, IRQF_NONE) < 0) {
			pr_err("Failed to setup pi notification");
			break;
		}
	}
}

/**
 * @brief Check if the NMI is for notification purpose
 *
 * @return true, if the NMI is triggered for notifying vCPU
 * @return false, if the NMI is triggered for other purpose
 */
bool is_notification_nmi(const struct acrn_vm *vm)
{
	bool ret;

	/*
	 * Currently, ACRN doesn't support vNMI well and there is no well-designed
	 * way to check if the NMI is for notification or not. Here we take all the
	 * NMIs as notification NMIs for lapic-pt VMs temporarily.
	 *
	 * TODO: Add a way to check if the NMI is for notification or not in order to support vNMI.
	 */
	ret = is_lapic_pt_configured(vm);

	return ret;
}