diff --git a/hypervisor/Makefile b/hypervisor/Makefile
index d860a9a1d..8614c307a 100644
--- a/hypervisor/Makefile
+++ b/hypervisor/Makefile
@@ -236,6 +236,7 @@ HW_C_SRCS += common/schedule.c
 HW_C_SRCS += common/event.c
 HW_C_SRCS += common/efi_mmap.c
 HW_C_SRCS += common/sbuf.c
+HW_C_SRCS += common/vm_event.c
 ifeq ($(CONFIG_SCHED_NOOP),y)
 HW_C_SRCS += common/sched_noop.c
 endif
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 07da81b75..1ab7eb312 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -787,6 +787,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 	passthrough_smbios(vm, get_acrn_boot_info());
 #endif
 
+	vm->sw.vm_event_sbuf = NULL;
+
 	status = init_vpci(vm);
 	if (status == 0) {
 		enable_iommu();
diff --git a/hypervisor/common/sbuf.c b/hypervisor/common/sbuf.c
index 9c7ef0beb..c5d014090 100644
--- a/hypervisor/common/sbuf.c
+++ b/hypervisor/common/sbuf.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 
 uint32_t sbuf_next_ptr(uint32_t pos_arg,
 		uint32_t span, uint32_t scope)
@@ -93,6 +94,9 @@ int32_t sbuf_setup_common(struct acrn_vm *vm, uint16_t cpu_id, uint32_t sbuf_id,
 	case ACRN_ASYNCIO:
 		ret = init_asyncio(vm, hva);
 		break;
+	case ACRN_VM_EVENT:
+		ret = init_vm_event(vm, hva);
+		break;
 	default:
 		pr_err("%s not support sbuf_id %d", __func__, sbuf_id);
 		ret = -1;
diff --git a/hypervisor/common/vm_event.c b/hypervisor/common/vm_event.c
new file mode 100644
index 000000000..7c9316fdd
--- /dev/null
+++ b/hypervisor/common/vm_event.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020-2023 Intel Corporation.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+int32_t init_vm_event(struct acrn_vm *vm, uint64_t *hva)
+{
+	struct shared_buf *sbuf = (struct shared_buf *)hva;
+	int ret = -1;
+
+	stac();
+	if (sbuf != NULL) {
+		if (sbuf->magic == SBUF_MAGIC) {
+			vm->sw.vm_event_sbuf = sbuf;
+			spinlock_init(&vm->vm_event_lock);
+			ret = 0;
+		}
+	}
+	clac();
+
+	return ret;
+}
+
+int32_t send_vm_event(struct acrn_vm *vm, struct vm_event *event)
+{
+	struct shared_buf *sbuf = (struct shared_buf *)vm->sw.vm_event_sbuf;
+	int32_t ret = -ENODEV;
+	uint32_t size_sent;
+
+	if (sbuf != NULL) {
+		spinlock_obtain(&vm->vm_event_lock);
+		size_sent = sbuf_put(sbuf, (uint8_t *)event);
+		spinlock_release(&vm->vm_event_lock);
+		if (size_sent == sizeof(struct vm_event)) {
+			arch_fire_hsm_interrupt();
+			ret = 0;
+		}
+	}
+	return ret;
+}
diff --git a/hypervisor/include/arch/x86/asm/guest/vm.h b/hypervisor/include/arch/x86/asm/guest/vm.h
index d77f8e3c2..0503ed8b4 100644
--- a/hypervisor/include/arch/x86/asm/guest/vm.h
+++ b/hypervisor/include/arch/x86/asm/guest/vm.h
@@ -71,6 +71,7 @@ struct vm_sw_info {
 	/* HVA to IO shared page */
 	void *io_shared_page;
 	void *asyncio_sbuf;
+	void *vm_event_sbuf;
 	/* If enable IO completion polling mode */
 	bool is_polling_ioreq;
 };
@@ -146,6 +147,7 @@ struct acrn_vm {
 	struct asyncio_desc aio_desc[ACRN_ASYNCIO_MAX];
 	struct list_head aiodesc_queue;
 	spinlock_t asyncio_lock; /* Spin-lock used to protect asyncio add/remove for a VM */
+	spinlock_t vm_event_lock;
 
 	enum vpic_wire_mode wire_mode;
 	struct iommu_domain *iommu;	/* iommu domain of this VM */
diff --git a/hypervisor/include/common/vm_event.h b/hypervisor/include/common/vm_event.h
new file mode 100644
index 000000000..e39aaa76d
--- /dev/null
+++ b/hypervisor/include/common/vm_event.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2019-2023 Intel Corporation.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef VM_EVENT_H
+#define VM_EVENT_H
+
+#include
+#include
+
+int32_t init_vm_event(struct acrn_vm *vm, uint64_t *hva);
+int32_t send_vm_event(struct acrn_vm *vm, struct vm_event *event);
+
+#endif /* VM_EVENT_H */
diff --git a/hypervisor/include/public/acrn_common.h b/hypervisor/include/public/acrn_common.h
index 7c769f28c..3f4741428 100644
--- a/hypervisor/include/public/acrn_common.h
+++ b/hypervisor/include/public/acrn_common.h
@@ -776,6 +776,7 @@ enum {
 	/* The sbuf with above ids are created each pcpu */
 	ACRN_SBUF_PER_PCPU_ID_MAX,
 	ACRN_ASYNCIO = 64,
+	ACRN_VM_EVENT,
 };
 
 /* Make sure sizeof(struct shared_buf) == SBUF_HEAD_SIZE */
@@ -792,6 +793,53 @@ struct shared_buf {
 	uint32_t padding[6];
 };
 
+/**
+ * VM event architecture:
+ * +------------------------------------------------------+
+ * | Service VM                                            |
+ * | +----------------------------+                        |
+ * | | DM  +--------------------+ |                        |
+ * | |     | [ event source ]   | |                        |
+ * | |     +-+------------+-----+ |                        |
+ * | |       | (eventfd)  |(sbuf) |                        |
+ * | |       v            v       |                        |
+ * | | +------------------------+ | (socket)  +---------+  |
+ * | | | [event delivery logic] |-+---------->| Libvirt |  |
+ * | | +------------------------+ |           +---------+  |
+ * | |       ^            ^       |                        |
+ * | +-------|------------|-------+                        |
+ * |         | (eventfd)  | (sbuf)                         |
+ * +---------|------------|--------------------------------+
+ * +---------|------------|--------------------------------+
+ * | kernel [HSM]         |                                |
+ * |         ^            |                                |
+ * +---------|------------|--------------------------------+
+ *           |upcall      |
+ * +---------+------------+--------------------------------+
+ * | HV    [ event source ]                                |
+ * +--------------------------------------------------------+
+ *
+ * For event sources in HV
+ * - HV puts the event in the shared ring sbuf.
+ * - The hypervisor notifies the Service VM via upcall.
+ * - HSM in the Service VM notifies the device model via eventfd.
+ * - The device model fetches and handles events from the shared ring sbuf.
+ * For event sources in DM
+ * - DM puts the event in the DM event ring sbuf.
+ * - DM notifies the event delivery logic via eventfd.
+ * - The event delivery logic fetches and handles events from the DM event ring sbuf.
+ */
+#define VM_EVENT_RTC_CHG 0U
+#define VM_EVENT_POWEROFF 1U
+#define VM_EVENT_TRIPLE_FAULT 2U
+
+#define VM_EVENT_DATA_LEN 28U
+
+struct vm_event {
+	uint32_t type;
+	uint8_t event_data[VM_EVENT_DATA_LEN];
+};
+
 /**
  * @}
  */
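
Usage sketch (commentary, not part of the patch): the snippet below shows how a hypervisor-side event source could report a guest triple fault through the send_vm_event() API added above. The helper name report_triple_fault() and the include paths are illustrative assumptions; struct vm_event, VM_EVENT_TRIPLE_FAULT and send_vm_event() come from this patch, while memset(), pr_warn() and vm->vm_id are existing facilities in the ACRN tree.

/* Hypothetical caller -- illustration only, not introduced by this patch. */
#include <types.h>          /* assumed header for basic types */
#include <asm/guest/vm.h>   /* struct acrn_vm */
#include <vm_event.h>       /* struct vm_event, send_vm_event() */
#include <logmsg.h>         /* assumed header for pr_warn() */

static void report_triple_fault(struct acrn_vm *vm)
{
	struct vm_event event;

	/* This example carries no payload; zero the data area. */
	(void)memset(&event, 0U, sizeof(event));
	event.type = VM_EVENT_TRIPLE_FAULT;

	/*
	 * send_vm_event() returns a negative value when the Service VM has
	 * not set up the ACRN_VM_EVENT sbuf or the element could not be
	 * queued in full, so a failure here only means the event is dropped.
	 */
	if (send_vm_event(vm, &event) != 0) {
		pr_warn("%s: vm%u event dropped", __func__, vm->vm_id);
	}
}

Because send_vm_event() serializes sbuf_put() with the per-VM vm_event_lock, callers like this can run on different pCPUs and still share one ring safely, and the HSM notification (arch_fire_hsm_interrupt()) is only raised once a complete struct vm_event has been queued.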