Mirror of https://github.com/projectacrn/acrn-hypervisor.git
Add pre_user_access() and post_user_access() interfaces to protect MMIO from accidental hypervisor access; the SMAP extension needs to be detected.

Tracked-On: #8831
Signed-off-by: hangliu1 <hang1.liu@intel.com>
Reviewed-by: Liu, Yifan1 <yifan1.liu@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
49 lines
1017 B
C
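The commit message above refers to a pre_user_access()/post_user_access() pair whose implementation is not part of this file. Below is a rough, hedged sketch of the idea only, not the actual ACRN code: guest-owned MMIO and shared pages can be mapped with the user bit set in the hypervisor's own page tables so that SMAP faults on any accidental supervisor access, while intentional accesses are bracketed with STAC/CLAC when the feature is present. The cpu_has_smap() probe and the asm_stac()/asm_clac() helpers are illustrative names, not real ACRN symbols.

/*
 * Hedged sketch only: one plausible shape for the pre_user_access()/
 * post_user_access() pair described in the commit message. Intentional
 * accesses to user-mapped (guest-owned) pages are bracketed by STAC/CLAC
 * so that SMAP still traps any unbracketed, accidental access.
 */
#include <stdbool.h>

/* hypothetical feature probe; real SMAP detection lives in CPUID handling */
extern bool cpu_has_smap(void);

static inline void asm_stac(void)
{
	asm volatile ("stac" : : : "memory");
}

static inline void asm_clac(void)
{
	asm volatile ("clac" : : : "memory");
}

static inline void pre_user_access(void)
{
	if (cpu_has_smap()) {
		asm_stac();	/* set EFLAGS.AC: allow supervisor access to user pages */
	}
}

static inline void post_user_access(void)
{
	if (cpu_has_smap()) {
		asm_clac();	/* clear EFLAGS.AC: re-arm SMAP protection */
	}
}

init_vm_event() in the listing below wraps its reads of the Service-VM-supplied shared buffer in exactly this kind of bracket.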
/*
 * Copyright (C) 2020-2023 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <util.h>
#include <acrn_hv_defs.h>
#include <asm/guest/vm.h>
#include <vm_event.h>
#include <sbuf.h>

int32_t init_vm_event(struct acrn_vm *vm, uint64_t *hva)
{
	struct shared_buf *sbuf = (struct shared_buf *)hva;
	int ret = -1;

	pre_user_access();
	if (sbuf != NULL) {
		if (sbuf->magic == SBUF_MAGIC) {
			vm->sw.vm_event_sbuf = sbuf;
			spinlock_init(&vm->vm_event_lock);
			ret = 0;
		}
	}
	post_user_access();

	return ret;
}

int32_t send_vm_event(struct acrn_vm *vm, struct vm_event *event)
{
	struct shared_buf *sbuf = (struct shared_buf *)vm->sw.vm_event_sbuf;
	int32_t ret = -ENODEV;
	uint32_t size_sent;

	if (sbuf != NULL) {
		spinlock_obtain(&vm->vm_event_lock);
		size_sent = sbuf_put(sbuf, (uint8_t *)event, sizeof(*event));
		spinlock_release(&vm->vm_event_lock);
		if (size_sent == sizeof(struct vm_event)) {
			arch_fire_hsm_interrupt();
			ret = 0;
		}
	}
	return ret;
}
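For orientation, here is a hedged usage sketch, not taken from the ACRN tree, of how a hypervisor-side caller might deliver an event through send_vm_event(). The VM_EVENT_RTC_CHG constant and the type field of struct vm_event are assumptions for illustration; the authoritative layout lives in the public vm_event headers, and the sketch relies on the surrounding ACRN headers for struct acrn_vm, memset() and pr_warn().

/*
 * Hedged usage sketch (not ACRN code): emit an event to the Service VM.
 * Event type and payload layout are assumed; see the public headers.
 */
static void notify_rtc_change_example(struct acrn_vm *vm)
{
	struct vm_event ev;

	(void)memset(&ev, 0U, sizeof(ev));
	ev.type = VM_EVENT_RTC_CHG;	/* assumed event type constant */

	/* non-zero return means the event was not delivered */
	if (send_vm_event(vm, &ev) != 0) {
		pr_warn("vm_event: failed to deliver event to the Service VM");
	}
}

Note that send_vm_event() returns 0 only when the full event fits into the shared ring: if sbuf_put() writes fewer bytes than sizeof(struct vm_event), ret stays at -ENODEV, so callers should treat any non-zero return as a dropped event.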