hv: dm: Use new I/O request data structures

struct vhm_request		->	struct acrn_io_request
union vhm_request_buffer	->	struct acrn_io_request_buffer
struct pio_request		->	struct acrn_pio_request
struct mmio_request		->	struct acrn_mmio_request
struct ioreq_notify		->	struct acrn_ioreq_notify

VHM_REQ_PIO_INVAL		->	IOREQ_PIO_INVAL
VHM_REQ_MMIO_INVAL		->	IOREQ_MMIO_INVAL
REQ_PORTIO			->	ACRN_IOREQ_TYPE_PORTIO
REQ_MMIO			->	ACRN_IOREQ_TYPE_MMIO
REQ_PCICFG			->	ACRN_IOREQ_TYPE_PCICFG
REQ_WP				->	ACRN_IOREQ_TYPE_WP

REQUEST_READ			->	ACRN_IOREQ_DIR_READ
REQUEST_WRITE			->	ACRN_IOREQ_DIR_WRITE
REQ_STATE_PROCESSING		->	ACRN_IOREQ_STATE_PROCESSING
REQ_STATE_PENDING		->	ACRN_IOREQ_STATE_PENDING
REQ_STATE_COMPLETE		->	ACRN_IOREQ_STATE_COMPLETE
REQ_STATE_FREE			->	ACRN_IOREQ_STATE_FREE
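
The DM picks requests up from the shared I/O request page using these
states; a minimal sketch of the polling pattern (mirroring the vm_loop
hunk below, with ioreq_buf and guest_ncpus as declared there):

  for (vcpu_id = 0; vcpu_id < guest_ncpus; vcpu_id++) {
      struct acrn_io_request *io_req = &ioreq_buf[vcpu_id];

      /* Only emulate requests the hypervisor marked PROCESSING and
       * that the HSM kernel module did not already handle. */
      if ((atomic_load(&io_req->processed) == ACRN_IOREQ_STATE_PROCESSING)
          && !io_req->kernel_handled)
          handle_vmexit(ctx, io_req, vcpu_id);
  }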

IC_CREATE_IOREQ_CLIENT		->	ACRN_IOCTL_CREATE_IOREQ_CLIENT
IC_DESTROY_IOREQ_CLIENT		->	ACRN_IOCTL_DESTROY_IOREQ_CLIENT
IC_ATTACH_IOREQ_CLIENT		->	ACRN_IOCTL_ATTACH_IOREQ_CLIENT
IC_NOTIFY_REQUEST_FINISH	->	ACRN_IOCTL_NOTIFY_REQUEST_FINISH
IC_CLEAR_VM_IOREQ		->	ACRN_IOCTL_CLEAR_VM_IOREQ
HYPERVISOR_CALLBACK_VHM_VECTOR	->	HYPERVISOR_CALLBACK_HSM_VECTOR
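
The ioctl call sites keep their shape; only the names and the notify
payload change. A short sketch of the completion path (fields per the
new struct acrn_ioreq_notify, as in the vm_notify_request_done() hunk
below):

  struct acrn_ioreq_notify notify;

  bzero(&notify, sizeof(notify));
  notify.vmid = ctx->vmid;
  notify.vcpu = vcpu;
  error = ioctl(ctx->fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);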

arch_fire_vhm_interrupt()	->	arch_fire_hsm_interrupt()
get_vhm_notification_vector()	->	get_hsm_notification_vector()
set_vhm_notification_vector()	->	set_hsm_notification_vector()
acrn_vhm_notification_vector	->	acrn_hsm_notification_vector
get_vhm_req_state()		->	get_io_req_state()
set_vhm_req_state()		->	set_io_req_state()

The following structures differ slightly from their former counterparts:

  struct acrn_ioreq_notify
  struct acrn_io_request
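
For reference, a sketch of the two layouts. struct acrn_ioreq_notify
matches the header hunk at the end of this diff; the acrn_io_request
layout is an assumption based on the HSM UAPI header, trimmed to the
fields this patch touches:

  struct acrn_ioreq_notify {
      __u16 vmid;
      __u16 reserved;
      __u32 vcpu;
  };

  /* Assumed layout; unreferenced fields elided. */
  struct acrn_io_request {
      __u32 type;
      union {
          struct acrn_pio_request pio_request;
          struct acrn_pci_request pci_request;
          struct acrn_mmio_request mmio_request;
      } reqs;
      __u32 kernel_handled;
      __u32 processed;
  };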

Tracked-On: #6282
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>

@@ -87,7 +87,7 @@ register_default_iohandler(int start, int size)
 }
 int
-emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
+emulate_inout(struct vmctx *ctx, int *pvcpu, struct acrn_pio_request *pio_request)
 {
 	int bytes, flags, in, port;
 	inout_func_t handler;
@@ -95,7 +95,7 @@ emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
 	int retval;
 	bytes = pio_request->size;
-	in = (pio_request->direction == REQUEST_READ);
+	in = (pio_request->direction == ACRN_IOREQ_DIR_READ);
 	port = pio_request->address;
 	if ((port + bytes - 1 >= MAX_IOPORTS) ||
@@ -106,7 +106,7 @@ emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *pio_request)
 	flags = inout_handlers[port].flags;
 	arg = inout_handlers[port].arg;
-	if (pio_request->direction == REQUEST_READ) {
+	if (pio_request->direction == ACRN_IOREQ_DIR_READ) {
 		if (!(flags & IOPORT_F_IN))
 			return -1;
 	} else {

@@ -73,11 +73,11 @@
 #define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */
 /* Values returned for reads on invalid I/O requests. */
-#define VHM_REQ_PIO_INVAL (~0U)
-#define VHM_REQ_MMIO_INVAL (~0UL)
+#define IOREQ_PIO_INVAL (~0U)
+#define IOREQ_MMIO_INVAL (~0UL)
 typedef void (*vmexit_handler_t)(struct vmctx *,
-		struct vhm_request *, int *vcpu);
+		struct acrn_io_request *, int *vcpu);
 char *vmname;
@@ -114,10 +114,10 @@ static cpuset_t cpumask;
 static void vm_loop(struct vmctx *ctx);
-static char vhm_request_page[4096] __aligned(4096);
-static struct vhm_request *vhm_req_buf =
-	(struct vhm_request *)&vhm_request_page;
+static char io_request_page[4096] __aligned(4096);
+static struct acrn_io_request *ioreq_buf =
+	(struct acrn_io_request *)&io_request_page;
 struct dmstats {
 	uint64_t vmexit_bogus;
@@ -302,16 +302,16 @@ notify_vmloop_thread(void)
 #endif
 static void
-vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_inout(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
 	int error;
 	int bytes, port, in;
-	port = vhm_req->reqs.pio.address;
-	bytes = vhm_req->reqs.pio.size;
-	in = (vhm_req->reqs.pio.direction == REQUEST_READ);
+	port = io_req->reqs.pio_request.address;
+	bytes = io_req->reqs.pio_request.size;
+	in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);
-	error = emulate_inout(ctx, pvcpu, &vhm_req->reqs.pio);
+	error = emulate_inout(ctx, pvcpu, &io_req->reqs.pio_request);
 	if (error) {
 		pr_err("Unhandled %s%c 0x%04x\n",
 			in ? "in" : "out",
@@ -319,56 +319,56 @@ vmexit_inout(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
 			port);
 		if (in) {
-			vhm_req->reqs.pio.value = VHM_REQ_PIO_INVAL;
+			io_req->reqs.pio_request.value = IOREQ_PIO_INVAL;
 		}
 	}
 }
 static void
-vmexit_mmio_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_mmio_emul(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
 	int err;
 	stats.vmexit_mmio_emul++;
-	err = emulate_mem(ctx, &vhm_req->reqs.mmio);
+	err = emulate_mem(ctx, &io_req->reqs.mmio_request);
 	if (err) {
 		if (err == -ESRCH)
 			pr_err("Unhandled memory access to 0x%lx\n",
-				vhm_req->reqs.mmio.address);
+				io_req->reqs.mmio_request.address);
 		pr_err("Failed to emulate instruction [");
 		pr_err("mmio address 0x%lx, size %ld",
-			vhm_req->reqs.mmio.address,
-			vhm_req->reqs.mmio.size);
+			io_req->reqs.mmio_request.address,
+			io_req->reqs.mmio_request.size);
-		if (vhm_req->reqs.mmio.direction == REQUEST_READ) {
-			vhm_req->reqs.mmio.value = VHM_REQ_MMIO_INVAL;
+		if (io_req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
+			io_req->reqs.mmio_request.value = IOREQ_MMIO_INVAL;
 		}
 	}
 }
 static void
-vmexit_pci_emul(struct vmctx *ctx, struct vhm_request *vhm_req, int *pvcpu)
+vmexit_pci_emul(struct vmctx *ctx, struct acrn_io_request *io_req, int *pvcpu)
 {
-	int err, in = (vhm_req->reqs.pci.direction == REQUEST_READ);
+	int err, in = (io_req->reqs.pci_request.direction == ACRN_IOREQ_DIR_READ);
 	err = emulate_pci_cfgrw(ctx, *pvcpu, in,
-			vhm_req->reqs.pci.bus,
-			vhm_req->reqs.pci.dev,
-			vhm_req->reqs.pci.func,
-			vhm_req->reqs.pci.reg,
-			vhm_req->reqs.pci.size,
-			&vhm_req->reqs.pci.value);
+			io_req->reqs.pci_request.bus,
+			io_req->reqs.pci_request.dev,
+			io_req->reqs.pci_request.func,
+			io_req->reqs.pci_request.reg,
+			io_req->reqs.pci_request.size,
+			&io_req->reqs.pci_request.value);
 	if (err) {
 		pr_err("Unhandled pci cfg rw at %x:%x.%x reg 0x%x\n",
-			vhm_req->reqs.pci.bus,
-			vhm_req->reqs.pci.dev,
-			vhm_req->reqs.pci.func,
-			vhm_req->reqs.pci.reg);
+			io_req->reqs.pci_request.bus,
+			io_req->reqs.pci_request.dev,
+			io_req->reqs.pci_request.func,
+			io_req->reqs.pci_request.reg);
 		if (in) {
-			vhm_req->reqs.pio.value = VHM_REQ_PIO_INVAL;
+			io_req->reqs.pio_request.value = IOREQ_PIO_INVAL;
 		}
 	}
 }
@@ -397,24 +397,24 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
 };
 static void
-handle_vmexit(struct vmctx *ctx, struct vhm_request *vhm_req, int vcpu)
+handle_vmexit(struct vmctx *ctx, struct acrn_io_request *io_req, int vcpu)
 {
 	enum vm_exitcode exitcode;
-	exitcode = vhm_req->type;
+	exitcode = io_req->type;
 	if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
 		pr_err("handle vmexit: unexpected exitcode 0x%x\n",
 			exitcode);
 		exit(1);
 	}
-	(*handler[exitcode])(ctx, vhm_req, &vcpu);
+	(*handler[exitcode])(ctx, io_req, &vcpu);
-	/* We cannot notify the VHM/hypervisor on the request completion at this
+	/* We cannot notify the HSM/hypervisor on the request completion at this
 	 * point if the UOS is in suspend or system reset mode, as the VM is
 	 * still not paused and a notification can kick off the vcpu to run
 	 * again. Postpone the notification till vm_system_reset() or
-	 * vm_suspend_resume() for resetting the ioreq states in the VHM and
+	 * vm_suspend_resume() for resetting the ioreq states in the HSM and
 	 * hypervisor.
 	 */
 	if ((VM_SUSPEND_SYSTEM_RESET == vm_get_suspend_mode()) ||
@@ -633,10 +633,10 @@ vm_system_reset(struct vmctx *ctx)
 	 * request which is the APIC PM CR write. VM reset will reset it
 	 *
 	 * When handling emergency mode triggered by one vcpu without
-	 * offlining any other vcpus, there can be multiple VHM requests
+	 * offlining any other vcpus, there can be multiple IO requests
 	 * with various states. We should be careful on potential races
 	 * when resetting especially in SMP SOS. vm_clear_ioreq can be used
-	 * to clear all ioreq status in VHM after VM pause, then let VM
+	 * to clear all ioreq status in HSM after VM pause, then let VM
 	 * reset in hypervisor reset all ioreqs.
 	 */
 	vm_clear_ioreq(ctx);
@@ -698,17 +698,17 @@ vm_loop(struct vmctx *ctx)
 	while (1) {
 		int vcpu_id;
-		struct vhm_request *vhm_req;
+		struct acrn_io_request *io_req;
 		error = vm_attach_ioreq_client(ctx);
 		if (error)
 			break;
 		for (vcpu_id = 0; vcpu_id < guest_ncpus; vcpu_id++) {
-			vhm_req = &vhm_req_buf[vcpu_id];
-			if ((atomic_load(&vhm_req->processed) == REQ_STATE_PROCESSING)
-			    && (vhm_req->client == ctx->ioreq_client))
-				handle_vmexit(ctx, vhm_req, vcpu_id);
+			io_req = &ioreq_buf[vcpu_id];
+			if ((atomic_load(&io_req->processed) == ACRN_IOREQ_STATE_PROCESSING)
+			    && !io_req->kernel_handled)
+				handle_vmexit(ctx, io_req, vcpu_id);
 		}
 		if (VM_SUSPEND_FULL_RESET == vm_get_suspend_mode() ||
@@ -1022,7 +1022,7 @@ main(int argc, char *argv[])
 	for (;;) {
 		pr_notice("vm_create: %s\n", vmname);
-		ctx = vm_create(vmname, (unsigned long)vhm_req_buf, &guest_ncpus);
+		ctx = vm_create(vmname, (unsigned long)ioreq_buf, &guest_ncpus);
 		if (!ctx) {
 			pr_err("vm_create failed");
 			goto create_fail;

@@ -150,7 +150,7 @@ mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
 }
 int
-emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
+emulate_mem(struct vmctx *ctx, struct acrn_mmio_request *mmio_req)
 {
 	uint64_t paddr = mmio_req->address;
 	int size = mmio_req->size;
@@ -179,7 +179,7 @@ emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req)
 	if (entry == NULL)
 		return -EINVAL;
-	if (mmio_req->direction == REQUEST_READ)
+	if (mmio_req->direction == ACRN_IOREQ_DIR_READ)
 		err = mem_read(ctx, 0, paddr, (uint64_t *)&mmio_req->value,
 			size, &entry->mr_param);
 	else

@@ -259,13 +259,13 @@ err:
 int
 vm_create_ioreq_client(struct vmctx *ctx)
 {
-	return ioctl(ctx->fd, IC_CREATE_IOREQ_CLIENT, 0);
+	return ioctl(ctx->fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0);
 }
 int
 vm_destroy_ioreq_client(struct vmctx *ctx)
 {
-	return ioctl(ctx->fd, IC_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
+	return ioctl(ctx->fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, ctx->ioreq_client);
 }
 int
@@ -273,7 +273,7 @@ vm_attach_ioreq_client(struct vmctx *ctx)
 {
 	int error;
-	error = ioctl(ctx->fd, IC_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);
+	error = ioctl(ctx->fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, ctx->ioreq_client);
 	if (error) {
 		pr_err("attach ioreq client return %d "
@@ -289,13 +289,13 @@ int
 vm_notify_request_done(struct vmctx *ctx, int vcpu)
 {
 	int error;
-	struct ioreq_notify notify;
+	struct acrn_ioreq_notify notify;
 	bzero(&notify, sizeof(notify));
-	notify.client_id = ctx->ioreq_client;
+	notify.vmid = ctx->vmid;
 	notify.vcpu = vcpu;
-	error = ioctl(ctx->fd, IC_NOTIFY_REQUEST_FINISH, &notify);
+	error = ioctl(ctx->fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);
 	if (error) {
 		pr_err("failed: notify request finish\n");
@@ -488,7 +488,7 @@ vm_reset(struct vmctx *ctx)
 void
 vm_clear_ioreq(struct vmctx *ctx)
 {
-	ioctl(ctx->fd, IC_CLEAR_VM_IOREQ, NULL);
+	ioctl(ctx->fd, ACRN_IOCTL_CLEAR_VM_IOREQ, NULL);
 }
 static enum vm_suspend_how suspend_mode = VM_SUSPEND_NONE;

@@ -32,7 +32,6 @@
 #include "types.h"
 #include "acrn_common.h"
 struct vmctx;
-struct vhm_request;
 /*
  * inout emulation handlers return 0 on success and -1 on failure.
@@ -71,7 +70,7 @@ struct inout_port {
 DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
 void init_inout(void);
-int emulate_inout(struct vmctx *ctx, int *pvcpu, struct pio_request *req);
+int emulate_inout(struct vmctx *ctx, int *pvcpu, struct acrn_pio_request *req);
 int register_inout(struct inout_port *iop);
 int unregister_inout(struct inout_port *iop);

@@ -50,7 +50,7 @@ struct mem_range {
 #define MEM_F_RW (MEM_F_READ | MEM_F_WRITE)
 #define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
-int emulate_mem(struct vmctx *ctx, struct mmio_request *mmio_req);
+int emulate_mem(struct vmctx *ctx, struct acrn_mmio_request *mmio_req);
 int register_mem(struct mem_range *memp);
 int register_mem_fallback(struct mem_range *memp);
 int unregister_mem(struct mem_range *memp);

@@ -94,13 +94,16 @@
 	_IOW(ACRN_IOCTL_TYPE, 0x25, __u64)
 /* DM ioreq management */
-#define IC_ID_IOREQ_BASE 0x30UL
-#define IC_SET_IOREQ_BUFFER _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x00)
-#define IC_NOTIFY_REQUEST_FINISH _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x01)
-#define IC_CREATE_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02)
-#define IC_ATTACH_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03)
-#define IC_DESTROY_IOREQ_CLIENT _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04)
-#define IC_CLEAR_VM_IOREQ _IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x05)
+#define ACRN_IOCTL_NOTIFY_REQUEST_FINISH \
+	_IOW(ACRN_IOCTL_TYPE, 0x31, struct acrn_ioreq_notify)
+#define ACRN_IOCTL_CREATE_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x32)
+#define ACRN_IOCTL_ATTACH_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x33)
+#define ACRN_IOCTL_DESTROY_IOREQ_CLIENT \
+	_IO(ACRN_IOCTL_TYPE, 0x34)
+#define ACRN_IOCTL_CLEAR_VM_IOREQ \
+	_IO(ACRN_IOCTL_TYPE, 0x35)
 /* Guest memory management */
 #define IC_ID_MEM_BASE 0x40UL
@@ -286,11 +289,12 @@ struct ic_ptdev_irq {
 /**
  * @brief data strcture to notify hypervisor ioreq is handled
  */
-struct ioreq_notify {
-	/** client id to identify ioreq client */
-	int32_t client_id;
+struct acrn_ioreq_notify {
+	/** VM id to identify ioreq client */
+	__u16 vmid;
+	__u16 reserved;
 	/** identify the ioreq submitter */
-	uint32_t vcpu;
+	__u32 vcpu;
 };
 #define ACRN_PLATFORM_LAPIC_IDS_MAX 64