mirror of https://github.com/projectacrn/acrn-hypervisor.git
dm: Add vm_clear_ioreq to clear ioreq status
VHM will provide an ioctl to clear the status of all I/O requests. This is useful for handling ioreqs during a VM's normal reboot and emergency reboot.

Tracked-On: #1821
Signed-off-by: Shuo Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent 1274fca008
commit 2d1ddd8876
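This commit adds only the device-model side; per the commit message, VHM provides the matching ioctl. As a rough sketch of what that kernel-side handler is expected to do — every name below (clear_vm_ioreqs, vhm_vm, req_buf, VHM_REQUEST_MAX, REQ_STATE_FREE) is an assumption for illustration, not code from the VHM driver:

/*
 * Hypothetical VHM-side handler for IC_CLEAR_VM_IOREQ: with the VM
 * paused, walk the shared ioreq buffer and force every slot back to a
 * free state so no stale COMPLETE/PROCESSING request survives reset.
 */
static long clear_vm_ioreqs(struct vhm_vm *vm)
{
	int i;

	for (i = 0; i < VHM_REQUEST_MAX; i++)		/* assumed constant */
		atomic_set(&vm->req_buf->req_queue[i].processed,
			   REQ_STATE_FREE);		/* assumed state name */
	return 0;
}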
@@ -550,13 +550,11 @@ vm_reset_vdevs(struct vmctx *ctx)
 static void
 vm_system_reset(struct vmctx *ctx)
 {
-	int vcpu_id = 0;
-
 	/*
 	 * If we get a system reset request, we don't want to exit the
 	 * vcpu_loop/vm_loop/mevent_loop. So we do:
 	 *   1. pause VM
-	 *   2. notify request done to reset ioreq state in vhm
+	 *   2. flush and clear ioreqs
 	 *   3. reset virtual devices
 	 *   4. load software for UOS
 	 *   5. hypercall reset vm
@@ -564,49 +562,23 @@ vm_system_reset(struct vmctx *ctx)
 	 */
 
 	vm_pause(ctx);
-	for (vcpu_id = 0; vcpu_id < 4; vcpu_id++) {
-		struct vhm_request *vhm_req;
-
-		vhm_req = &vhm_req_buf[vcpu_id];
-		/*
-		 * The state of a VHM request already assigned to the DM can
-		 * be COMPLETE if it has already been processed by the vm_loop,
-		 * or PROCESSING if the request is assigned to the DM after
-		 * vm_loop checks the requests but before this point.
-		 *
-		 * Unless under emergency mode, the vcpu writing to the ACPI PM
-		 * CR should be the only vcpu of that VM that is still
-		 * running. In this case there should be only one completed
-		 * request, which is the ACPI PM CR write. Notify the
-		 * completion of that request here (after the VM is paused) to
-		 * reset its state.
-		 *
-		 * When handling emergency mode triggered by one vcpu without
-		 * offlining any other vcpus, there can be multiple VHM requests
-		 * with various states. Currently the context of that VM in the
-		 * DM, VHM and hypervisor will be destroyed and recreated,
-		 * causing the states of VHM requests to be dropped.
-		 *
-		 * TODO: If the emergency mode is handled without context
-		 * deletion and recreation, we should be careful about potential
-		 * races when resetting VHM request states. Some considerations
-		 * include:
-		 *
-		 *     * Use cmpxchg instead of load+store when distributing
-		 *       requests.
-		 *
-		 *     * vm_reset in VHM should clean up the ioreq bitmap, while
-		 *       vm_reset in the hypervisor should clean up the states
-		 *       of VHM requests.
-		 *
-		 *     * vm_reset in VHM should hold a mutex to block the
-		 *       request distribution tasklet from assigning more
-		 *       requests before VM reset is done.
-		 */
-		if ((atomic_load(&vhm_req->processed) == REQ_STATE_COMPLETE) &&
-			(vhm_req->client == ctx->ioreq_client))
-			vm_notify_request_done(ctx, vcpu_id);
-	}
-
+	/*
+	 * After vm_pause, no new ioreq should be arriving.
+	 *
+	 * Unless under emergency mode, the vcpu writing to the ACPI PM
+	 * CR should be the only vcpu of that VM that is still
+	 * running. In this case there should be only one completed
+	 * request, which is the ACPI PM CR write; VM reset will reset it.
+	 *
+	 * When handling emergency mode triggered by one vcpu without
+	 * offlining any other vcpus, there can be multiple VHM requests
+	 * with various states. We should be careful about potential races
+	 * when resetting, especially in an SMP SOS. vm_clear_ioreq can be
+	 * used to clear all ioreq status in VHM after VM pause; the VM
+	 * reset in the hypervisor then resets all ioreqs.
+	 */
+	vm_clear_ioreq(ctx);
 
 	vm_reset_vdevs(ctx);
 	vm_reset(ctx);
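Condensed, the reset path after this change is just the numbered sequence from the comment above; the sketch below distills the surrounding hunks (the UOS software-load step and error handling are omitted, and the wrapper name reboot_path is chosen for illustration):

static void reboot_path(struct vmctx *ctx)
{
	vm_pause(ctx);		/* 1. stop vcpus so no new ioreq arrives    */
	vm_clear_ioreq(ctx);	/* 2. drop stale ioreq state held in VHM    */
	vm_reset_vdevs(ctx);	/* 3. reset virtual device emulation        */
	vm_reset(ctx);		/* 5. hypercall; the hypervisor-side reset  */
				/*    also resets its own ioreq state      */
}

The same pause-then-clear pattern replaces the per-vcpu notification loop in vm_suspend_resume below.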
@@ -621,31 +593,19 @@ vm_system_reset(struct vmctx *ctx)
 static void
 vm_suspend_resume(struct vmctx *ctx)
 {
-	int vcpu_id = 0;
-
 	/*
 	 * If we get a warm reboot request, we don't want to exit the
 	 * vcpu_loop/vm_loop/mevent_loop. So we do:
 	 *   1. pause VM
-	 *   2. notify request done to reset ioreq state in vhm
+	 *   2. flush and clear ioreqs
 	 *   3. stop vm watchdog
 	 *   4. wait for resume signal
 	 *   5. reset vm watchdog
 	 *   6. hypercall restart vm
 	 */
 	vm_pause(ctx);
-	for (vcpu_id = 0; vcpu_id < 4; vcpu_id++) {
-		struct vhm_request *vhm_req;
-
-		vhm_req = &vhm_req_buf[vcpu_id];
-		/* See the comments in vm_system_reset() for considerations of
-		 * the notification below.
-		 */
-		if ((atomic_load(&vhm_req->processed) == REQ_STATE_COMPLETE) &&
-			(vhm_req->client == ctx->ioreq_client))
-			vm_notify_request_done(ctx, vcpu_id);
-	}
-
+	vm_clear_ioreq(ctx);
 	vm_stop_watchdog(ctx);
 	wait_for_resume(ctx);
 
@@ -386,6 +386,12 @@ vm_reset(struct vmctx *ctx)
 	ioctl(ctx->fd, IC_RESET_VM, &ctx->vmid);
 }
 
+void
+vm_clear_ioreq(struct vmctx *ctx)
+{
+	ioctl(ctx->fd, IC_CLEAR_VM_IOREQ, NULL);
+}
+
 static int suspend_mode = VM_SUSPEND_NONE;
 
 void
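Like vm_reset just above it, vm_clear_ioreq discards the ioctl return value, and it passes NULL because the command carries no payload: the target VM is implied by the file descriptor. A caller that wants to surface failures could wrap it as below (an illustrative helper, not part of the commit):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical wrapper: report and propagate IC_CLEAR_VM_IOREQ errors. */
static int vm_clear_ioreq_checked(struct vmctx *ctx)
{
	if (ioctl(ctx->fd, IC_CLEAR_VM_IOREQ, NULL) < 0) {
		fprintf(stderr, "IC_CLEAR_VM_IOREQ failed: %s\n",
			strerror(errno));
		return -errno;
	}
	return 0;
}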
@@ -88,6 +88,7 @@
 #define IC_CREATE_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x02)
 #define IC_ATTACH_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x03)
 #define IC_DESTROY_IOREQ_CLIENT		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x04)
+#define IC_CLEAR_VM_IOREQ		_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x05)
 
 /* Guest memory management */
 #define IC_ID_MEM_BASE			0x40UL
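The new command simply takes the next free offset in the ioreq range after IC_DESTROY_IOREQ_CLIENT. For reference, _IC_ID packs a namespace ID together with that offset; the definitions below are assumed from elsewhere in this header and are not shown in the hunk:

#define _IC_ID(x, y)		(((x) << 24) | (y))	/* assumption */
#define IC_ID			0x43UL			/* assumption */
#define IC_ID_IOREQ_BASE	0x30UL			/* assumption */
/* hence: */
#define IC_CLEAR_VM_IOREQ	_IC_ID(IC_ID, IC_ID_IOREQ_BASE + 0x05)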
@@ -98,6 +98,7 @@ int vm_create_ioreq_client(struct vmctx *ctx);
 int vm_destroy_ioreq_client(struct vmctx *ctx);
 int vm_attach_ioreq_client(struct vmctx *ctx);
 int vm_notify_request_done(struct vmctx *ctx, int vcpu);
+void vm_clear_ioreq(struct vmctx *ctx);
 void vm_set_suspend_mode(enum vm_suspend_how how);
 int vm_get_suspend_mode(void);
 void vm_destroy(struct vmctx *ctx);