mirror of https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-06-22 13:37:10 +00:00
hv: pm: support shutting down multiple VMs when pCPUs are shared
More than one VM may request shutdown on the same pCPU before
shutdown_vm_from_idle() is called in the idle thread when pCPUs are
shared among VMs. Use a per-pCPU bitmap to store all the VMIDs
requesting shutdown.

v1 -> v2:
- use vm_lock to avoid a race on shutdown

Tracked-On: #5411
Signed-off-by: Peter Fang <peter.fang@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent 14b26d2170
commit 06838df53b
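
Before the hunks, a rough standalone sketch of the idea may help. Each pCPU keeps one 64-bit mask in which bit n means "VM n has requested shutdown on this pCPU"; shutdown triggers only set a bit and notify the pCPU, and the idle thread later drains the mask, handling every pending VM rather than only the last writer. The sketch below is a toy model, not ACRN code: shutdown_vm_bitmap is a plain array instead of per_cpu(), and toy_fls64(), TOY_MAX_VM_NUM and TOY_PCPU_NUM are simplified stand-ins for fls64(), CONFIG_MAX_VM_NUM and the per-CPU region; the real locking (get_vm_lock/put_vm_lock) and request delivery (make_shutdown_vm_request()) are reduced to comments.

#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_VM_NUM 8U   /* stand-in for CONFIG_MAX_VM_NUM */
#define TOY_PCPU_NUM   4U   /* pCPUs in this toy model */

/* One request bitmap per pCPU: bit n set == VM n asked to shut down there. */
static uint64_t shutdown_vm_bitmap[TOY_PCPU_NUM];

/* Highest set bit, or an out-of-range value (64 here) when the mask is empty.
 * The drain loop stops as soon as the result is >= TOY_MAX_VM_NUM, mirroring
 * the CONFIG_MAX_VM_NUM check in the diff below. */
static uint16_t toy_fls64(uint64_t mask)
{
        uint16_t i;

        for (i = 64U; i > 0U; i--) {
                if ((mask & ((uint64_t)1U << (i - 1U))) != 0UL) {
                        return (uint16_t)(i - 1U);
                }
        }
        return 64U;
}

/* Producer: any shutdown trigger (S5 write, triple fault, reset register)
 * just records the request; the pCPU keeps running its other vCPUs. */
static void request_shutdown(uint16_t pcpu_id, uint16_t vm_id)
{
        shutdown_vm_bitmap[pcpu_id] |= (uint64_t)1U << vm_id;
        /* the real code then calls make_shutdown_vm_request(pcpu_id)
         * to notify that pCPU's idle thread */
}

/* Consumer: runs in that pCPU's idle thread and drains every pending
 * request, not just the most recent one. */
static void drain_shutdown_requests(uint16_t pcpu_id)
{
        uint64_t *vms = &shutdown_vm_bitmap[pcpu_id];
        uint16_t vm_id;

        for (vm_id = toy_fls64(*vms); vm_id < TOY_MAX_VM_NUM; vm_id = toy_fls64(*vms)) {
                printf("pCPU %u: shutting down VM %u\n",
                       (unsigned int)pcpu_id, (unsigned int)vm_id);
                *vms &= ~((uint64_t)1U << vm_id);
        }
}

int main(void)
{
        /* Two VMs sharing pCPU 0 request shutdown before the idle thread runs;
         * with a single shutdown_vm_id only the second request would survive. */
        request_shutdown(0U, 1U);
        request_shutdown(0U, 3U);
        drain_shutdown_requests(0U);
        return 0;
}
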
@@ -150,20 +150,25 @@ static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
         return true;
 }
 
-static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
+static inline void enter_s5(struct acrn_vcpu *vcpu, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 {
+        struct acrn_vm *vm = vcpu->vm;
+        uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
+
         get_vm_lock(vm);
         /*
-         * It's possible that ACRN come here from SOS and pre-launched VM. Currently, we
-         * assume SOS has full ACPI power management stack. That means the value from SOS
-         * should be saved and used to shutdown the system.
+         * Currently, we assume SOS has full ACPI power management stack.
+         * That means the value from SOS should be saved and used to shut
+         * down the system.
          */
         if (is_sos_vm(vm)) {
                 save_s5_reg_val(pm1a_cnt_val, pm1b_cnt_val);
         }
         pause_vm(vm);
-        (void)shutdown_vm(vm);
         put_vm_lock(vm);
+
+        bitmap_set_nolock(vm->vm_id, &per_cpu(shutdown_vm_bitmap, pcpu_id));
+        make_shutdown_vm_request(pcpu_id);
 }
 
 static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
@@ -206,7 +211,7 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width,
                 if (vm->pm.sx_state_data->s3_pkg.val_pm1a == val) {
                         enter_s3(vm, v, 0U);
                 } else if (vm->pm.sx_state_data->s5_pkg.val_pm1a == val) {
-                        enter_s5(vm, v, 0U);
+                        enter_s5(vcpu, v, 0U);
                 }
         }
 
@@ -220,7 +225,7 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width,
                 if (vm->pm.sx_state_data->s3_pkg.val_pm1b == val) {
                         enter_s3(vm, pm1a_cnt_val, v);
                 } else if (vm->pm.sx_state_data->s5_pkg.val_pm1b == val) {
-                        enter_s5(vm, pm1a_cnt_val, v);
+                        enter_s5(vcpu, pm1a_cnt_val, v);
                 }
         } else {
                 /* the case broke ACPI spec */
@@ -52,7 +52,8 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
                 pause_vm(vm);
                 put_vm_lock(vm);
 
-                per_cpu(shutdown_vm_id, pcpuid_from_vcpu(vcpu)) = vm->vm_id;
+                bitmap_set_nolock(vm->vm_id,
+                                &per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
                 make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
         }
 }
@@ -83,8 +84,9 @@ static bool handle_reset_reg_read(struct acrn_vcpu *vcpu, __unused uint16_t addr
 /**
  * @pre vm != NULL
  */
-static bool handle_common_reset_reg_write(struct acrn_vm *vm, bool reset)
+static bool handle_common_reset_reg_write(struct acrn_vcpu *vcpu, bool reset)
 {
+        struct acrn_vm *vm = vcpu->vm;
         bool ret = true;
 
         get_vm_lock(vm);
@@ -104,11 +106,10 @@ static bool handle_common_reset_reg_write(struct acrn_vm *vm, bool reset)
                          * or pre-launched VM reset,
                          * ACRN doesn't support re-launch, just shutdown the guest.
                          */
-                        const struct acrn_vcpu *bsp = vcpu_from_vid(vm, BSP_CPU_ID);
-
                         pause_vm(vm);
-                        per_cpu(shutdown_vm_id, pcpuid_from_vcpu(bsp)) = vm->vm_id;
-                        make_shutdown_vm_request(pcpuid_from_vcpu(bsp));
+                        bitmap_set_nolock(vm->vm_id,
+                                        &per_cpu(shutdown_vm_bitmap, pcpuid_from_vcpu(vcpu)));
+                        make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
                 }
         } else {
                 if (is_postlaunched_vm(vm)) {
@@ -132,7 +133,7 @@ static bool handle_common_reset_reg_write(struct acrn_vm *vm, bool reset)
 static bool handle_kb_write(struct acrn_vcpu *vcpu, __unused uint16_t addr, size_t bytes, uint32_t val)
 {
         /* ignore commands other than system reset */
-        return handle_common_reset_reg_write(vcpu->vm, ((bytes == 1U) && (val == 0xfeU)));
+        return handle_common_reset_reg_write(vcpu, ((bytes == 1U) && (val == 0xfeU)));
 }
 
 static bool handle_kb_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
@@ -163,7 +164,7 @@ static bool handle_kb_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t bytes)
 static bool handle_cf9_write(struct acrn_vcpu *vcpu, __unused uint16_t addr, size_t bytes, uint32_t val)
 {
         /* We don't differentiate among hard/soft/warm/cold reset */
-        return handle_common_reset_reg_write(vcpu->vm,
+        return handle_common_reset_reg_write(vcpu,
                         ((bytes == 1U) && ((val & 0x4U) == 0x4U) && ((val & 0xaU) != 0U)));
 }
 
@@ -179,7 +180,7 @@ static bool handle_reset_reg_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t
         struct acpi_reset_reg *reset_reg = get_host_reset_reg_data();
 
         if (val == reset_reg->val) {
-                ret = handle_common_reset_reg_write(vcpu->vm, true);
+                ret = handle_common_reset_reg_write(vcpu, true);
         } else {
                 /*
                  * ACPI defines the reset value but doesn't specify the meaning of other values.
@@ -234,7 +235,17 @@ void register_reset_port_handler(struct acrn_vm *vm)
 
 void shutdown_vm_from_idle(uint16_t pcpu_id)
 {
-        struct acrn_vm *vm = get_vm_from_vmid(per_cpu(shutdown_vm_id, pcpu_id));
+        uint16_t vm_id;
+        uint64_t *vms = &per_cpu(shutdown_vm_bitmap, pcpu_id);
+        struct acrn_vm *vm;
 
-        (void)shutdown_vm(vm);
+        for (vm_id = fls64(*vms); vm_id < CONFIG_MAX_VM_NUM; vm_id = fls64(*vms)) {
+                vm = get_vm_from_vmid(vm_id);
+                get_vm_lock(vm);
+                if (is_paused_vm(vm)) {
+                        (void)shutdown_vm(vm);
+                }
+                put_vm_lock(vm);
+                bitmap_clear_nolock(vm_id, vms);
+        }
 }
@@ -256,10 +256,13 @@ int32_t hcall_destroy_vm(__unused struct acrn_vm *vm, struct acrn_vm *target_vm,
 {
         int32_t ret = -1;
 
+        get_vm_lock(target_vm);
         if (is_paused_vm(target_vm)) {
                 /* TODO: check target_vm guest_flags */
                 ret = shutdown_vm(target_vm);
         }
+        put_vm_lock(target_vm);
+
         return ret;
 }
 
@@ -57,7 +57,7 @@ struct per_cpu_region {
 #ifdef PROFILING_ON
         struct profiling_info_wrapper profiling_info;
 #endif
-        uint16_t shutdown_vm_id;
+        uint64_t shutdown_vm_bitmap;
         uint64_t tsc_suspend;
         /*
          * We maintain a per-pCPU array of vCPUs. vCPUs of a VM won't
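
A side effect of the new field being a single uint64_t is that it can track VM IDs 0 through 63 only, so the scheme assumes CONFIG_MAX_VM_NUM never exceeds 64. The guard below is hypothetical (the commit does not add one, and ACRN's scenario configuration may already bound the value elsewhere); it is only meant to spell out that assumption.

/* Hypothetical compile-time guard, not part of the commit above; the real
 * CONFIG_MAX_VM_NUM comes from the scenario configuration. */
#define CONFIG_MAX_VM_NUM 16U   /* stand-in value for illustration */

_Static_assert(CONFIG_MAX_VM_NUM <= 64U,
               "shutdown_vm_bitmap is a single uint64_t, so at most 64 VM IDs fit");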