Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: pm: enhance platform S5 entry operation
Currently we assume that the SOS controls whether the platform should enter S5. When the SOS tries to enter S5, we simply forward the S5 request to the native port, which keeps platform S5 fully aligned with SOS S5. With higher-severity guests introduced, this assumption no longer holds, so the platform S5 process must be extended to handle them:

- For DM-launched RTVMs, we make sure these guests are powered off before putting the whole platform into S5.
- For pre-launched VMs, there are two cases:
  * If the OS running in the VM supports S5, we wait for the guest to power off.
  * If the OS running in the VM doesn't support S5, we expect it to invoke a hypercall to ask the HV to shut it down.
    NOTE: this case is not supported yet; it will be added in the future.

Tracked-On: #3564
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
parent ce9375874c
commit f039d75998
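For orientation before the diff: a sketch of the control flow this commit introduces, assembled from the hunks below. This block is a summary written for this page, not code lifted verbatim from the commit; the names mirror the diff.

    /* Summary sketch of the new platform S5 path (names from the diff below). */
    static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
    {
    	/* Poll until every other VM is powered off, or give up after the
    	 * 5-minute POWEROFF_TIMEOUT and power the platform off anyway. */
    	wait_for_other_vm_shutdown(vm);

    	/* Only then forward the request to the native PM1 control ports. */
    	host_enter_s5(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
    }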
@@ -130,9 +130,9 @@ int32_t vm_load_pm_s_state(struct acrn_vm *vm)
 	return ret;
 }
 
-static inline uint32_t s3_enabled(uint32_t pm1_cnt)
+static inline bool is_s3_enabled(uint32_t pm1_cnt)
 {
-	return pm1_cnt & (1U << BIT_SLP_EN);
+	return ((pm1_cnt & (1U << BIT_SLP_EN)) != 0U);
 }
 
 static inline uint8_t get_slp_typx(uint32_t pm1_cnt)
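As background for the two helpers above, a minimal standalone sketch of the PM1 control register fields they decode, assuming the standard ACPI layout (SLP_TYPx in bits 10..12, SLP_EN in bit 13). The BIT_* values and helper names here are illustrative stand-ins for ACRN's own definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed ACPI PM1 control register layout: bits 10..12 = SLP_TYPx,
     * bit 13 = SLP_EN. Values are for illustration; ACRN defines its own
     * BIT_SLP_TYPx / BIT_SLP_EN constants in its PM headers. */
    #define BIT_SLP_TYPx	10U
    #define BIT_SLP_EN	13U

    static inline bool is_slp_en_set(uint32_t pm1_cnt)
    {
    	return ((pm1_cnt & (1U << BIT_SLP_EN)) != 0U);
    }

    static inline uint8_t slp_typx_of(uint32_t pm1_cnt)
    {
    	return (uint8_t)((pm1_cnt >> BIT_SLP_TYPx) & 0x7U);
    }

    int main(void)
    {
    	/* e.g. a guest writing SLP_TYP=5, SLP_EN=1 to PM1A_CNT */
    	uint32_t v = (5U << BIT_SLP_TYPx) | (1U << BIT_SLP_EN); /* 0x3400 */

    	printf("SLP_EN=%d SLP_TYP=%u\n", is_slp_en_set(v), (unsigned)slp_typx_of(v));
    	return 0;
    }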
@@ -149,6 +149,47 @@ static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
 	return true;
 }
 
+#define POWEROFF_TIMEOUT	(5 * 60U)	/* default poweroff timeout is 5 minutes */
+/* Wait for the other VMs' shutdown to complete. If POWEROFF_TIMEOUT passes
+ * and some VMs are still active, we force platform power-off anyway.
+ *
+ * TODO:
+ * - Let the user configure whether to wait forever until all VMs power off,
+ *   or to force shutdown once the pre-defined timeout is hit.
+ */
+static inline void wait_for_other_vm_shutdown(struct acrn_vm *self_vm)
+{
+	uint16_t vm_id;
+	bool ready_for_s5;
+	uint32_t timeout = POWEROFF_TIMEOUT;
+	struct acrn_vm *vm;
+
+	while (timeout != 0U) {
+		ready_for_s5 = true;
+		for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
+			vm = get_vm_from_vmid(vm_id);
+
+			if ((vm != self_vm) && !is_poweroff_vm(vm)) {
+				ready_for_s5 = false;
+			}
+		}
+
+		if (ready_for_s5) {
+			break;
+		} else {
+			udelay(1000U * 1000U);	/* delay 1s in each loop */
+		}
+
+		timeout--;
+	}
+}
+
+static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
+{
+	wait_for_other_vm_shutdown(vm);
+	host_enter_s5(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
+}
+
 static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 {
 	uint32_t guest_wakeup_vec32;
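To see the timeout arithmetic concretely (POWEROFF_TIMEOUT is 5 * 60 = 300 iterations of a 1-second delay, i.e. 5 minutes), here is a hedged, self-contained harness mimicking the wait loop above. sim_is_poweroff_vm, off_after, and the one-iteration-per-second scaling are invented for illustration, not ACRN APIs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VM_NUM	3U

    /* Simulated VMs: two already off, one that powers off after 3 "seconds". */
    static uint32_t off_after[MAX_VM_NUM] = {0U, 0U, 3U};

    static bool sim_is_poweroff_vm(uint16_t vm_id, uint32_t now)
    {
    	return now >= off_after[vm_id];
    }

    int main(void)
    {
    	uint32_t timeout = 5U * 60U;	/* POWEROFF_TIMEOUT */
    	uint32_t now = 0U;

    	while (timeout != 0U) {
    		bool ready_for_s5 = true;

    		for (uint16_t vm_id = 0U; vm_id < MAX_VM_NUM; vm_id++) {
    			if (!sim_is_poweroff_vm(vm_id, now)) {
    				ready_for_s5 = false;
    			}
    		}
    		if (ready_for_s5) {
    			break;
    		}
    		now++;		/* stands in for udelay(1000U * 1000U) */
    		timeout--;
    	}
    	printf("ready after %u simulated seconds\n", now);
    	return 0;
    }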
@@ -179,24 +220,30 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width, uint32_t v)
 	if (width == 2U) {
 		uint8_t val = get_slp_typx(v);
 
-		if ((addr == vm->pm.sx_state_data->pm1a_cnt.address)
-			&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1a) && (s3_enabled(v) != 0U)) {
+		if ((addr == vm->pm.sx_state_data->pm1a_cnt.address) && is_s3_enabled(v)) {
 
 			if (vm->pm.sx_state_data->pm1b_cnt.address != 0UL) {
 				pm1a_cnt_ready = v;
 			} else {
-				enter_s3(vm, v, 0U);
+				if (vm->pm.sx_state_data->s3_pkg.val_pm1a == val) {
+					enter_s3(vm, v, 0U);
+				} else if (vm->pm.sx_state_data->s5_pkg.val_pm1a == val) {
+					enter_s5(vm, v, 0U);
+				}
 			}
 
 			to_write = false;
 
-		} else if ((addr == vm->pm.sx_state_data->pm1b_cnt.address)
-			&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1b) && (s3_enabled(v) != 0U)) {
-
+		} else if ((addr == vm->pm.sx_state_data->pm1b_cnt.address) && is_s3_enabled(v)) {
 			if (pm1a_cnt_ready != 0U) {
 				pm1a_cnt_val = pm1a_cnt_ready;
 				pm1a_cnt_ready = 0U;
-				enter_s3(vm, pm1a_cnt_val, v);
+
+				if (vm->pm.sx_state_data->s3_pkg.val_pm1b == val) {
+					enter_s3(vm, pm1a_cnt_val, v);
+				} else if (vm->pm.sx_state_data->s5_pkg.val_pm1b == val) {
+					enter_s5(vm, pm1a_cnt_val, v);
+				}
 			} else {
 				/* this case breaks the ACPI spec */
 				pr_err("PM1B_CNT write error!");
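The dispatch this hunk adds can be stated compactly. Below is a hedged, self-contained sketch; classify_pm1a_write and the enum are invented names, and s3_val/s5_val stand for the diff's s3_pkg.val_pm1a / s5_pkg.val_pm1a. Note that despite its name, is_s3_enabled() only tests SLP_EN, which is why it now gates S5 requests as well:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative constants; see the bit-layout sketch earlier. */
    #define BIT_SLP_TYPx	10U
    #define BIT_SLP_EN	13U

    enum sx_request { SX_NONE, SX_S3, SX_S5 };

    /* Classify a PM1A control write the way the hunk above does: it is a
     * sleep request only if SLP_EN is set, and the target state is chosen
     * by matching SLP_TYP against the values reported by the guest's ACPI
     * \_S3 and \_S5 packages. */
    static enum sx_request classify_pm1a_write(uint32_t v, uint8_t s3_val, uint8_t s5_val)
    {
    	uint8_t typ = (uint8_t)((v >> BIT_SLP_TYPx) & 0x7U);

    	if ((v & (1U << BIT_SLP_EN)) == 0U) {
    		return SX_NONE;
    	}
    	return (typ == s3_val) ? SX_S3 : ((typ == s5_val) ? SX_S5 : SX_NONE);
    }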
@@ -148,6 +148,11 @@ void do_acpi_sx(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 	} while ((s1 & (1U << BIT_WAK_STS)) == 0U);
 }
 
+void host_enter_s5(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
+{
+	do_acpi_sx(sstate_data, pm1a_cnt_val, pm1b_cnt_val);
+}
+
 void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 {
 	uint64_t pmain_entry_saved;
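For context on what host_enter_s5() ultimately does, a hedged sketch of the S5 half of do_acpi_sx(). pio_write16_stub is a hypothetical stand-in for the hypervisor's 16-bit port-write primitive, and the real function also handles the S3 case, polling WAK_STS as the context line above shows:

    #include <stdint.h>

    /* Hypothetical port-write helper, standing in for the hypervisor's own
     * 16-bit I/O write primitive. */
    static void pio_write16_stub(uint16_t port, uint16_t val)
    {
    	(void)port;
    	(void)val;	/* real code would execute an outw here */
    }

    /* Illustrative shape of the S5 path: write the prepared SLP_TYPx|SLP_EN
     * values to the physical PM1A (and, if present, PM1B) control registers. */
    static void do_acpi_s5_sketch(uint16_t pm1a_cnt_port, uint16_t pm1b_cnt_port,
    		uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
    {
    	pio_write16_stub(pm1a_cnt_port, (uint16_t)pm1a_cnt_val);
    	if (pm1b_cnt_port != 0U) {
    		pio_write16_stub(pm1b_cnt_port, (uint16_t)pm1b_cnt_val);
    	}
    	/* the platform leaves S0 here; for S5 there is no wakeup to poll */
    }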
@@ -31,6 +31,7 @@ struct acpi_reset_reg {
 
 struct pm_s_state_data *get_host_sstate_data(void);
 void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
+void host_enter_s5(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
 extern void asm_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
 extern void restore_s3_context(void);
 struct cpu_state_info *get_cpu_pm_state_info(void);