mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-05-04 22:47:00 +00:00
Previously the ACPI PM1A register info was hardcoded to 0 in the HV for generic boards, but the SOS could still discover the real PM1A info, so the system would hang if the user triggered S3 from the SOS. Enabling PM1A register info fixup lets the HV intercept operations on PM1A and makes the basic S3 function work on all boards. Tracked-On: #2291 Signed-off-by: Victor Sun <victor.sun@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
257 lines
6.7 KiB
C
257 lines
6.7 KiB
C
/*
|
|
* Copyright (C) 2018 Intel Corporation. All rights reserved.
|
|
*
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
|
*/
|
|
|
|
#include <types.h>
|
|
#include <host_pm.h>
|
|
#include <vm.h>
|
|
#include <io.h>
|
|
#include <logmsg.h>
|
|
#include <platform_acpi_info.h>
|
|
|
|
/**
 * @brief Check whether a guest-requested P-state control value is acceptable.
 *
 * The SOS is trusted and may write any value to IA32_PERF_CTL. For any other
 * VM, the requested control value (low 16 bits of perf_ctl) must match the
 * control field of one of the Px entries exposed to that VM.
 *
 * @param vm       VM issuing the P-state request.
 * @param perf_ctl Value the guest wants to write to IA32_PERF_CTL.
 *
 * @return 0 when the request is valid, -1 otherwise.
 */
int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl)
{
	int32_t ret = -1;

	if (is_sos_vm(vm)) {
		/* SOS controls the physical P-states directly. */
		ret = 0;
	} else {
		const struct cpu_px_data *px_data = vm->pm.px_data;
		uint8_t px_cnt = vm->pm.px_cnt;
		uint8_t idx;

		if ((px_cnt != 0U) && (px_data != NULL)) {
			/* Accept only control values present in this VM's Px table. */
			for (idx = 0U; idx < px_cnt; idx++) {
				if (px_data[idx].control == (perf_ctl & 0xffffUL)) {
					ret = 0;
					break;
				}
			}
		}
	}

	return ret;
}
|
|
|
|
static void vm_setup_cpu_px(struct acrn_vm *vm)
|
|
{
|
|
uint32_t px_data_size;
|
|
struct cpu_state_info *pm_state_info = get_cpu_pm_state_info();
|
|
|
|
vm->pm.px_cnt = 0U;
|
|
(void)memset(vm->pm.px_data, 0U, MAX_PSTATE * sizeof(struct cpu_px_data));
|
|
|
|
if ((pm_state_info->px_cnt != 0U) && (pm_state_info->px_data != NULL)) {
|
|
ASSERT((pm_state_info->px_cnt <= MAX_PSTATE), "failed to setup cpu px");
|
|
|
|
vm->pm.px_cnt = pm_state_info->px_cnt;
|
|
px_data_size = ((uint32_t)vm->pm.px_cnt) * sizeof(struct cpu_px_data);
|
|
(void)memcpy_s(vm->pm.px_data, px_data_size, pm_state_info->px_data, px_data_size);
|
|
}
|
|
}
|
|
|
|
static void vm_setup_cpu_cx(struct acrn_vm *vm)
|
|
{
|
|
uint32_t cx_data_size;
|
|
struct cpu_state_info *pm_state_info = get_cpu_pm_state_info();
|
|
|
|
vm->pm.cx_cnt = 0U;
|
|
(void)memset(vm->pm.cx_data, 0U, MAX_CSTATE * sizeof(struct cpu_cx_data));
|
|
|
|
if ((pm_state_info->cx_cnt != 0U) && (pm_state_info->cx_data != NULL)) {
|
|
ASSERT((pm_state_info->cx_cnt <= MAX_CX_ENTRY), "failed to setup cpu cx");
|
|
|
|
vm->pm.cx_cnt = pm_state_info->cx_cnt;
|
|
cx_data_size = ((uint32_t)vm->pm.cx_cnt) * sizeof(struct cpu_cx_data);
|
|
|
|
/* please note pm.cx_data[0] is a empty space holder,
|
|
* pm.cx_data[1...MAX_CX_ENTRY] would be used to store cx entry datas.
|
|
*/
|
|
(void)memcpy_s(vm->pm.cx_data + 1, cx_data_size, pm_state_info->cx_data, cx_data_size);
|
|
}
|
|
}
|
|
|
|
static inline void init_cx_port(struct acrn_vm *vm)
|
|
{
|
|
uint8_t cx_idx;
|
|
|
|
for (cx_idx = 2U; cx_idx <= vm->pm.cx_cnt; cx_idx++) {
|
|
struct cpu_cx_data *cx_data = vm->pm.cx_data + cx_idx;
|
|
|
|
if (cx_data->cx_reg.space_id == SPACE_SYSTEM_IO) {
|
|
uint16_t port = (uint16_t)cx_data->cx_reg.address;
|
|
|
|
allow_guest_pio_access(vm, port, 1U);
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
 * @brief Build up the virtual CPU power-state (Px/Cx) data for a VM.
 *
 * Copies the host P-state and C-state tables into the VM's PM data, then
 * opens pass-through I/O access to the C-state trigger ports. Order
 * matters: init_cx_port() consumes the Cx table that vm_setup_cpu_cx()
 * fills in.
 *
 * @param vm VM being initialized.
 */
void vm_setup_cpu_state(struct acrn_vm *vm)
{
	vm_setup_cpu_px(vm);
	vm_setup_cpu_cx(vm);
	init_cx_port(vm);
}
|
|
|
|
/* This function is for power management Sx state implementation,
|
|
* VM need to load the Sx state data to implement S3/S5.
|
|
*/
|
|
int32_t vm_load_pm_s_state(struct acrn_vm *vm)
|
|
{
|
|
int32_t ret;
|
|
struct pm_s_state_data *sx_data = get_host_sstate_data();
|
|
|
|
if ((sx_data->pm1a_evt.address == 0UL) || (sx_data->pm1a_cnt.address == 0UL)
|
|
|| (sx_data->wake_vector_32 == NULL)) {
|
|
pr_err("System S3/S5 is NOT supported.");
|
|
ret = -1;
|
|
} else {
|
|
pr_info("System S3/S5 is supported.");
|
|
vm->pm.sx_state_data = sx_data;
|
|
ret = 0;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
/* Nonzero when the SLP_EN bit is set in a PM1 control register value. */
static inline uint32_t s3_enabled(uint32_t pm1_cnt)
{
	const uint32_t slp_en_mask = 1U << BIT_SLP_EN;

	return pm1_cnt & slp_en_mask;
}
|
|
|
|
/* Extract the SLP_TYPx field from a PM1 control register value. */
static inline uint8_t get_slp_typx(uint32_t pm1_cnt)
{
	uint32_t low_bits = pm1_cnt & 0x1fffU;

	return (uint8_t)(low_bits >> BIT_SLP_TYPx);
}
|
|
|
|
/**
 * @brief Emulate a guest read of a PM1A/PM1B register.
 *
 * Reads are passed straight through to the physical port; the raw value is
 * handed back to the guest via its PIO request.
 *
 * @return true - the access is fully handled in the hypervisor.
 */
static bool pm1ab_io_read(__unused struct acrn_vm *vm, struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
{
	vcpu->req.reqs.pio.value = pio_read(addr, width);

	return true;
}
|
|
|
|
/**
 * @brief Suspend the platform to S3 on behalf of the guest.
 *
 * Captures the 32-bit wake vector the guest OS programmed into the FACS,
 * pauses the VM, performs the physical S3 entry with the guest-provided
 * PM1A/PM1B control values, and on wakeup resumes the VM at that vector.
 * Sequence is strictly ordered: vector read -> pause -> suspend -> resume.
 *
 * @param vm           VM requesting S3 (the SOS per the pause_vm comment).
 * @param pm1a_cnt_val Value the guest wrote to PM1A_CNT to trigger sleep.
 * @param pm1b_cnt_val Value the guest wrote to PM1B_CNT (0 if absent).
 */
static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
{
	uint32_t guest_wakeup_vec32;

	/* Save the wakeup vec set by guest OS. Will return to guest
	 * with this wakeup vec as entry.
	 */
	stac();	/* open SMAP window: wake vector lives in guest-accessible memory */
	guest_wakeup_vec32 = *(vm->pm.sx_state_data->wake_vector_32);
	clac();	/* close SMAP window immediately after the read */

	pause_vm(vm);	/* pause sos_vm before suspend system */
	host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
	resume_vm_from_s3(vm, guest_wakeup_vec32);	/* jump back to vm */
}
|
|
|
|
/**
 * @brief Emulate guest writes to the PM1A/PM1B control registers.
 *
 * A 16-bit write that sets SLP_EN with the board's S3 sleep type is
 * intercepted and turned into a hypervisor-driven S3 entry instead of
 * reaching the hardware. When the platform has both PM1A and PM1B control
 * registers, the PM1A value is latched and S3 is entered only on the
 * matching PM1B write (ACPI requires both halves). All other writes are
 * forwarded to the physical port unchanged.
 *
 * @param vm    VM performing the write.
 * @param addr  Target I/O port.
 * @param width Access width in bytes; only 2-byte writes are inspected.
 * @param v     Value being written.
 *
 * @return true - the access is always completed in the hypervisor.
 */
static bool pm1ab_io_write(struct acrn_vm *vm, uint16_t addr, size_t width, uint32_t v)
{
	/* PM1A value latched while waiting for the matching PM1B write.
	 * NOTE(review): function-local static shared without locking —
	 * presumably only one VM (the SOS) reaches this path; confirm.
	 */
	static uint32_t pm1a_cnt_ready = 0U;
	bool to_write = true;

	if (width == 2U) {
		uint8_t val = get_slp_typx(v);

		if ((addr == vm->pm.sx_state_data->pm1a_cnt.address)
			&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1a) && (s3_enabled(v) != 0U)) {

			if (vm->pm.sx_state_data->pm1b_cnt.address != 0UL) {
				/* PM1B exists: defer S3 until its companion write. */
				pm1a_cnt_ready = v;
			} else {
				/* No PM1B register: PM1A alone triggers S3. */
				enter_s3(vm, v, 0U);
			}

			to_write = false;

		} else if ((addr == vm->pm.sx_state_data->pm1b_cnt.address)
			&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1b) && (s3_enabled(v) != 0U)) {

			if (pm1a_cnt_ready != 0U) {
				/* Both halves seen: enter S3 with the latched PM1A value. */
				enter_s3(vm, pm1a_cnt_ready, v);
				pm1a_cnt_ready = 0U;
			} else {
				/* the case broke ACPI spec */
				pr_err("PM1B_CNT write error!");
			}

			to_write = false;
		} else {
			/* No other state currently, do nothing */
		}
	}

	/* Non-sleep writes pass through to the physical register. */
	if (to_write) {
		pio_write(v, addr, width);
	}

	return true;
}
|
|
|
|
/**
 * @brief Register PM1 read/write emulation for one ACPI generic address.
 *
 * Only SystemIO registers with a defined access size of byte, word or
 * dword (ACPI encodings 1-3; 4 maps to an 8-byte length here) are trapped;
 * anything else is silently skipped.
 *
 * @param vm      VM to register the handler for.
 * @param pio_idx Emulation slot index for this register.
 * @param gas     ACPI generic address describing the register.
 */
static void register_gas_io_handler(struct acrn_vm *vm, uint32_t pio_idx, const struct acpi_generic_address *gas)
{
	/* ACPI access_size encoding -> access length in bytes (0 = undefined). */
	static const uint8_t io_len[5] = {0U, 1U, 2U, 4U, 8U};
	struct vm_io_range gas_io;

	if ((gas->address == 0UL) || (gas->space_id != SPACE_SYSTEM_IO)
		|| (gas->access_size == 0U) || (gas->access_size > 4U)) {
		return;
	}

	gas_io.flags = IO_ATTR_RW;
	gas_io.base = (uint16_t)gas->address;
	gas_io.len = io_len[gas->access_size];

	register_pio_emulation_handler(vm, pio_idx, &gas_io, &pm1ab_io_read, &pm1ab_io_write);

	pr_dbg("Enable PM1A trap for VM %d, port 0x%x, size %d\n", vm->vm_id, gas_io.base, gas_io.len);
}
|
|
|
|
void register_pm1ab_handler(struct acrn_vm *vm)
|
|
{
|
|
struct pm_s_state_data *sx_data = vm->pm.sx_state_data;
|
|
|
|
register_gas_io_handler(vm, PM1A_EVT_PIO_IDX, &(sx_data->pm1a_evt));
|
|
register_gas_io_handler(vm, PM1B_EVT_PIO_IDX, &(sx_data->pm1b_evt));
|
|
register_gas_io_handler(vm, PM1A_CNT_PIO_IDX, &(sx_data->pm1a_cnt));
|
|
register_gas_io_handler(vm, PM1B_CNT_PIO_IDX, &(sx_data->pm1b_cnt));
|
|
}
|
|
|
|
/**
 * @brief Handler for RT VM reads of the virtual PM1A control register.
 *
 * Performs no emulation in the hypervisor.
 *
 * @return false - re-inject the request to the device model for handling.
 */
static bool rt_vm_pm1a_io_read(__unused struct acrn_vm *vm, __unused struct acrn_vcpu *vcpu,
	__unused uint16_t addr, __unused size_t width)
{
	return false;
}
|
|
|
|
/*
|
|
* retval true means that we complete the emulation in HV and no need to re-inject the request to DM.
|
|
* retval false means that we should re-inject the request to DM.
|
|
*/
|
|
static bool rt_vm_pm1a_io_write(struct acrn_vm *vm, uint16_t addr, size_t width, uint32_t v)
|
|
{
|
|
if (width != 2U) {
|
|
pr_dbg("Invalid address (0x%x) or width (0x%x)", addr, width);
|
|
} else {
|
|
if (((v & VIRTUAL_PM1A_SLP_EN) && (((v & VIRTUAL_PM1A_SLP_TYP) >> 10U) == 5U)) != 0U) {
|
|
vm->state = VM_POWERING_OFF;
|
|
}
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
void register_rt_vm_pm1a_ctl_handler(struct acrn_vm *vm)
|
|
{
|
|
struct vm_io_range io_range;
|
|
|
|
io_range.flags = IO_ATTR_RW;
|
|
io_range.base = VIRTUAL_PM1A_CNT_ADDR;
|
|
io_range.len = 1U;
|
|
|
|
register_pio_emulation_handler(vm, VIRTUAL_PM1A_CNT_PIO_IDX, &io_range,
|
|
&rt_vm_pm1a_io_read, &rt_vm_pm1a_io_write);
|
|
}
|