mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-08-09 12:08:30 +00:00
parent
86f6ccd065
commit
a3b65ad0db
@ -14,119 +14,3 @@ void restore_msrs(void)
|
||||
msr_write(MSR_IA32_FS_BASE, (uint64_t)psc);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void acpi_gas_write(struct acpi_generic_address *gas, uint32_t val)
|
||||
{
|
||||
if (gas->space_id == SPACE_SYSTEM_MEMORY)
|
||||
mmio_write_word(val, (void *)HPA2HVA(gas->address));
|
||||
else
|
||||
io_write_word(val, gas->address);
|
||||
}
|
||||
|
||||
static uint32_t acpi_gas_read(struct acpi_generic_address *gas)
|
||||
{
|
||||
uint32_t ret = 0;
|
||||
|
||||
if (gas->space_id == SPACE_SYSTEM_MEMORY)
|
||||
ret = mmio_read_word((void *)HPA2HVA(gas->address));
|
||||
else
|
||||
ret = io_read_word(gas->address);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val,
|
||||
uint32_t pm1b_cnt_val)
|
||||
{
|
||||
uint32_t s1, s2;
|
||||
|
||||
acpi_gas_write(&vm->pm.sx_state_data->pm1a_cnt, pm1a_cnt_val);
|
||||
|
||||
if (vm->pm.sx_state_data->pm1b_cnt.address != 0)
|
||||
acpi_gas_write(&vm->pm.sx_state_data->pm1b_cnt, pm1b_cnt_val);
|
||||
|
||||
while (1) {
|
||||
/* polling PM1 state register to detect wether
|
||||
* the Sx state enter is interrupted by wakeup event.
|
||||
*/
|
||||
s1 = s2 = 0;
|
||||
|
||||
s1 = acpi_gas_read(&vm->pm.sx_state_data->pm1a_evt);
|
||||
|
||||
if (vm->pm.sx_state_data->pm1b_evt.address != 0) {
|
||||
s2 = acpi_gas_read(&vm->pm.sx_state_data->pm1b_evt);
|
||||
s1 |= s2;
|
||||
}
|
||||
|
||||
/* According to ACPI spec 4.8.3.1.1 PM1 state register, the bit
|
||||
* WAK_STS(bit 15) is set if system will transition to working
|
||||
* state.
|
||||
*/
|
||||
if ((s1 & (1 << BIT_WAK_STS)) != 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
|
||||
uint32_t pm1b_cnt_val)
|
||||
{
|
||||
uint32_t pcpu_id;
|
||||
uint64_t pmain_entry_saved;
|
||||
uint64_t *pmain_entry;
|
||||
|
||||
if (vm->pm.sx_state_data == NULL) {
|
||||
pr_err("No Sx state info avaiable. No Sx support");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pause_vm(vm); /* pause vm0 before suspend system */
|
||||
|
||||
pcpu_id = get_cpu_id();
|
||||
|
||||
/* offline all APs */
|
||||
stop_cpus();
|
||||
|
||||
/* Trampoline code is relocatable now. We have to calculate
|
||||
* main_entry address with relocation base address
|
||||
*/
|
||||
pmain_entry =
|
||||
(uint64_t *)(HPA2HVA(trampoline_start16_paddr) +
|
||||
(uint64_t) main_entry);
|
||||
|
||||
/* Save default main entry and we will restore it after
|
||||
* back from S3. So the AP online could jmp to correct
|
||||
* main entry.
|
||||
*/
|
||||
pmain_entry_saved = *pmain_entry;
|
||||
/* Set the main entry for resume from S3 state */
|
||||
*pmain_entry = (uint64_t)restore_s3_context;
|
||||
|
||||
CPU_IRQ_DISABLE();
|
||||
vmx_off(pcpu_id);
|
||||
|
||||
suspend_console();
|
||||
suspend_ioapic();
|
||||
suspend_iommu();
|
||||
suspend_lapic();
|
||||
|
||||
__enter_s3(vm, pm1a_cnt_val, pm1b_cnt_val);
|
||||
|
||||
/* release the lock aquired in trampoline code */
|
||||
spinlock_release(&trampoline_spinlock);
|
||||
|
||||
resume_lapic();
|
||||
resume_iommu();
|
||||
resume_ioapic();
|
||||
resume_console();
|
||||
|
||||
exec_vmxon_instr(pcpu_id);
|
||||
CPU_IRQ_ENABLE();
|
||||
|
||||
/* restore the default main entry */
|
||||
*pmain_entry = pmain_entry_saved;
|
||||
|
||||
/* online all APs again */
|
||||
start_cpus();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3,7 +3,6 @@
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
*/
|
||||
#include <vcpu.h>
|
||||
#include <spinlock.h>
|
||||
|
||||
.text
|
||||
.align 8
|
||||
@ -11,8 +10,6 @@
|
||||
.extern restore_msrs
|
||||
.extern cpu_ctx
|
||||
.extern load_gdtr_and_tr
|
||||
.extern do_acpi_s3
|
||||
.extern trampoline_spinlock
|
||||
|
||||
.global __enter_s3
|
||||
__enter_s3:
|
||||
@ -50,18 +47,7 @@ __enter_s3:
|
||||
|
||||
wbinvd
|
||||
|
||||
movq CPU_CONTEXT_OFFSET_RDX + cpu_ctx(%rip), %rdx /* pm1b_cnt_val */
|
||||
movq CPU_CONTEXT_OFFSET_RDI + cpu_ctx(%rip), %rdi /* *vm */
|
||||
movq CPU_CONTEXT_OFFSET_RSI + cpu_ctx(%rip), %rsi /* pm1a_cnt_val */
|
||||
|
||||
call do_acpi_s3
|
||||
|
||||
/* if do_acpi_s3 returns, which means ACRN can't enter S3 state.
|
||||
* Then trampoline will not be executed and we need to acquire
|
||||
* trampoline_spinlock here to match release in enter_sleep
|
||||
*/
|
||||
mov $trampoline_spinlock, %rdi
|
||||
spinlock_obtain(%rdi)
|
||||
/* Will add the function call to enter Sx here*/
|
||||
|
||||
|
||||
/*
|
||||
|
@ -5,10 +5,6 @@
|
||||
|
||||
#ifndef HOST_PM_H

/* Bit position of WAK_STS in the ACPI PM1 event/status register
 * (ACPI spec 4.8.3.1.1): set when the system transitions back to
 * the working state.
 */
#define BIT_WAK_STS 15U

/* Suspend the platform into ACPI S3; returns 0 on success, -1 when
 * no Sx state information is available.
 */
int enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
	uint32_t pm1b_cnt_val);
/* Assembly entry that saves CPU context and writes the PM1 control
 * registers to actually enter S3 (defined in the .S counterpart).
 */
extern void __enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
	uint32_t pm1b_cnt_val);
/* Resume target installed as the trampoline main entry before S3. */
extern void restore_s3_context(void);
|
||||
|
Loading…
Reference in New Issue
Block a user