mirror of
https://github.com/projectacrn/acrn-hypervisor.git
synced 2025-07-31 15:30:56 +00:00
hv: s3: add timer support
When resuming from S3, the Service VM OS will hang because the timer interrupt on the BSP is not triggered. The hypervisor won't reprogram the physical timer because expired timers remain on the pcpu timer list. Add suspend and resume ops for modules that use timers. This patch covers only the Service VM OS; support for User VMs will be added in the future. Tracked-On: #8623 Signed-off-by: Haiwei Li <haiwei.li@intel.com>
This commit is contained in:
parent
5283c147ef
commit
3d6ca845e2
@ -207,6 +207,8 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
|
||||
vmx_off();
|
||||
|
||||
suspend_console();
|
||||
suspend_vrtc();
|
||||
suspend_sched();
|
||||
suspend_ioapic();
|
||||
suspend_iommu();
|
||||
suspend_lapic();
|
||||
@ -237,6 +239,8 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
|
||||
smp_call_function(get_active_pcpu_bitmap(), resume_tsc, NULL);
|
||||
|
||||
/* console must be resumed after TSC restored since it will setup timer base on TSC */
|
||||
resume_sched();
|
||||
resume_vrtc();
|
||||
resume_console();
|
||||
}
|
||||
|
||||
|
@ -215,6 +215,11 @@ static void sched_bvt_init_data(struct thread_object *obj, struct sched_params *
|
||||
data->residual = 0U;
|
||||
}
|
||||
|
||||
static void sched_bvt_suspend(struct sched_control *ctl)
|
||||
{
|
||||
sched_bvt_deinit(ctl);
|
||||
}
|
||||
|
||||
static uint64_t v2p(uint64_t virt_time, uint64_t ratio)
|
||||
{
|
||||
return (uint64_t)(virt_time / ratio);
|
||||
@ -334,4 +339,9 @@ struct acrn_scheduler sched_bvt = {
|
||||
.sleep = sched_bvt_sleep,
|
||||
.wake = sched_bvt_wake,
|
||||
.deinit = sched_bvt_deinit,
|
||||
/* Now suspend is just to do del_timer and add_timer will be delayed to
|
||||
 * schedule after resume.
|
||||
* So no need to add .resume now.
|
||||
*/
|
||||
.suspend = sched_bvt_suspend,
|
||||
};
|
||||
|
@ -101,20 +101,12 @@ static void sched_tick_handler(void *param)
|
||||
release_schedule_lock(pcpu_id, rflags);
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre ctl->pcpu_id == get_pcpu_id()
|
||||
*/
|
||||
int sched_iorr_init(struct sched_control *ctl)
|
||||
int sched_iorr_add_timer(struct sched_control *ctl)
|
||||
{
|
||||
struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);
|
||||
uint64_t tick_period = TICKS_PER_MS;
|
||||
int ret = 0;
|
||||
|
||||
ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");
|
||||
|
||||
ctl->priv = iorr_ctl;
|
||||
INIT_LIST_HEAD(&iorr_ctl->runqueue);
|
||||
|
||||
	/* The tick_timer is periodic */
|
||||
initialize_timer(&iorr_ctl->tick_timer, sched_tick_handler, ctl,
|
||||
cpu_ticks() + tick_period, tick_period);
|
||||
@ -126,12 +118,41 @@ int sched_iorr_init(struct sched_control *ctl)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void sched_iorr_deinit(struct sched_control *ctl)
|
||||
static int sched_iorr_del_timer(struct sched_control *ctl)
|
||||
{
|
||||
struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
|
||||
del_timer(&iorr_ctl->tick_timer);
|
||||
}
|
||||
|
||||
/*
|
||||
* @pre ctl->pcpu_id == get_pcpu_id()
|
||||
*/
|
||||
int sched_iorr_init(struct sched_control *ctl)
|
||||
{
|
||||
struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);
|
||||
|
||||
ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");
|
||||
|
||||
ctl->priv = iorr_ctl;
|
||||
INIT_LIST_HEAD(&iorr_ctl->runqueue);
|
||||
return sched_iorr_add_timer(ctl);
|
||||
}
|
||||
|
||||
void sched_iorr_deinit(struct sched_control *ctl)
|
||||
{
|
||||
sched_iorr_del_timer(ctl);
|
||||
}
|
||||
|
||||
static void sched_iorr_suspend(struct sched_control *ctl)
|
||||
{
|
||||
sched_iorr_del_timer(ctl);
|
||||
}
|
||||
|
||||
static void sched_iorr_resume(struct sched_control *ctl)
|
||||
{
|
||||
sched_iorr_add_timer(ctl);
|
||||
}
|
||||
|
||||
void sched_iorr_init_data(struct thread_object *obj, __unused struct sched_params * params)
|
||||
{
|
||||
struct sched_iorr_data *data;
|
||||
@ -201,4 +222,6 @@ struct acrn_scheduler sched_iorr = {
|
||||
.sleep = sched_iorr_sleep,
|
||||
.wake = sched_iorr_wake,
|
||||
.deinit = sched_iorr_deinit,
|
||||
.suspend = sched_iorr_suspend,
|
||||
.resume = sched_iorr_resume,
|
||||
};
|
||||
|
@ -98,6 +98,24 @@ void deinit_sched(uint16_t pcpu_id)
|
||||
}
|
||||
}
|
||||
|
||||
void suspend_sched(void)
|
||||
{
|
||||
struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);
|
||||
|
||||
if (ctl->scheduler->suspend != NULL) {
|
||||
ctl->scheduler->suspend(ctl);
|
||||
}
|
||||
}
|
||||
|
||||
void resume_sched(void)
|
||||
{
|
||||
struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);
|
||||
|
||||
if (ctl->scheduler->resume != NULL) {
|
||||
ctl->scheduler->resume(ctl);
|
||||
}
|
||||
}
|
||||
|
||||
void init_thread_data(struct thread_object *obj, struct sched_params *params)
|
||||
{
|
||||
struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
|
||||
|
@ -699,6 +699,18 @@ static void vrtc_set_basetime(struct acrn_vrtc *vrtc)
|
||||
spinlock_release(&vrtc_rebase_lock);
|
||||
}
|
||||
|
||||
void suspend_vrtc(void)
|
||||
{
|
||||
/* For service vm */
|
||||
del_timer(&calibrate_timer);
|
||||
}
|
||||
|
||||
void resume_vrtc(void)
|
||||
{
|
||||
/* For service vm */
|
||||
calibrate_setup_timer();
|
||||
}
|
||||
|
||||
void vrtc_init(struct acrn_vm *vm)
|
||||
{
|
||||
struct vm_io_range range = {
|
||||
|
@ -261,6 +261,8 @@ uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_a
|
||||
|
||||
int32_t prepare_os_image(struct acrn_vm *vm);
|
||||
|
||||
void suspend_vrtc(void);
|
||||
void resume_vrtc(void);
|
||||
void vrtc_init(struct acrn_vm *vm);
|
||||
|
||||
bool is_lapic_pt_configured(const struct acrn_vm *vm);
|
||||
|
@ -99,6 +99,10 @@ struct acrn_scheduler {
|
||||
void (*deinit_data)(struct thread_object *obj);
|
||||
/* deinit scheduler */
|
||||
void (*deinit)(struct sched_control *ctl);
|
||||
/* suspend scheduler */
|
||||
void (*suspend)(struct sched_control *ctl);
|
||||
/* resume scheduler */
|
||||
void (*resume)(struct sched_control *ctl);
|
||||
};
|
||||
extern struct acrn_scheduler sched_noop;
|
||||
extern struct acrn_scheduler sched_iorr;
|
||||
@ -131,6 +135,8 @@ struct thread_object *sched_get_current(uint16_t pcpu_id);
|
||||
|
||||
void init_sched(uint16_t pcpu_id);
|
||||
void deinit_sched(uint16_t pcpu_id);
|
||||
void suspend_sched(void);
|
||||
void resume_sched(void);
|
||||
void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag);
|
||||
void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user