diff --git a/hypervisor/arch/x86/pm.c b/hypervisor/arch/x86/pm.c
index fe0bcbc9c..33705f7c7 100644
--- a/hypervisor/arch/x86/pm.c
+++ b/hypervisor/arch/x86/pm.c
@@ -207,6 +207,8 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	vmx_off();
 
 	suspend_console();
+	suspend_vrtc();
+	suspend_sched();
 	suspend_ioapic();
 	suspend_iommu();
 	suspend_lapic();
@@ -237,6 +239,8 @@ void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_
 	smp_call_function(get_active_pcpu_bitmap(), resume_tsc, NULL);
 
 	/* console must be resumed after TSC restored since it will setup timer base on TSC */
+	resume_sched();
+	resume_vrtc();
 	resume_console();
 }
 
diff --git a/hypervisor/common/sched_bvt.c b/hypervisor/common/sched_bvt.c
index b9276238d..cbbfddaa8 100644
--- a/hypervisor/common/sched_bvt.c
+++ b/hypervisor/common/sched_bvt.c
@@ -215,6 +215,11 @@ static void sched_bvt_init_data(struct thread_object *obj, struct sched_params *
 	data->residual = 0U;
 }
 
+static void sched_bvt_suspend(struct sched_control *ctl)
+{
+	sched_bvt_deinit(ctl);
+}
+
 static uint64_t v2p(uint64_t virt_time, uint64_t ratio)
 {
 	return (uint64_t)(virt_time / ratio);
 }
@@ -334,4 +339,9 @@ struct acrn_scheduler sched_bvt = {
	.sleep = sched_bvt_sleep,
	.wake = sched_bvt_wake,
	.deinit = sched_bvt_deinit,
+	/* For now, suspend only needs to delete the tick timer; re-arming it
+	 * is deferred to the first schedule() after resume, so there is no
+	 * need for a .resume callback yet.
+	 */
+	.suspend = sched_bvt_suspend,
 };
diff --git a/hypervisor/common/sched_iorr.c b/hypervisor/common/sched_iorr.c
index 039c47a92..ffe7cb881 100644
--- a/hypervisor/common/sched_iorr.c
+++ b/hypervisor/common/sched_iorr.c
@@ -101,20 +101,12 @@ static void sched_tick_handler(void *param)
 	release_schedule_lock(pcpu_id, rflags);
 }
 
-/*
- * @pre ctl->pcpu_id == get_pcpu_id()
- */
-int sched_iorr_init(struct sched_control *ctl)
+int sched_iorr_add_timer(struct sched_control *ctl)
 {
 	struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);
 	uint64_t tick_period = TICKS_PER_MS;
 	int ret = 0;
 
-	ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");
-
-	ctl->priv = iorr_ctl;
-	INIT_LIST_HEAD(&iorr_ctl->runqueue);
-
 	/* The tick_timer is periodically */
 	initialize_timer(&iorr_ctl->tick_timer, sched_tick_handler,
			ctl, cpu_ticks() + tick_period, tick_period);
@@ -126,12 +118,41 @@ int sched_iorr_init(struct sched_control *ctl)
 	return ret;
 }
 
-void sched_iorr_deinit(struct sched_control *ctl)
+static void sched_iorr_del_timer(struct sched_control *ctl)
 {
 	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
 	del_timer(&iorr_ctl->tick_timer);
 }
 
+/*
+ * @pre ctl->pcpu_id == get_pcpu_id()
+ */
+int sched_iorr_init(struct sched_control *ctl)
+{
+	struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);
+
+	ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");
+
+	ctl->priv = iorr_ctl;
+	INIT_LIST_HEAD(&iorr_ctl->runqueue);
+	return sched_iorr_add_timer(ctl);
+}
+
+void sched_iorr_deinit(struct sched_control *ctl)
+{
+	sched_iorr_del_timer(ctl);
+}
+
+static void sched_iorr_suspend(struct sched_control *ctl)
+{
+	sched_iorr_del_timer(ctl);
+}
+
+static void sched_iorr_resume(struct sched_control *ctl)
+{
+	sched_iorr_add_timer(ctl);
+}
+
 void sched_iorr_init_data(struct thread_object *obj, __unused struct sched_params * params)
 {
 	struct sched_iorr_data *data;
@@ -201,4 +222,6 @@ struct acrn_scheduler sched_iorr = {
	.sleep = sched_iorr_sleep,
	.wake = sched_iorr_wake,
	.deinit = sched_iorr_deinit,
+	.suspend = sched_iorr_suspend,
+	.resume = sched_iorr_resume,
 };
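Reviewer note on the sched_bvt hunk above: dropping the .resume callback is safe only because BVT re-arms its tick timer on the next scheduling pass. A minimal sketch of that lazy re-arm pattern, reusing only the timer API already visible in this patch; the sched_lazy_* names, the priv struct, and pick_runnable_thread() are hypothetical illustrations, not BVT's actual code:

static struct thread_object *sched_lazy_pick_next(struct sched_control *ctl)
{
	struct sched_lazy_control *lctl = (struct sched_lazy_control *)ctl->priv;

	/* Each pass derives a fresh deadline from the current TSC and
	 * re-programs the tick timer, so a timer deleted in .suspend comes
	 * back on the first schedule() after resume with no .resume hook.
	 */
	del_timer(&lctl->tick_timer);
	initialize_timer(&lctl->tick_timer, sched_lazy_tick_handler, ctl,
			cpu_ticks() + TICKS_PER_MS, 0UL);
	(void)add_timer(&lctl->tick_timer);

	return pick_runnable_thread(lctl);	/* hypothetical helper */
}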
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index ce05b9a45..baa1a531f 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -98,6 +98,24 @@ void deinit_sched(uint16_t pcpu_id)
 	}
 }
 
+void suspend_sched(void)
+{
+	struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);
+
+	if (ctl->scheduler->suspend != NULL) {
+		ctl->scheduler->suspend(ctl);
+	}
+}
+
+void resume_sched(void)
+{
+	struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);
+
+	if (ctl->scheduler->resume != NULL) {
+		ctl->scheduler->resume(ctl);
+	}
+}
+
 void init_thread_data(struct thread_object *obj, struct sched_params *params)
 {
 	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
diff --git a/hypervisor/dm/vrtc.c b/hypervisor/dm/vrtc.c
index 23ea7ae73..2d10fb801 100644
--- a/hypervisor/dm/vrtc.c
+++ b/hypervisor/dm/vrtc.c
@@ -699,6 +699,18 @@ static void vrtc_set_basetime(struct acrn_vrtc *vrtc)
 	spinlock_release(&vrtc_rebase_lock);
 }
 
+void suspend_vrtc(void)
+{
+	/* Service VM only: stop the RTC calibration timer across S3 */
+	del_timer(&calibrate_timer);
+}
+
+void resume_vrtc(void)
+{
+	/* Service VM only: re-arm the RTC calibration timer after S3 */
+	calibrate_setup_timer();
+}
+
 void vrtc_init(struct acrn_vm *vm)
 {
 	struct vm_io_range range = {
diff --git a/hypervisor/include/arch/x86/asm/guest/vm.h b/hypervisor/include/arch/x86/asm/guest/vm.h
index 5bb5deea0..71b0329f4 100644
--- a/hypervisor/include/arch/x86/asm/guest/vm.h
+++ b/hypervisor/include/arch/x86/asm/guest/vm.h
@@ -261,6 +261,8 @@ uint64_t find_space_from_ve820(struct acrn_vm *vm, uint32_t size, uint64_t min_a
 
 int32_t prepare_os_image(struct acrn_vm *vm);
 
+void suspend_vrtc(void);
+void resume_vrtc(void);
 void vrtc_init(struct acrn_vm *vm);
 
 bool is_lapic_pt_configured(const struct acrn_vm *vm);
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index d801da56f..0512aaab3 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -99,6 +99,10 @@ struct acrn_scheduler {
	void	(*deinit_data)(struct thread_object *obj);
 	/* deinit scheduler */
	void	(*deinit)(struct sched_control *ctl);
+	/* suspend scheduler */
+	void	(*suspend)(struct sched_control *ctl);
+	/* resume scheduler */
+	void	(*resume)(struct sched_control *ctl);
 };
 extern struct acrn_scheduler sched_noop;
 extern struct acrn_scheduler sched_iorr;
@@ -131,6 +135,8 @@ struct thread_object *sched_get_current(uint16_t pcpu_id);
 
 void init_sched(uint16_t pcpu_id);
 void deinit_sched(uint16_t pcpu_id);
+void suspend_sched(void);
+void resume_sched(void);
 void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag);
 void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
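Net effect on the S3 path, condensed from the pm.c hunks above (the trailing comments are editorial): resume_sched() and resume_vrtc() re-arm timers whose deadlines are computed from cpu_ticks(), so both must run after resume_tsc, mirroring the existing constraint on resume_console().

	/* suspend leg: stop TSC-based housekeeping before the platform sleeps */
	suspend_console();
	suspend_vrtc();		/* Service VM RTC calibration timer */
	suspend_sched();	/* per-scheduler .suspend, e.g. the IORR tick timer */

	/* resume leg: restore the TSC first, then everything that arms timers from it */
	smp_call_function(get_active_pcpu_bitmap(), resume_tsc, NULL);
	resume_sched();
	resume_vrtc();
	resume_console();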