From b39630a8e01802d87ecf91517cb19822c4e0ca1d Mon Sep 17 00:00:00 2001
From: Shuo A Liu
Date: Mon, 2 Dec 2019 15:41:34 +0800
Subject: [PATCH] hv: sched_iorr: add tick handler and runqueue operations

sched_control is per-pcpu, and each sched_control has a tick timer
that runs periodically; each period is called a tick. In the tick
handler, we
  1) compute the remaining timeslice of the current thread_object if
     it is not the idle thread
  2) make a reschedule request if the current thread_object has run
     out of its timeslice

For runqueue maintenance, objects that still have timeslice left are
kept at the front of the runqueue, while objects whose timeslice has
just been replenished are appended at the tail.

Tracked-On: #4178
Signed-off-by: Jason Chen CJ
Signed-off-by: Yu Wang
Signed-off-by: Shuo A Liu
---
 hypervisor/common/sched_iorr.c | 80 +++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 1 deletion(-)

diff --git a/hypervisor/common/sched_iorr.c b/hypervisor/common/sched_iorr.c
index d0b5ce772..e1c5df4cf 100644
--- a/hypervisor/common/sched_iorr.c
+++ b/hypervisor/common/sched_iorr.c
@@ -18,8 +18,86 @@ struct sched_iorr_data {
 	int64_t left_cycles;
 };
 
-static void sched_tick_handler(__unused void *param)
+/*
+ * @pre obj != NULL
+ * @pre obj->data != NULL
+ */
+bool is_inqueue(struct thread_object *obj)
 {
+	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
+	return !list_empty(&data->list);
+}
+
+/*
+ * @pre obj != NULL
+ * @pre obj->data != NULL
+ * @pre obj->sched_ctl != NULL
+ * @pre obj->sched_ctl->priv != NULL
+ */
+void runqueue_add_head(struct thread_object *obj)
+{
+	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)obj->sched_ctl->priv;
+	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
+
+	if (!is_inqueue(obj)) {
+		list_add(&data->list, &iorr_ctl->runqueue);
+	}
+}
+
+/*
+ * @pre obj != NULL
+ * @pre obj->data != NULL
+ * @pre obj->sched_ctl != NULL
+ * @pre obj->sched_ctl->priv != NULL
+ */
+void runqueue_add_tail(struct thread_object *obj)
+{
+	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)obj->sched_ctl->priv;
+	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
+
+	if (!is_inqueue(obj)) {
+		list_add_tail(&data->list, &iorr_ctl->runqueue);
+	}
+}
+
+/*
+ * @pre obj != NULL
+ * @pre obj->data != NULL
+ */
+void runqueue_remove(struct thread_object *obj)
+{
+	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
+	list_del_init(&data->list);
+}
+
+static void sched_tick_handler(void *param)
+{
+	struct sched_control *ctl = (struct sched_control *)param;
+	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
+	struct sched_iorr_data *data;
+	struct thread_object *current;
+	uint16_t pcpu_id = get_pcpu_id();
+	uint64_t now = rdtsc();
+	uint64_t rflags;
+
+	obtain_schedule_lock(pcpu_id, &rflags);
+	current = ctl->curr_obj;
+	/* If no vCPU has started scheduling yet, ignore this tick */
+	if (current != NULL) {
+		if (!(is_idle_thread(current) && list_empty(&iorr_ctl->runqueue))) {
+			data = (struct sched_iorr_data *)current->data;
+			/* consume the left_cycles of the current thread_object if it is not idle */
+			if (!is_idle_thread(current)) {
+				data->left_cycles -= now - data->last_cycles;
+				data->last_cycles = now;
+			}
+			/* make a reschedule request if current has run out of its cycles */
+			if (is_idle_thread(current) || data->left_cycles <= 0) {
+				make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+			}
+		}
+	}
+	release_schedule_lock(pcpu_id, rflags);
 }
 
 /*
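
Note: as an illustration of the timeslice accounting above, here is a
minimal, self-contained user-space sketch of what sched_tick_handler()
does on each tick. It is not part of the patch: the names thread_obj,
read_tsc() and tick(), the fake TSC, and the 2500-cycle slice are all
hypothetical stand-ins, whereas the real code uses thread_object,
rdtsc() and the per-pcpu schedule lock shown in the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the patch's per-thread accounting fields. */
struct thread_obj {
	const char *name;
	int64_t left_cycles;   /* remaining timeslice, may go negative */
	uint64_t last_cycles;  /* timestamp taken at the previous tick */
};

/* Fake monotonic TSC so the sketch runs in user space without rdtsc(). */
static uint64_t fake_tsc;
static uint64_t read_tsc(void)
{
	fake_tsc += 1000U;  /* pretend 1000 cycles pass between reads */
	return fake_tsc;
}

/*
 * One tick of accounting, mirroring the arithmetic in sched_tick_handler():
 * charge the cycles elapsed since the last tick to the current thread and
 * report whether a reschedule should be requested.
 */
static bool tick(struct thread_obj *current)
{
	uint64_t now = read_tsc();

	current->left_cycles -= (int64_t)(now - current->last_cycles);
	current->last_cycles = now;
	return current->left_cycles <= 0;
}

int main(void)
{
	struct thread_obj vcpu = {
		.name = "vcpu0",
		.left_cycles = 2500,  /* assumed slice_cycles value */
	};

	vcpu.last_cycles = read_tsc();
	for (int i = 0; i < 4; i++) {
		bool resched = tick(&vcpu);

		printf("tick %d: %s left_cycles=%lld resched=%d\n",
		       i, vcpu.name, (long long)vcpu.left_cycles, (int)resched);
		if (resched) {
			/*
			 * On a real reschedule the object would get a fresh
			 * timeslice and be appended at the runqueue tail,
			 * per the ordering policy in the commit message.
			 */
			vcpu.left_cycles = 2500;
		}
	}
	return 0;
}

Running the sketch drains left_cycles by 1000 per tick; it goes negative
on the third tick, which is the point where the hypervisor would send
the reschedule IPI and move the freshly replenished object to the tail
of the runqueue.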