From 7a71422a6de928d5df5ac44c8d38b42fe852cb8a Mon Sep 17 00:00:00 2001
From: Yin Fegnwei
Date: Sat, 2 Jun 2018 22:31:58 +0800
Subject: [PATCH] hv: handle cpu offline request in idle thread

Change the need_scheduled field of the schedule context to flags,
because it is no longer used only for the need_reschedule check.

Add two functions to request and handle cpu offline.

The cpu offline request is handled only in the idle thread because
the vcpu running on the target pcpu must be paused first. Once it
is paused, the target pcpu can only receive the offline request
while it is in the idle thread.

Signed-off-by: Yin Fegnwei
Acked-by: Eddie Dong
---
 hypervisor/common/hv_main.c          |  2 +-
 hypervisor/common/schedule.c         | 29 +++++++++++++++++++++-------
 hypervisor/include/common/schedule.h | 10 +++++++---
 3 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index b23a440fe..dc4c267f3 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -44,7 +44,7 @@ void vcpu_thread(struct vcpu *vcpu)
 			continue;
 		}
 
-		if (need_rescheduled(vcpu->pcpu_id)) {
+		if (need_reschedule(vcpu->pcpu_id)) {
 			/*
 			 * In extrem case, schedule() could return. Which
 			 * means the vcpu resume happens before schedule()
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 18128e695..eabeef27c 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -17,7 +17,7 @@ void init_scheduler(void)
 		spinlock_init(&per_cpu(sched_ctx, i).runqueue_lock);
 		spinlock_init(&per_cpu(sched_ctx, i).scheduler_lock);
 		INIT_LIST_HEAD(&per_cpu(sched_ctx, i).runqueue);
-		per_cpu(sched_ctx, i).need_scheduled = 0;
+		per_cpu(sched_ctx, i).flags = 0;
 		per_cpu(sched_ctx, i).curr_vcpu = NULL;
 	}
 }
@@ -90,15 +90,15 @@ static struct vcpu *select_next_vcpu(int pcpu_id)
 
 void make_reschedule_request(struct vcpu *vcpu)
 {
-	bitmap_set(NEED_RESCHEDULED,
-		&per_cpu(sched_ctx, vcpu->pcpu_id).need_scheduled);
+	bitmap_set(NEED_RESCHEDULE,
+		&per_cpu(sched_ctx, vcpu->pcpu_id).flags);
 	send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
 }
 
-int need_rescheduled(int pcpu_id)
+int need_reschedule(int pcpu_id)
 {
-	return bitmap_test_and_clear(NEED_RESCHEDULED,
-		&per_cpu(sched_ctx, pcpu_id).need_scheduled);
+	return bitmap_test_and_clear(NEED_RESCHEDULE,
+		&per_cpu(sched_ctx, pcpu_id).flags);
 }
 
 static void context_switch_out(struct vcpu *vcpu)
@@ -136,13 +136,28 @@ static void context_switch_in(struct vcpu *vcpu)
 	 */
 }
 
+void make_pcpu_offline(int pcpu_id)
+{
+	bitmap_set(NEED_OFFLINE,
+		&per_cpu(sched_ctx, pcpu_id).flags);
+	send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
+}
+
+int need_offline(int pcpu_id)
+{
+	return bitmap_test_and_clear(NEED_OFFLINE,
+		&per_cpu(sched_ctx, pcpu_id).flags);
+}
+
 void default_idle(void)
 {
 	int pcpu_id = get_cpu_id();
 
 	while (1) {
-		if (need_rescheduled(pcpu_id))
+		if (need_reschedule(pcpu_id))
 			schedule();
+		else if (need_offline(pcpu_id))
+			cpu_dead(pcpu_id);
 		else
 			__asm __volatile("pause" ::: "memory");
 	}
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index a2e415a47..d85aedc22 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -7,12 +7,13 @@
 #ifndef _HV_CORE_SCHEDULE_
 #define _HV_CORE_SCHEDULE_
 
-#define NEED_RESCHEDULED (1)
+#define NEED_RESCHEDULE (1)
+#define NEED_OFFLINE (2)
 
 struct sched_context {
 	spinlock_t runqueue_lock;
 	struct list_head runqueue;
-	unsigned long need_scheduled;
+	unsigned long flags;
 	struct vcpu *curr_vcpu;
 	spinlock_t scheduler_lock;
 };
@@ -31,7 +32,10 @@ void remove_vcpu_from_runqueue(struct vcpu *vcpu);
 
 void default_idle(void);
 void make_reschedule_request(struct vcpu *vcpu);
-int need_rescheduled(int pcpu_id);
+int need_reschedule(int pcpu_id);
+void make_pcpu_offline(int pcpu_id);
+int need_offline(int pcpu_id);
+
 void schedule(void);
 
 void vcpu_thread(struct vcpu *vcpu);
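
For reference, the request/handle handshake this patch introduces can be
sketched stand-alone. The snippet below is a minimal model, not ACRN code:
C11 atomics stand in for the hypervisor's bitmap_set() and
bitmap_test_and_clear(), the VECTOR_NOTIFY_VCPU IPI and per-pcpu context
are omitted, and make_request()/test_and_clear() are names invented here
for illustration. One pass of the if/else chain mirrors a single turn of
default_idle().

#include <stdatomic.h>
#include <stdio.h>

#define NEED_RESCHEDULE (1)	/* bit positions, matching schedule.h */
#define NEED_OFFLINE (2)

static atomic_ulong flags;	/* stands in for sched_ctx.flags of one pcpu */

/* stand-in for bitmap_set(); the real code also sends an IPI */
static void make_request(int bit)
{
	atomic_fetch_or(&flags, 1UL << bit);
}

/* stand-in for bitmap_test_and_clear(): consume the request atomically */
static int test_and_clear(int bit)
{
	return (int)((atomic_fetch_and(&flags, ~(1UL << bit)) >> bit) & 1UL);
}

int main(void)
{
	make_request(NEED_OFFLINE);	/* what make_pcpu_offline() does */

	/* one turn of the idle loop in default_idle() */
	if (test_and_clear(NEED_RESCHEDULE))
		puts("schedule()");
	else if (test_and_clear(NEED_OFFLINE))
		puts("cpu_dead()");	/* target pcpu goes offline */
	else
		puts("pause");

	return 0;
}

Because the check clears the bit in the same atomic step that reads it, a
request raised between two iterations is never lost, only deferred to the
next pass, which is why the idle thread can poll both flags with a plain
if/else chain.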