hv: schedule: remove runqueue_lock in sched_context
Now that sched_object and sched_context are protected by scheduler_lock, there is no longer any need for runqueue_lock to guard the schedule runqueue, since there is no plan to support scheduling migration. Remove runqueue_lock from sched_context and drop the now-unused pcpu_id parameter from remove_from_cpu_runqueue().

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
parent b1dd3e26f5
commit e69b3dcf67
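For orientation, here is a minimal sketch of the locking discipline this change relies on: every runqueue and sched_context access now happens with the per-pCPU scheduler_lock held by the caller, which is what makes the inner runqueue_lock redundant. get_schedule_lock() is assumed here as the acquire-side counterpart of the release_schedule_lock() call visible in pause_vcpu() below; wake_sched_obj() is a hypothetical caller, not the exact ACRN source.

/*
 * Illustrative caller, not the actual ACRN code: queue a sched_object
 * on a pCPU's runqueue under that pCPU's scheduler_lock.
 */
static void wake_sched_obj(struct sched_object *obj, uint16_t pcpu_id)
{
	get_schedule_lock(pcpu_id);		/* assumed twin of release_schedule_lock(); serializes sched_context and its runqueue */
	add_to_cpu_runqueue(obj, pcpu_id);	/* safe without runqueue_lock: caller holds scheduler_lock */
	make_reschedule_request(pcpu_id, DEL_MODE_IPI);	/* DEL_MODE_IPI assumed; the diff only shows DEL_MODE_INIT for LAPIC pass-through */
	release_schedule_lock(pcpu_id);
}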
@@ -620,7 +620,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->state = new_state;
 
 	if (atomic_load32(&vcpu->running) == 1U) {
-		remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
+		remove_from_cpu_runqueue(&vcpu->sched_obj);
 
 		if (is_lapic_pt_enabled(vcpu)) {
 			make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT);
@@ -636,7 +636,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 			}
 		}
 	} else {
-		remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
+		remove_from_cpu_runqueue(&vcpu->sched_obj);
 		release_schedule_lock(vcpu->pcpu_id);
 	}
 }

@@ -24,7 +24,6 @@ void init_scheduler(void)
 	for (i = 0U; i < pcpu_nums; i++) {
 		ctx = &per_cpu(sched_ctx, i);
 
-		spinlock_init(&ctx->runqueue_lock);
 		spinlock_init(&ctx->scheduler_lock);
 		INIT_LIST_HEAD(&ctx->runqueue);
 		ctx->flags = 0UL;
@@ -74,33 +73,25 @@ void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
 {
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 
-	spinlock_obtain(&ctx->runqueue_lock);
 	if (list_empty(&obj->run_list)) {
 		list_add_tail(&obj->run_list, &ctx->runqueue);
 	}
-	spinlock_release(&ctx->runqueue_lock);
 }
 
-void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
+void remove_from_cpu_runqueue(struct sched_object *obj)
 {
-	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
-
-	spinlock_obtain(&ctx->runqueue_lock);
 	list_del_init(&obj->run_list);
-	spinlock_release(&ctx->runqueue_lock);
 }
 
 static struct sched_object *get_next_sched_obj(struct sched_context *ctx)
 {
 	struct sched_object *obj = NULL;
 
-	spinlock_obtain(&ctx->runqueue_lock);
 	if (!list_empty(&ctx->runqueue)) {
 		obj = get_first_item(&ctx->runqueue, struct sched_object, run_list);
 	} else {
 		obj = &get_cpu_var(idle);
 	}
-	spinlock_release(&ctx->runqueue_lock);
 
 	return obj;
 }

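The signature change falls out of the list primitive itself: list_del_init() unlinks a node through its own prev/next pointers and re-initializes it, so remove_from_cpu_runqueue() never needs to know which pCPU's runqueue the object sits on. A self-contained sketch of that behavior, in the Linux-style intrusive list ACRN uses (simplified, not the actual ACRN list implementation):

struct list_head {
	struct list_head *next, *prev;
};

/* Unlink an entry using only its own links, then point the entry back
 * at itself so a later list_empty() check on it returns true. */
static void list_del_init_sketch(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry;
	entry->prev = entry;
}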
@@ -28,11 +28,10 @@ struct sched_object {
 };
 
 struct sched_context {
-	spinlock_t runqueue_lock;
 	struct list_head runqueue;
 	uint64_t flags;
 	struct sched_object *curr_obj;
-	spinlock_t scheduler_lock;
+	spinlock_t scheduler_lock; /* to protect sched_context and sched_object */
 };
 
 void init_scheduler(void);
@@ -45,7 +44,7 @@ uint16_t allocate_pcpu(void);
 void free_pcpu(uint16_t pcpu_id);
 
 void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
-void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
+void remove_from_cpu_runqueue(struct sched_object *obj);
 
 void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
 bool need_reschedule(uint16_t pcpu_id);
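On the consumer side, get_next_sched_obj() is likewise called with scheduler_lock already held by the scheduling path, falling back to the per-CPU idle object when the runqueue is empty. A hedged sketch of such a caller (schedule_sketch() and its lock placement are illustrative, not the exact ACRN schedule() implementation):

static void schedule_sketch(uint16_t pcpu_id)
{
	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
	struct sched_object *next;

	get_schedule_lock(pcpu_id);		/* the same per-pCPU lock the diff relies on */
	next = get_next_sched_obj(ctx);		/* returns the idle object if the runqueue is empty */
	ctx->curr_obj = next;
	release_schedule_lock(pcpu_id);

	/* ... context switch to next would happen here ... */
}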