mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: sched: decouple scheduler from schedule framework
This patch decouples the scheduling logic from the schedule framework and abstracts it into a scheduler, so we now have two modules: the scheduler and the schedule framework. From a modularization perspective, the schedule framework provides APIs for other layers to use, and it interacts with the scheduler only through the scheduler interfaces.

Tracked-On: #3813
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Yu Wang <yu1.wang@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent cad195c018
commit f04c491259
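The commit message describes a framework/scheduler split; the sketch below illustrates the dispatch pattern it introduces. This is a standalone model, not hypervisor code: the stub types, the "fixed" policy, and the main() harness are invented for illustration, while the NULL-checked ops-table dispatch and the idle fallback mirror the hunks that follow.

/*
 * Standalone sketch of the split: the framework keeps per-CPU
 * sched_control state and calls the policy only through an ops table,
 * falling back to the idle thread when a hook is absent.
 */
#include <stdio.h>
#include <stddef.h>

struct thread_object { const char *name; };
struct sched_control;

/* scheduler side: a table of optional hooks, in the spirit of struct acrn_scheduler */
struct acrn_scheduler {
	const char *name;
	struct thread_object *(*pick_next)(struct sched_control *ctl);
};

/* framework side: per-CPU control block pointing at the active policy */
struct sched_control {
	struct acrn_scheduler *scheduler;
	void *priv;                /* policy-private state */
	struct thread_object idle; /* fallback when nothing is runnable */
};

/* a toy policy: always run whatever thread priv points at */
static struct thread_object *fixed_pick_next(struct sched_control *ctl)
{
	return ctl->priv;
}

static struct acrn_scheduler sched_fixed = {
	.name = "fixed",
	.pick_next = fixed_pick_next,
};

/* framework dispatch, shaped like the new schedule(): hook if present, else idle */
static struct thread_object *schedule_once(struct sched_control *ctl)
{
	struct thread_object *next = &ctl->idle;

	if (ctl->scheduler->pick_next != NULL) {
		next = ctl->scheduler->pick_next(ctl);
	}
	return next;
}

int main(void)
{
	struct thread_object vcpu0 = { "vcpu0" };
	struct sched_control ctl = { &sched_fixed, &vcpu0, { "idle" } };

	printf("next: %s\n", schedule_once(&ctl)->name); /* vcpu0 */
	return 0;
}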
@@ -216,6 +216,7 @@ HW_C_SRCS += arch/x86/cat.c
 HW_C_SRCS += arch/x86/sgx.c
 HW_C_SRCS += common/softirq.c
 HW_C_SRCS += common/schedule.c
+HW_C_SRCS += common/sched_noop.c
 HW_C_SRCS += hw/pci.c
 HW_C_SRCS += arch/x86/configs/vm_config.c
 HW_C_SRCS += arch/x86/configs/$(CONFIG_BOARD)/board.c
@@ -409,6 +409,7 @@ void cpu_dead(void)
 	int32_t halt = 1;
 	uint16_t pcpu_id = get_pcpu_id();
 
+	deinit_sched(pcpu_id);
 	if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
 		/* clean up native stuff */
 		vmx_off();
@@ -729,17 +729,14 @@ static void context_switch_in(struct thread_object *next)
 	vcpu->running = true;
 }
 
-void schedule_vcpu(struct acrn_vcpu *vcpu)
+void launch_vcpu(struct acrn_vcpu *vcpu)
 {
 	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
 
 	vcpu->state = VCPU_RUNNING;
 	pr_dbg("vcpu%hu scheduled on pcpu%hu", vcpu->vcpu_id, pcpu_id);
 
-	get_schedule_lock(pcpu_id);
-	insert_thread_obj(&vcpu->thread_obj, pcpu_id);
-	make_reschedule_request(pcpu_id, DEL_MODE_IPI);
-	release_schedule_lock(pcpu_id);
+	wake_thread(&vcpu->thread_obj);
 }
 
 /* help function for vcpu create */
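The rename is more than cosmetic: launch_vcpu() no longer manipulates the run queue directly. A sketch of the new call path, as reconstructed from the schedule.c hunks later in this commit:

/*
 * launch_vcpu(vcpu)
 *   -> wake_thread(&vcpu->thread_obj)
 *        -> get_schedule_lock(pcpu_id)
 *        -> scheduler->wake(obj)        // sched_noop_wake(): occupy the per-pcpu slot
 *        -> set_thread_status(obj, THREAD_STS_RUNNABLE)
 *        -> make_reschedule_request(pcpu_id, DEL_MODE_IPI)
 *
 * The lock/insert/IPI sequence that schedule_vcpu() used to open-code now
 * lives behind the framework API, so callers no longer touch scheduler
 * internals.
 */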
@@ -761,6 +758,7 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 		vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
 		vcpu->thread_obj.switch_out = context_switch_out;
 		vcpu->thread_obj.switch_in = context_switch_in;
+		init_thread_data(&vcpu->thread_obj);
 	}
 
 	return ret;
@@ -1172,7 +1172,7 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t
 				set_vcpu_startup_entry(target_vcpu, (icr_low & APIC_VECTOR_MASK) << 12U);
 				/* init vmcs after set_vcpu_startup_entry */
 				init_vmcs(target_vcpu);
-				schedule_vcpu(target_vcpu);
+				launch_vcpu(target_vcpu);
 			}
 		}
 	} else {
@@ -642,7 +642,7 @@ void start_vm(struct acrn_vm *vm)
 	/* Only start BSP (vid = 0) and let BSP start other APs */
 	bsp = vcpu_from_vid(vm, BOOT_CPU_ID);
 	init_vmcs(bsp);
-	schedule_vcpu(bsp);
+	launch_vcpu(bsp);
 }
 
 /**
@@ -766,7 +766,7 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
 	set_vcpu_startup_entry(bsp, wakeup_vec);
 
 	init_vmcs(bsp);
-	schedule_vcpu(bsp);
+	launch_vcpu(bsp);
 }
 
 /**
hypervisor/common/sched_noop.c (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <per_cpu.h>
+#include <schedule.h>
+
+static int32_t sched_noop_init(struct sched_control *ctl)
+{
+	struct sched_noop_control *noop_ctl = &per_cpu(sched_noop_ctl, ctl->pcpu_id);
+	ctl->priv = noop_ctl;
+
+	return 0;
+}
+
+static struct thread_object *sched_noop_pick_next(struct sched_control *ctl)
+{
+	struct sched_noop_control *noop_ctl = (struct sched_noop_control *)ctl->priv;
+	struct thread_object *next = NULL;
+
+	if (noop_ctl->noop_thread_obj != NULL) {
+		next = noop_ctl->noop_thread_obj;
+	} else {
+		next = &get_cpu_var(idle);
+	}
+	return next;
+}
+
+static void sched_noop_sleep(struct thread_object *obj)
+{
+	struct sched_noop_control *noop_ctl = (struct sched_noop_control *)obj->sched_ctl->priv;
+
+	if (noop_ctl->noop_thread_obj == obj) {
+		noop_ctl->noop_thread_obj = NULL;
+	}
+}
+
+static void sched_noop_wake(struct thread_object *obj)
+{
+	struct sched_noop_control *noop_ctl = (struct sched_noop_control *)obj->sched_ctl->priv;
+
+	if (noop_ctl->noop_thread_obj == NULL) {
+		noop_ctl->noop_thread_obj = obj;
+	}
+}
+
+struct acrn_scheduler sched_noop = {
+	.name		= "sched_noop",
+	.init		= sched_noop_init,
+	.pick_next	= sched_noop_pick_next,
+	.sleep		= sched_noop_sleep,
+	.wake		= sched_noop_wake,
+};
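The noop scheduler keeps at most one runnable thread object per pcpu: wake occupies the slot only if it is empty, sleep vacates it only if the sleeper is the occupant, and pick_next returns the occupant or the idle thread. A standalone model of that state machine (the types and harness below are simplified stand-ins, not hypervisor code):

#include <stdio.h>
#include <stddef.h>

struct thread_object { const char *name; };
struct noop_ctl { struct thread_object *slot; };

static struct thread_object idle = { "idle" };

static void noop_wake(struct noop_ctl *c, struct thread_object *obj)
{
	if (c->slot == NULL) {   /* occupy only if empty, as in sched_noop_wake() */
		c->slot = obj;
	}
}

static void noop_sleep(struct noop_ctl *c, struct thread_object *obj)
{
	if (c->slot == obj) {    /* vacate only if obj is the occupant */
		c->slot = NULL;
	}
}

static struct thread_object *noop_pick_next(struct noop_ctl *c)
{
	return (c->slot != NULL) ? c->slot : &idle;
}

int main(void)
{
	struct noop_ctl c = { NULL };
	struct thread_object vcpu0 = { "vcpu0" }, vcpu1 = { "vcpu1" };

	noop_wake(&c, &vcpu0);
	noop_wake(&c, &vcpu1);                      /* ignored: slot taken */
	printf("%s\n", noop_pick_next(&c)->name);   /* vcpu0 */
	noop_sleep(&c, &vcpu0);
	printf("%s\n", noop_pick_next(&c)->name);   /* idle */
	return 0;
}

This is the same one-runnable-thread-per-pcpu behavior the framework previously hard-coded via ctl->thread_obj; the patch re-expresses it as the first pluggable policy.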
@@ -39,6 +39,24 @@ static inline void set_thread_status(struct thread_object *obj, enum thread_object_state status)
 	obj->status = status;
 }
 
+void get_schedule_lock(uint16_t pcpu_id)
+{
+	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
+	spinlock_obtain(&ctl->scheduler_lock);
+}
+
+void release_schedule_lock(uint16_t pcpu_id)
+{
+	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
+	spinlock_release(&ctl->scheduler_lock);
+}
+
+static struct acrn_scheduler *get_scheduler(uint16_t pcpu_id)
+{
+	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
+	return ctl->scheduler;
+}
+
 /**
  * @pre obj != NULL
  */
@@ -55,37 +73,40 @@ void init_sched(uint16_t pcpu_id)
 	ctl->flags = 0UL;
 	ctl->curr_obj = NULL;
 	ctl->pcpu_id = pcpu_id;
+	ctl->scheduler = &sched_noop;
+	if (ctl->scheduler->init != NULL) {
+		ctl->scheduler->init(ctl);
+	}
 }
 
-void get_schedule_lock(uint16_t pcpu_id)
-{
-	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-	spinlock_obtain(&ctl->scheduler_lock);
-}
-
-void release_schedule_lock(uint16_t pcpu_id)
-{
-	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-	spinlock_release(&ctl->scheduler_lock);
-}
-
-void insert_thread_obj(struct thread_object *obj, uint16_t pcpu_id)
+void deinit_sched(uint16_t pcpu_id)
 {
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
 
-	ctl->thread_obj = obj;
+	if (ctl->scheduler->deinit != NULL) {
+		ctl->scheduler->deinit(ctl);
+	}
 }
 
-void remove_thread_obj(__unused struct thread_object *obj, uint16_t pcpu_id)
+void init_thread_data(struct thread_object *obj)
 {
-	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-
-	ctl->thread_obj = NULL;
+	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
+	get_schedule_lock(obj->pcpu_id);
+	if (scheduler->init_data != NULL) {
+		scheduler->init_data(obj);
+	}
+	/* initial as BLOCKED status, so we can wake it up to run */
+	set_thread_status(obj, THREAD_STS_BLOCKED);
+	release_schedule_lock(obj->pcpu_id);
 }
 
-static struct thread_object *get_next_sched_obj(const struct sched_control *ctl)
+void deinit_thread_data(struct thread_object *obj)
 {
-	return ctl->thread_obj == NULL ? &get_cpu_var(idle) : ctl->thread_obj;
+	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
+
+	if (scheduler->deinit_data != NULL) {
+		scheduler->deinit_data(obj);
+	}
 }
 
 struct thread_object *sched_get_current(uint16_t pcpu_id)
@@ -142,11 +163,13 @@ void schedule(void)
 {
 	uint16_t pcpu_id = get_pcpu_id();
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-	struct thread_object *next = NULL;
+	struct thread_object *next = &per_cpu(idle, pcpu_id);
 	struct thread_object *prev = ctl->curr_obj;
 
 	get_schedule_lock(pcpu_id);
-	next = get_next_sched_obj(ctl);
+	if (ctl->scheduler->pick_next != NULL) {
+		next = ctl->scheduler->pick_next(ctl);
+	}
 	bitmap_clear_lock(NEED_RESCHEDULE, &ctl->flags);
 
 	/* Don't change prev object's status if it's not running */
@@ -168,9 +191,12 @@ void schedule(void)
 void sleep_thread(struct thread_object *obj)
 {
 	uint16_t pcpu_id = obj->pcpu_id;
+	struct acrn_scheduler *scheduler = get_scheduler(pcpu_id);
 
 	get_schedule_lock(pcpu_id);
-	remove_thread_obj(obj, pcpu_id);
+	if (scheduler->sleep != NULL) {
+		scheduler->sleep(obj);
+	}
 	if (is_running(obj)) {
 		if (obj->notify_mode == SCHED_NOTIFY_INIT) {
 			make_reschedule_request(pcpu_id, DEL_MODE_INIT);
@@ -185,10 +211,14 @@ void sleep_thread(struct thread_object *obj)
 void wake_thread(struct thread_object *obj)
 {
 	uint16_t pcpu_id = obj->pcpu_id;
+	struct acrn_scheduler *scheduler;
 
 	get_schedule_lock(pcpu_id);
 	if (is_blocked(obj)) {
-		insert_thread_obj(obj, pcpu_id);
+		scheduler = get_scheduler(pcpu_id);
+		if (scheduler->wake != NULL) {
+			scheduler->wake(obj);
+		}
 		set_thread_status(obj, THREAD_STS_RUNNABLE);
 		make_reschedule_request(pcpu_id, DEL_MODE_IPI);
 	}
@@ -197,6 +227,7 @@ void wake_thread(struct thread_object *obj)
 
 void run_thread(struct thread_object *obj)
 {
+	init_thread_data(obj);
 	get_schedule_lock(obj->pcpu_id);
 	get_cpu_var(sched_ctl).curr_obj = obj;
 	set_thread_status(obj, THREAD_STS_RUNNING);
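Taken together, the schedule.c hunks give thread objects a small status machine, with the policy hooks invoked at each transition. A summary sketch, reconstructed from this diff (not authoritative documentation):

/*
 * Thread object lifecycle as visible in this diff:
 *
 *   init_thread_data(obj)    new object -> THREAD_STS_BLOCKED
 *                            (+ scheduler->init_data(), under the lock)
 *   wake_thread(obj)         BLOCKED    -> THREAD_STS_RUNNABLE
 *                            (+ scheduler->wake(), reschedule IPI)
 *   schedule()/run_thread()  picked via scheduler->pick_next()
 *                            -> THREAD_STS_RUNNING
 *   sleep_thread(obj)        scheduler->sleep(); if the object is running,
 *                            kick its pcpu (DEL_MODE_INIT or DEL_MODE_IPI)
 *
 * Starting objects as BLOCKED means the only way onto a CPU is through
 * wake_thread(), which is why launch_vcpu() above reduces to a wake.
 */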
@@ -634,7 +634,7 @@ void resume_vcpu(struct acrn_vcpu *vcpu);
  *
  * @return None
  */
-void schedule_vcpu(struct acrn_vcpu *vcpu);
+void launch_vcpu(struct acrn_vcpu *vcpu);
 
 /**
  * @brief create a vcpu for the vm and mapped to the pcpu.
@@ -37,6 +37,7 @@ struct per_cpu_region {
 #endif
 	struct per_cpu_timers cpu_timers;
 	struct sched_control sched_ctl;
+	struct sched_noop_control sched_noop_ctl;
 	struct thread_object idle;
 	struct host_gdt gdt;
 	struct tss_64 tss;
@@ -13,6 +13,8 @@
 #define DEL_MODE_INIT	(1U)
 #define DEL_MODE_IPI	(2U)
 
+#define THREAD_DATA_SIZE	(256U)
+
 enum thread_object_state {
 	THREAD_STS_RUNNING = 1,
 	THREAD_STS_RUNNABLE,
@@ -38,6 +40,8 @@ struct thread_object {
 	uint64_t host_sp;
 	switch_t switch_out;
 	switch_t switch_in;
+
+	uint8_t data[THREAD_DATA_SIZE];
 };
 
 struct sched_control {
@@ -45,8 +49,37 @@ struct sched_control {
 	uint64_t flags;
 	struct thread_object *curr_obj;
 	spinlock_t scheduler_lock;	/* to protect sched_control and thread_object */
-	struct thread_object *thread_obj;
+	struct acrn_scheduler *scheduler;
+	void *priv;
 };
 
+#define SCHEDULER_MAX_NUMBER	4U
+struct acrn_scheduler {
+	char name[16];
+
+	/* init scheduler */
+	int32_t	(*init)(struct sched_control *ctl);
+	/* init private data of scheduler */
+	void	(*init_data)(struct thread_object *obj);
+	/* pick the next thread object */
+	struct thread_object* (*pick_next)(struct sched_control *ctl);
+	/* put thread object into sleep */
+	void	(*sleep)(struct thread_object *obj);
+	/* wake up thread object from sleep status */
+	void	(*wake)(struct thread_object *obj);
+	/* yield current thread object */
+	void	(*yield)(struct sched_control *ctl);
+	/* prioritize the thread object */
+	void	(*prioritize)(struct thread_object *obj);
+	/* deinit private data of scheduler */
+	void	(*deinit_data)(struct thread_object *obj);
+	/* deinit scheduler */
+	void	(*deinit)(struct sched_control *ctl);
+};
+extern struct acrn_scheduler sched_noop;
+
+struct sched_noop_control {
+	struct thread_object *noop_thread_obj;
+};
+
 bool is_idle_thread(const struct thread_object *obj);
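The new data[THREAD_DATA_SIZE] field gives each policy a fixed scratch area inside the thread object, so scheduler-private bookkeeping needs no separate allocation. A hypothetical policy's init_data hook might use it as below; "myrr" and everything named after it are invented for illustration, and only obj->data and THREAD_DATA_SIZE come from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define THREAD_DATA_SIZE (256U)

/* trimmed-down thread_object: only the scratch area matters here */
struct thread_object {
	uint8_t data[THREAD_DATA_SIZE];
};

/* hypothetical per-thread state for an invented "myrr" policy */
struct myrr_data {
	uint64_t slice_left;  /* remaining time slice, in ticks */
	uint16_t prio;
};

/* each policy overlays its own struct on obj->data */
static struct myrr_data *to_myrr(struct thread_object *obj)
{
	return (struct myrr_data *)(void *)obj->data;
}

static void myrr_init_data(struct thread_object *obj)
{
	struct myrr_data *d = to_myrr(obj);

	/* the overlay must fit in the fixed scratch area */
	assert(sizeof(struct myrr_data) <= THREAD_DATA_SIZE);
	(void)memset(d, 0, sizeof(*d));
	d->slice_left = 10U;
	d->prio = 1U;
}

int main(void)
{
	struct thread_object obj;

	myrr_init_data(&obj);
	printf("slice=%llu prio=%u\n",
	       (unsigned long long)to_myrr(&obj)->slice_left,
	       (unsigned int)to_myrr(&obj)->prio);
	return 0;
}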
@@ -54,11 +87,12 @@ uint16_t sched_get_pcpuid(const struct thread_object *obj);
 struct thread_object *sched_get_current(uint16_t pcpu_id);
 
 void init_sched(uint16_t pcpu_id);
+void deinit_sched(uint16_t pcpu_id);
 void get_schedule_lock(uint16_t pcpu_id);
 void release_schedule_lock(uint16_t pcpu_id);
 
-void insert_thread_obj(struct thread_object *obj, uint16_t pcpu_id);
-void remove_thread_obj(struct thread_object *obj, uint16_t pcpu_id);
+void init_thread_data(struct thread_object *obj);
+void deinit_thread_data(struct thread_object *obj);
 
 void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
 bool need_reschedule(uint16_t pcpu_id);