hv: sched: move pcpu_id from acrn_vcpu to thread_object

With CPU sharing enabled, acrn_vcpu will be mapped to thread_object
for scheduling. From a modularization perspective, it is better to
remove pcpu_id from acrn_vcpu and move it into thread_object.

Tracked-On: #3813
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Yu Wang <yu1.wang@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shuo A Liu 2019-10-14 16:38:58 +08:00 committed by ACRN System Integration
parent f85106d1ed
commit 891e46453d
18 changed files with 85 additions and 53 deletions

View File

@ -387,7 +387,6 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
/* Initialize CPU ID for this VCPU */ /* Initialize CPU ID for this VCPU */
vcpu->vcpu_id = vcpu_id; vcpu->vcpu_id = vcpu_id;
vcpu->pcpu_id = pcpu_id;
per_cpu(ever_run_vcpu, pcpu_id) = vcpu; per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
/* Initialize the parent VM reference */ /* Initialize the parent VM reference */
@ -403,8 +402,8 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
per_cpu(vcpu, pcpu_id) = vcpu; per_cpu(vcpu, pcpu_id) = vcpu;
pr_info("PCPU%d is working as VM%d VCPU%d, Role: %s", pr_info("Create VM%d-VCPU%d, Role: %s",
vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY"); is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
/* /*
@ -585,7 +584,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
void offline_vcpu(struct acrn_vcpu *vcpu) void offline_vcpu(struct acrn_vcpu *vcpu)
{ {
vlapic_free(vcpu); vlapic_free(vcpu);
per_cpu(ever_run_vcpu, vcpu->pcpu_id) = NULL; per_cpu(ever_run_vcpu, pcpuid_from_vcpu(vcpu)) = NULL;
vcpu->state = VCPU_OFFLINE; vcpu->state = VCPU_OFFLINE;
} }
@ -656,49 +655,51 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state) void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
{ {
uint16_t pcpu_id = get_pcpu_id(); uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
pr_dbg("vcpu%hu paused, new state: %d", pr_dbg("vcpu%hu paused, new state: %d",
vcpu->vcpu_id, new_state); vcpu->vcpu_id, new_state);
get_schedule_lock(vcpu->pcpu_id); get_schedule_lock(pcpu_id);
vcpu->prev_state = vcpu->state; vcpu->prev_state = vcpu->state;
vcpu->state = new_state; vcpu->state = new_state;
if (vcpu->running) { if (vcpu->running) {
remove_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id); remove_thread_obj(&vcpu->thread_obj, pcpu_id);
if (is_lapic_pt_enabled(vcpu)) { if (is_lapic_pt_enabled(vcpu)) {
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT); make_reschedule_request(pcpu_id, DEL_MODE_INIT);
} else { } else {
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI); make_reschedule_request(pcpu_id, DEL_MODE_IPI);
} }
release_schedule_lock(vcpu->pcpu_id); release_schedule_lock(pcpu_id);
if (vcpu->pcpu_id != pcpu_id) { if (pcpu_id != get_pcpu_id()) {
while (vcpu->running) { while (vcpu->running) {
asm_pause(); asm_pause();
} }
} }
} else { } else {
remove_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id); remove_thread_obj(&vcpu->thread_obj, pcpu_id);
release_schedule_lock(vcpu->pcpu_id); release_schedule_lock(pcpu_id);
} }
} }
void resume_vcpu(struct acrn_vcpu *vcpu) void resume_vcpu(struct acrn_vcpu *vcpu)
{ {
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
pr_dbg("vcpu%hu resumed", vcpu->vcpu_id); pr_dbg("vcpu%hu resumed", vcpu->vcpu_id);
get_schedule_lock(vcpu->pcpu_id); get_schedule_lock(pcpu_id);
vcpu->state = vcpu->prev_state; vcpu->state = vcpu->prev_state;
if (vcpu->state == VCPU_RUNNING) { if (vcpu->state == VCPU_RUNNING) {
insert_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id); insert_thread_obj(&vcpu->thread_obj, pcpu_id);
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI); make_reschedule_request(pcpu_id, DEL_MODE_IPI);
} }
release_schedule_lock(vcpu->pcpu_id); release_schedule_lock(pcpu_id);
} }
static void context_switch_out(struct thread_object *prev) static void context_switch_out(struct thread_object *prev)
@ -728,13 +729,15 @@ static void context_switch_in(struct thread_object *next)
void schedule_vcpu(struct acrn_vcpu *vcpu) void schedule_vcpu(struct acrn_vcpu *vcpu)
{ {
vcpu->state = VCPU_RUNNING; uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
pr_dbg("vcpu%hu scheduled", vcpu->vcpu_id);
get_schedule_lock(vcpu->pcpu_id); vcpu->state = VCPU_RUNNING;
insert_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id); pr_dbg("vcpu%hu scheduled on pcpu%hu", vcpu->vcpu_id, pcpu_id);
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI);
release_schedule_lock(vcpu->pcpu_id); get_schedule_lock(pcpu_id);
insert_thread_obj(&vcpu->thread_obj, pcpu_id);
make_reschedule_request(pcpu_id, DEL_MODE_IPI);
release_schedule_lock(pcpu_id);
} }
/* help function for vcpu create */ /* help function for vcpu create */
@ -748,7 +751,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
if (ret == 0) { if (ret == 0) {
snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id); snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id);
(void)strncpy_s(vcpu->thread_obj.name, 16U, thread_name, 16U); (void)strncpy_s(vcpu->thread_obj.name, 16U, thread_name, 16U);
vcpu->thread_obj.sched_ctl = &per_cpu(sched_ctl, pcpu_id);
vcpu->thread_obj.thread_entry = vcpu_thread; vcpu->thread_obj.thread_entry = vcpu_thread;
vcpu->thread_obj.pcpu_id = pcpu_id;
vcpu->thread_obj.host_sp = build_stack_frame(vcpu); vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
vcpu->thread_obj.switch_out = context_switch_out; vcpu->thread_obj.switch_out = context_switch_out;
vcpu->thread_obj.switch_in = context_switch_in; vcpu->thread_obj.switch_in = context_switch_in;
@ -757,6 +762,14 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
return ret; return ret;
} }
/**
* @pre vcpu != NULL
*/
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu)
{
return sched_get_pcpuid(&vcpu->thread_obj);
}
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask) uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
{ {
uint16_t vcpu_id; uint16_t vcpu_id;
@ -766,7 +779,7 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) { for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
if ((vdmask & (1UL << vcpu_id)) != 0UL) { if ((vdmask & (1UL << vcpu_id)) != 0UL) {
vcpu = vcpu_from_vid(vm, vcpu_id); vcpu = vcpu_from_vid(vm, vcpu_id);
bitmap_set_nolock(vcpu->pcpu_id, &dmask); bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &dmask);
} }
} }

View File

@ -106,6 +106,8 @@ static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid) void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
{ {
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
bitmap_set_lock(eventid, &vcpu->arch.pending_req); bitmap_set_lock(eventid, &vcpu->arch.pending_req);
/* /*
* if current hostcpu is not the target vcpu's hostcpu, we need * if current hostcpu is not the target vcpu's hostcpu, we need
@ -116,8 +118,8 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
* scheduling, we need change here to determine it target vcpu is * scheduling, we need change here to determine it target vcpu is
* VMX non-root or root mode * VMX non-root or root mode
*/ */
if (get_pcpu_id() != vcpu->pcpu_id) { if (get_pcpu_id() != pcpu_id) {
send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU); send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
} }
} }

View File

@ -555,8 +555,8 @@ static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vect
*/ */
bitmap_set_lock(ACRN_REQUEST_EVENT, &vlapic->vcpu->arch.pending_req); bitmap_set_lock(ACRN_REQUEST_EVENT, &vlapic->vcpu->arch.pending_req);
if (get_pcpu_id() != vlapic->vcpu->pcpu_id) { if (get_pcpu_id() != pcpuid_from_vcpu(vlapic->vcpu)) {
apicv_post_intr(vlapic->vcpu->pcpu_id); apicv_post_intr(pcpuid_from_vcpu(vlapic->vcpu));
} }
} }
} }
@ -2049,7 +2049,7 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
default: default:
/* convert the dest from virtual apic_id to physical apic_id */ /* convert the dest from virtual apic_id to physical apic_id */
if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) { if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) {
papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id); papic_id = per_cpu(lapic_id, pcpuid_from_vcpu(target_vcpu));
dev_dbg(ACRN_DBG_LAPICPT, dev_dbg(ACRN_DBG_LAPICPT,
"%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx", "%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx",
__func__, vapic_id, papic_id, icr_low); __func__, vapic_id, papic_id, icr_low);

View File

@ -414,7 +414,7 @@ static uint64_t lapic_pt_enabled_pcpu_bitmap(struct acrn_vm *vm)
if (is_lapic_pt_configured(vm)) { if (is_lapic_pt_configured(vm)) {
foreach_vcpu(i, vm, vcpu) { foreach_vcpu(i, vm, vcpu) {
if (is_lapic_pt_enabled(vcpu)) { if (is_lapic_pt_enabled(vcpu)) {
bitmap_set_nolock(vcpu->pcpu_id, &bitmap); bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &bitmap);
} }
} }
} }
@ -598,8 +598,8 @@ int32_t shutdown_vm(struct acrn_vm *vm)
reset_vcpu(vcpu); reset_vcpu(vcpu);
offline_vcpu(vcpu); offline_vcpu(vcpu);
if (bitmap_test(vcpu->pcpu_id, &mask)) { if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
make_pcpu_offline(vcpu->pcpu_id); make_pcpu_offline(pcpuid_from_vcpu(vcpu));
} }
} }
@ -679,8 +679,8 @@ int32_t reset_vm(struct acrn_vm *vm)
foreach_vcpu(i, vm, vcpu) { foreach_vcpu(i, vm, vcpu) {
reset_vcpu(vcpu); reset_vcpu(vcpu);
if (bitmap_test(vcpu->pcpu_id, &mask)) { if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
make_pcpu_offline(vcpu->pcpu_id); make_pcpu_offline(pcpuid_from_vcpu(vcpu));
} }
} }

View File

@ -47,8 +47,8 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
/* Either SOS or pre-launched VMs */ /* Either SOS or pre-launched VMs */
pause_vm(vm); pause_vm(vm);
per_cpu(shutdown_vm_id, vcpu->pcpu_id) = vm->vm_id; per_cpu(shutdown_vm_id, pcpuid_from_vcpu(vcpu)) = vm->vm_id;
make_shutdown_vm_request(vcpu->pcpu_id); make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
} }
} }

View File

@ -534,7 +534,7 @@ static void do_init_vmcs(struct acrn_vcpu *vcpu)
*/ */
void init_vmcs(struct acrn_vcpu *vcpu) void init_vmcs(struct acrn_vcpu *vcpu)
{ {
uint16_t pcpu_id = vcpu->pcpu_id; uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
if (pcpu_id == get_pcpu_id()) { if (pcpu_id == get_pcpu_id()) {
do_init_vmcs(vcpu); do_init_vmcs(vcpu);

View File

@ -174,7 +174,7 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
uint16_t basic_exit_reason; uint16_t basic_exit_reason;
int32_t ret; int32_t ret;
if (get_pcpu_id() != vcpu->pcpu_id) { if (get_pcpu_id() != pcpuid_from_vcpu(vcpu)) {
pr_fatal("vcpu is not running on its pcpu!"); pr_fatal("vcpu is not running on its pcpu!");
ret = -EINVAL; ret = -EINVAL;
} else { } else {

View File

@ -298,7 +298,7 @@ static void init_msr_area(struct acrn_vcpu *vcpu)
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX; vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id; vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX; vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id; vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = pcpuid_from_vcpu(vcpu);
vcpu->arch.msr_area.count++; vcpu->arch.msr_area.count++;
/* only load/restore MSR IA32_PQR_ASSOC when hv and guest have differnt settings */ /* only load/restore MSR IA32_PQR_ASSOC when hv and guest have differnt settings */

View File

@ -26,7 +26,7 @@ void vcpu_thread(struct thread_object *obj)
} }
/* Don't open interrupt window between here and vmentry */ /* Don't open interrupt window between here and vmentry */
if (need_reschedule(vcpu->pcpu_id)) { if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
schedule(); schedule();
} }

View File

@ -408,7 +408,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
while (vcpu_id != INVALID_BIT_INDEX) { while (vcpu_id != INVALID_BIT_INDEX) {
bitmap_clear_nolock(vcpu_id, &vdmask); bitmap_clear_nolock(vcpu_id, &vdmask);
vcpu = vcpu_from_vid(vm, vcpu_id); vcpu = vcpu_from_vid(vm, vcpu_id);
dest |= per_cpu(lapic_ldr, vcpu->pcpu_id); dest |= per_cpu(lapic_ldr, pcpuid_from_vcpu(vcpu));
vcpu_id = ffs64(vdmask); vcpu_id = ffs64(vdmask);
} }

View File

@ -13,6 +13,14 @@
#include <schedule.h> #include <schedule.h>
#include <sprintf.h> #include <sprintf.h>
/**
* @pre obj != NULL
*/
uint16_t sched_get_pcpuid(const struct thread_object *obj)
{
return obj->pcpu_id;
}
void init_scheduler(void) void init_scheduler(void)
{ {
struct sched_control *ctl; struct sched_control *ctl;
@ -141,6 +149,7 @@ void switch_to_idle(thread_entry_t idle_thread)
snprintf(idle_name, 16U, "idle%hu", pcpu_id); snprintf(idle_name, 16U, "idle%hu", pcpu_id);
(void)strncpy_s(idle->name, 16U, idle_name, 16U); (void)strncpy_s(idle->name, 16U, idle_name, 16U);
idle->pcpu_id = pcpu_id;
idle->thread_entry = idle_thread; idle->thread_entry = idle_thread;
idle->switch_out = NULL; idle->switch_out = NULL;
idle->switch_in = NULL; idle->switch_in = NULL;

View File

@ -67,12 +67,14 @@ struct intr_excp_ctx *crash_ctx;
static void dump_guest_reg(struct acrn_vcpu *vcpu) static void dump_guest_reg(struct acrn_vcpu *vcpu)
{ {
uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
pr_acrnlog("\n\n================================================"); pr_acrnlog("\n\n================================================");
pr_acrnlog("================================\n\n"); pr_acrnlog("================================\n\n");
pr_acrnlog("Guest Registers:\r\n"); pr_acrnlog("Guest Registers:\r\n");
pr_acrnlog("= VM ID %d ==== vCPU ID %hu === pCPU ID %d ====" pr_acrnlog("= VM ID %d ==== vCPU ID %hu === pCPU ID %d ===="
"world %d =============\r\n", "world %d =============\r\n",
vcpu->vm->vm_id, vcpu->vcpu_id, vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id, pcpu_id,
vcpu->arch.cur_context); vcpu->arch.cur_context);
pr_acrnlog("= RIP=0x%016llx RSP=0x%016llx " pr_acrnlog("= RIP=0x%016llx RSP=0x%016llx "
"RFLAGS=0x%016llx\r\n", "RFLAGS=0x%016llx\r\n",

View File

@ -888,7 +888,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id
= vcpu->vcpu_id; = vcpu->vcpu_id;
vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id
= vcpu->pcpu_id; = pcpuid_from_vcpu(vcpu);
vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id = 0; vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id = 0;
vm_info_list.vm_list[vm_idx].num_vcpus++; vm_info_list.vm_list[vm_idx].num_vcpus++;
} }
@ -1369,7 +1369,7 @@ void profiling_pre_vmexit_handler(struct acrn_vcpu *vcpu)
*/ */
void profiling_post_vmexit_handler(struct acrn_vcpu *vcpu) void profiling_post_vmexit_handler(struct acrn_vcpu *vcpu)
{ {
per_cpu(profiling_info.s_state, vcpu->pcpu_id).total_vmexit_count++; per_cpu(profiling_info.s_state, pcpuid_from_vcpu(vcpu)).total_vmexit_count++;
if ((get_cpu_var(profiling_info.s_state).pmu_state == PMU_RUNNING) || if ((get_cpu_var(profiling_info.s_state).pmu_state == PMU_RUNNING) ||
(get_cpu_var(profiling_info.soc_state) == SW_RUNNING)) { (get_cpu_var(profiling_info.soc_state) == SW_RUNNING)) {

View File

@ -664,7 +664,7 @@ static int32_t shell_list_vcpu(__unused int32_t argc, __unused char **argv)
snprintf(temp_str, MAX_STR_SIZE, snprintf(temp_str, MAX_STR_SIZE,
" %-9d %-10d %-7hu %-12s %-16s\r\n", " %-9d %-10d %-7hu %-12s %-16s\r\n",
vm->vm_id, vm->vm_id,
vcpu->pcpu_id, pcpuid_from_vcpu(vcpu),
vcpu->vcpu_id, vcpu->vcpu_id,
is_vcpu_bsp(vcpu) ? is_vcpu_bsp(vcpu) ?
"PRIMARY" : "SECONDARY", "PRIMARY" : "SECONDARY",
@ -767,7 +767,7 @@ static int32_t shell_vcpu_dumpreg(int32_t argc, char **argv)
{ {
int32_t status = 0; int32_t status = 0;
uint16_t vm_id; uint16_t vm_id;
uint16_t vcpu_id; uint16_t vcpu_id, pcpu_id;
struct acrn_vm *vm; struct acrn_vm *vm;
struct acrn_vcpu *vcpu; struct acrn_vcpu *vcpu;
uint64_t mask = 0UL; uint64_t mask = 0UL;
@ -807,13 +807,14 @@ static int32_t shell_vcpu_dumpreg(int32_t argc, char **argv)
goto out; goto out;
} }
pcpu_id = pcpuid_from_vcpu(vcpu);
dump.vcpu = vcpu; dump.vcpu = vcpu;
dump.str = shell_log_buf; dump.str = shell_log_buf;
dump.str_max = SHELL_LOG_BUF_SIZE; dump.str_max = SHELL_LOG_BUF_SIZE;
if (vcpu->pcpu_id == get_pcpu_id()) { if (pcpu_id == get_pcpu_id()) {
vcpu_dumpreg(&dump); vcpu_dumpreg(&dump);
} else { } else {
bitmap_set_nolock(vcpu->pcpu_id, &mask); bitmap_set_nolock(pcpu_id, &mask);
smp_call_function(mask, vcpu_dumpreg, &dump); smp_call_function(mask, vcpu_dumpreg, &dump);
} }
shell_puts(shell_log_buf); shell_puts(shell_log_buf);

View File

@ -133,14 +133,14 @@ int32_t acrn_insert_request(struct acrn_vcpu *vcpu, const struct io_request *io_
* In this case, we cannot come back to polling status again. Currently, * In this case, we cannot come back to polling status again. Currently,
* it's OK as we needn't handle IO completion in zombie status. * it's OK as we needn't handle IO completion in zombie status.
*/ */
while (!need_reschedule(vcpu->pcpu_id)) { while (!need_reschedule(pcpuid_from_vcpu(vcpu))) {
if (has_complete_ioreq(vcpu)) { if (has_complete_ioreq(vcpu)) {
/* we have completed ioreq pending */ /* we have completed ioreq pending */
break; break;
} }
asm_pause(); asm_pause();
} }
} else if (need_reschedule(vcpu->pcpu_id)) { } else if (need_reschedule(pcpuid_from_vcpu(vcpu))) {
schedule(); schedule();
} else { } else {
ret = -EINVAL; ret = -EINVAL;

View File

@ -239,7 +239,6 @@ struct acrn_vcpu {
/* Architecture specific definitions for this VCPU */ /* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch; struct acrn_vcpu_arch arch;
uint16_t pcpu_id; /* Physical CPU ID of this VCPU */
uint16_t vcpu_id; /* virtual identifier for VCPU */ uint16_t vcpu_id; /* virtual identifier for VCPU */
struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */ struct acrn_vm *vm; /* Reference to the VM this VCPU belongs to */
@ -286,6 +285,7 @@ vcpu_vlapic(struct acrn_vcpu *vcpu)
return &(vcpu->arch.vlapic); return &(vcpu->arch.vlapic);
} }
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu);
void default_idle(__unused struct thread_object *obj); void default_idle(__unused struct thread_object *obj);
void vcpu_thread(struct thread_object *obj); void vcpu_thread(struct thread_object *obj);

View File

@ -183,7 +183,7 @@ static inline struct acrn_vcpu *vcpu_from_pid(struct acrn_vm *vm, uint16_t pcpu_
struct acrn_vcpu *vcpu, *target_vcpu = NULL; struct acrn_vcpu *vcpu, *target_vcpu = NULL;
foreach_vcpu(i, vm, vcpu) { foreach_vcpu(i, vm, vcpu) {
if (vcpu->pcpu_id == pcpu_id) { if (pcpuid_from_vcpu(vcpu) == pcpu_id) {
target_vcpu = vcpu; target_vcpu = vcpu;
break; break;
} }

View File

@ -18,8 +18,11 @@ typedef void (*thread_entry_t)(struct thread_object *obj);
typedef void (*switch_t)(struct thread_object *obj); typedef void (*switch_t)(struct thread_object *obj);
struct thread_object { struct thread_object {
char name[16]; char name[16];
uint64_t host_sp; uint16_t pcpu_id;
struct sched_control *sched_ctl;
thread_entry_t thread_entry; thread_entry_t thread_entry;
uint64_t host_sp;
switch_t switch_out; switch_t switch_out;
switch_t switch_in; switch_t switch_in;
}; };
@ -32,6 +35,8 @@ struct sched_control {
struct thread_object *thread_obj; struct thread_object *thread_obj;
}; };
uint16_t sched_get_pcpuid(const struct thread_object *obj);
void init_scheduler(void); void init_scheduler(void);
void switch_to_idle(thread_entry_t idle_thread); void switch_to_idle(thread_entry_t idle_thread);
void get_schedule_lock(uint16_t pcpu_id); void get_schedule_lock(uint16_t pcpu_id);