hv: sched: move pcpu_id from acrn_vcpu to thread_object
With CPU sharing enabled, an acrn_vcpu is mapped to a thread_object for scheduling. From a modularization perspective, it is better to hide pcpu_id from acrn_vcpu and move it into thread_object.

Tracked-On: #3813
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Yu Wang <yu1.wang@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
commit 891e46453d (parent f85106d1ed)
committed by ACRN System Integration
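In short, callers stop reading vcpu->pcpu_id directly and instead go through the new pcpuid_from_vcpu() helper, which asks the scheduler for the physical CPU recorded in the vCPU's embedded thread_object. The sketch below only illustrates the shape of the change: the struct layouts are heavily simplified, and sched_get_pcpuid() is assumed here to be a trivial field accessor, which may differ from the actual hypervisor definitions.

#include <stdint.h>

/* Simplified sketch only: the real ACRN structures carry many more fields. */
struct thread_object {
	uint16_t pcpu_id;                  /* physical CPU now lives on the schedulable object */
	/* ... name, sched_ctl, thread_entry, host_sp, switch_out, switch_in ... */
};

struct acrn_vcpu {
	uint16_t vcpu_id;
	struct thread_object thread_obj;   /* pcpu_id is no longer a direct member of acrn_vcpu */
	/* ... */
};

/* Assumed accessor on the scheduler side; shown here as a plain field read. */
static inline uint16_t sched_get_pcpuid(const struct thread_object *obj)
{
	return obj->pcpu_id;
}

/* Helper added by this commit: vCPU code asks the scheduler for its pCPU. */
uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu)
{
	return sched_get_pcpuid(&vcpu->thread_obj);
}

Every former use of vcpu->pcpu_id in the diff below is rewritten either to a local pcpu_id cached from pcpuid_from_vcpu() or to a direct pcpuid_from_vcpu() call at the use site.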
@@ -387,7 +387,6 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
 
 	/* Initialize CPU ID for this VCPU */
 	vcpu->vcpu_id = vcpu_id;
-	vcpu->pcpu_id = pcpu_id;
 	per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
 
 	/* Initialize the parent VM reference */
@@ -403,8 +402,8 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
 
 	per_cpu(vcpu, pcpu_id) = vcpu;
 
-	pr_info("PCPU%d is working as VM%d VCPU%d, Role: %s",
-			vcpu->pcpu_id, vcpu->vm->vm_id, vcpu->vcpu_id,
+	pr_info("Create VM%d-VCPU%d, Role: %s",
+			vcpu->vm->vm_id, vcpu->vcpu_id,
 			is_vcpu_bsp(vcpu) ? "PRIMARY" : "SECONDARY");
 
 	/*
@@ -585,7 +584,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
 void offline_vcpu(struct acrn_vcpu *vcpu)
 {
 	vlapic_free(vcpu);
-	per_cpu(ever_run_vcpu, vcpu->pcpu_id) = NULL;
+	per_cpu(ever_run_vcpu, pcpuid_from_vcpu(vcpu)) = NULL;
 	vcpu->state = VCPU_OFFLINE;
 }
 
@@ -656,49 +655,51 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 
 void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 {
-	uint16_t pcpu_id = get_pcpu_id();
+	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
 
 	pr_dbg("vcpu%hu paused, new state: %d",
 		vcpu->vcpu_id, new_state);
 
-	get_schedule_lock(vcpu->pcpu_id);
+	get_schedule_lock(pcpu_id);
 	vcpu->prev_state = vcpu->state;
 	vcpu->state = new_state;
 
 	if (vcpu->running) {
-		remove_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id);
+		remove_thread_obj(&vcpu->thread_obj, pcpu_id);
 
 		if (is_lapic_pt_enabled(vcpu)) {
-			make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT);
+			make_reschedule_request(pcpu_id, DEL_MODE_INIT);
 		} else {
-			make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI);
+			make_reschedule_request(pcpu_id, DEL_MODE_IPI);
 		}
 
-		release_schedule_lock(vcpu->pcpu_id);
+		release_schedule_lock(pcpu_id);
 
-		if (vcpu->pcpu_id != pcpu_id) {
+		if (pcpu_id != get_pcpu_id()) {
 			while (vcpu->running) {
 				asm_pause();
 			}
 		}
 	} else {
-		remove_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id);
-		release_schedule_lock(vcpu->pcpu_id);
+		remove_thread_obj(&vcpu->thread_obj, pcpu_id);
+		release_schedule_lock(pcpu_id);
 	}
 }
 
 void resume_vcpu(struct acrn_vcpu *vcpu)
 {
+	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
+
 	pr_dbg("vcpu%hu resumed", vcpu->vcpu_id);
 
-	get_schedule_lock(vcpu->pcpu_id);
+	get_schedule_lock(pcpu_id);
 	vcpu->state = vcpu->prev_state;
 
 	if (vcpu->state == VCPU_RUNNING) {
-		insert_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id);
-		make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI);
+		insert_thread_obj(&vcpu->thread_obj, pcpu_id);
+		make_reschedule_request(pcpu_id, DEL_MODE_IPI);
 	}
-	release_schedule_lock(vcpu->pcpu_id);
+	release_schedule_lock(pcpu_id);
 }
 
 static void context_switch_out(struct thread_object *prev)
@@ -728,13 +729,15 @@ static void context_switch_in(struct thread_object *next)
 
 void schedule_vcpu(struct acrn_vcpu *vcpu)
 {
-	vcpu->state = VCPU_RUNNING;
-	pr_dbg("vcpu%hu scheduled", vcpu->vcpu_id);
+	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
 
-	get_schedule_lock(vcpu->pcpu_id);
-	insert_thread_obj(&vcpu->thread_obj, vcpu->pcpu_id);
-	make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI);
-	release_schedule_lock(vcpu->pcpu_id);
+	vcpu->state = VCPU_RUNNING;
+	pr_dbg("vcpu%hu scheduled on pcpu%hu", vcpu->vcpu_id, pcpu_id);
+
+	get_schedule_lock(pcpu_id);
+	insert_thread_obj(&vcpu->thread_obj, pcpu_id);
+	make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+	release_schedule_lock(pcpu_id);
 }
 
 /* help function for vcpu create */
@@ -748,7 +751,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 	if (ret == 0) {
 		snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id);
 		(void)strncpy_s(vcpu->thread_obj.name, 16U, thread_name, 16U);
+		vcpu->thread_obj.sched_ctl = &per_cpu(sched_ctl, pcpu_id);
 		vcpu->thread_obj.thread_entry = vcpu_thread;
+		vcpu->thread_obj.pcpu_id = pcpu_id;
 		vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
 		vcpu->thread_obj.switch_out = context_switch_out;
 		vcpu->thread_obj.switch_in = context_switch_in;
@@ -757,6 +762,14 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 	return ret;
 }
 
+/**
+ * @pre vcpu != NULL
+ */
+uint16_t pcpuid_from_vcpu(const struct acrn_vcpu *vcpu)
+{
+	return sched_get_pcpuid(&vcpu->thread_obj);
+}
+
 uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
 {
 	uint16_t vcpu_id;
@@ -766,7 +779,7 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
 	for (vcpu_id = 0U; vcpu_id < vm->hw.created_vcpus; vcpu_id++) {
 		if ((vdmask & (1UL << vcpu_id)) != 0UL) {
 			vcpu = vcpu_from_vid(vm, vcpu_id);
-			bitmap_set_nolock(vcpu->pcpu_id, &dmask);
+			bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &dmask);
 		}
 	}
 
@@ -106,6 +106,8 @@ static bool is_guest_irq_enabled(struct acrn_vcpu *vcpu)
 
 void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
 {
+	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
+
 	bitmap_set_lock(eventid, &vcpu->arch.pending_req);
 	/*
 	 * if current hostcpu is not the target vcpu's hostcpu, we need
@@ -116,8 +118,8 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
 	 * scheduling, we need change here to determine it target vcpu is
 	 * VMX non-root or root mode
 	 */
-	if (get_pcpu_id() != vcpu->pcpu_id) {
-		send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
+	if (get_pcpu_id() != pcpu_id) {
+		send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
 	}
 }
 
@@ -555,8 +555,8 @@ static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vect
 		 */
 		bitmap_set_lock(ACRN_REQUEST_EVENT, &vlapic->vcpu->arch.pending_req);
 
-		if (get_pcpu_id() != vlapic->vcpu->pcpu_id) {
-			apicv_post_intr(vlapic->vcpu->pcpu_id);
+		if (get_pcpu_id() != pcpuid_from_vcpu(vlapic->vcpu)) {
+			apicv_post_intr(pcpuid_from_vcpu(vlapic->vcpu));
 		}
 	}
 }
@@ -2049,7 +2049,7 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
 	default:
 		/* convert the dest from virtual apic_id to physical apic_id */
 		if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) {
-			papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id);
+			papic_id = per_cpu(lapic_id, pcpuid_from_vcpu(target_vcpu));
 			dev_dbg(ACRN_DBG_LAPICPT,
 				"%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx",
 				__func__, vapic_id, papic_id, icr_low);
@@ -414,7 +414,7 @@ static uint64_t lapic_pt_enabled_pcpu_bitmap(struct acrn_vm *vm)
 	if (is_lapic_pt_configured(vm)) {
 		foreach_vcpu(i, vm, vcpu) {
 			if (is_lapic_pt_enabled(vcpu)) {
-				bitmap_set_nolock(vcpu->pcpu_id, &bitmap);
+				bitmap_set_nolock(pcpuid_from_vcpu(vcpu), &bitmap);
 			}
 		}
 	}
@@ -598,8 +598,8 @@ int32_t shutdown_vm(struct acrn_vm *vm)
 		reset_vcpu(vcpu);
 		offline_vcpu(vcpu);
 
-		if (bitmap_test(vcpu->pcpu_id, &mask)) {
-			make_pcpu_offline(vcpu->pcpu_id);
+		if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
+			make_pcpu_offline(pcpuid_from_vcpu(vcpu));
 		}
 	}
 
@@ -679,8 +679,8 @@ int32_t reset_vm(struct acrn_vm *vm)
 	foreach_vcpu(i, vm, vcpu) {
 		reset_vcpu(vcpu);
 
-		if (bitmap_test(vcpu->pcpu_id, &mask)) {
-			make_pcpu_offline(vcpu->pcpu_id);
+		if (bitmap_test(pcpuid_from_vcpu(vcpu), &mask)) {
+			make_pcpu_offline(pcpuid_from_vcpu(vcpu));
 		}
 	}
 
@@ -47,8 +47,8 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
 		/* Either SOS or pre-launched VMs */
 		pause_vm(vm);
 
-		per_cpu(shutdown_vm_id, vcpu->pcpu_id) = vm->vm_id;
-		make_shutdown_vm_request(vcpu->pcpu_id);
+		per_cpu(shutdown_vm_id, pcpuid_from_vcpu(vcpu)) = vm->vm_id;
+		make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
 	}
 }
 
@@ -534,7 +534,7 @@ static void do_init_vmcs(struct acrn_vcpu *vcpu)
  */
 void init_vmcs(struct acrn_vcpu *vcpu)
 {
-	uint16_t pcpu_id = vcpu->pcpu_id;
+	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
 
 	if (pcpu_id == get_pcpu_id()) {
 		do_init_vmcs(vcpu);
@@ -174,7 +174,7 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
 	uint16_t basic_exit_reason;
 	int32_t ret;
 
-	if (get_pcpu_id() != vcpu->pcpu_id) {
+	if (get_pcpu_id() != pcpuid_from_vcpu(vcpu)) {
 		pr_fatal("vcpu is not running on its pcpu!");
 		ret = -EINVAL;
 	} else {
@@ -298,7 +298,7 @@ static void init_msr_area(struct acrn_vcpu *vcpu)
 	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
 	vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
 	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
-	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
+	vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = pcpuid_from_vcpu(vcpu);
 	vcpu->arch.msr_area.count++;
 
 	/* only load/restore MSR IA32_PQR_ASSOC when hv and guest have differnt settings */