hv: use kick-mode in per-cpu to control kick pCPU

An INIT signal is used to kick a partitioned pCPU whose LAPIC is
passed through (e.g. one running an RTVM), while a notification IPI
is used to kick a shared pCPU.

Add a mode_to_kick_pcpu field to the per-cpu region to select the
way a given pCPU is kicked.

Tracked-On: #8207
Signed-off-by: Minggui Cao <minggui.cao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Minggui Cao <minggui.cao@intel.com>
Committed: 2022-08-18 15:50:10 +08:00 (by acrnsi-robot)
Commit: 2c140addaf, parent: 39cdf6f7a6
11 changed files with 32 additions and 49 deletions
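For orientation before the diff: a minimal standalone sketch of the dispatch pattern this commit introduces. This is not ACRN code — the DEL_MODE_* names and function names match the diff, but a plain array stands in for the hypervisor's per_cpu() accessor, and the signal-sending routines are simplified stubs (the real send_single_ipi() also takes a vector argument).

/* Standalone model of the kick-mode dispatch; simplified for illustration. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PCPU_NUM 4U

enum kick_mode { DEL_MODE_INIT, DEL_MODE_IPI };

/* Stand-in for the new per_cpu(mode_to_kick_pcpu, pcpu_id) field. */
static enum kick_mode mode_to_kick_pcpu[MAX_PCPU_NUM];

/* Stubs for send_single_init()/send_single_ipi() in lapic.c. */
static void send_single_init(uint16_t pcpu_id)
{
    printf("pCPU%u: INIT signal (LAPIC passed through)\n", pcpu_id);
}

static void send_single_ipi(uint16_t pcpu_id)
{
    printf("pCPU%u: notification IPI\n", pcpu_id);
}

/* Same shape as the new kick_pcpu(): callers no longer pick a
 * delivery mode; the per-CPU field decides. */
static void kick_pcpu(uint16_t pcpu_id)
{
    if (mode_to_kick_pcpu[pcpu_id] == DEL_MODE_INIT) {
        send_single_init(pcpu_id);
    } else {
        send_single_ipi(pcpu_id);
    }
}

int main(void)
{
    mode_to_kick_pcpu[0] = DEL_MODE_IPI;  /* shared pCPU */
    mode_to_kick_pcpu[1] = DEL_MODE_INIT; /* partitioned pCPU, lapic-pt */
    kick_pcpu(0);
    kick_pcpu(1);
    return 0;
}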


@@ -415,7 +415,7 @@ void make_pcpu_offline(uint16_t pcpu_id)
 {
     bitmap_set_lock(NEED_OFFLINE, &per_cpu(pcpu_flag, pcpu_id));
     if (get_pcpu_id() != pcpu_id) {
-        send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+        kick_pcpu(pcpu_id);
     }
 }


@@ -255,9 +255,6 @@ static void vcpu_reset_internal(struct acrn_vcpu *vcpu, enum reset_mode mode)
             sizeof(struct run_context));
     }
 
-    /* TODO: we may need to add one scheduler->reset_data to reset the thread_obj */
-    vcpu->thread_obj.notify_mode = SCHED_NOTIFY_IPI;
-
     vlapic = vcpu_vlapic(vcpu);
     vlapic_reset(vlapic, apicv_ops, mode);
@@ -529,6 +526,12 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
     vcpu->vcpu_id = vcpu_id;
     per_cpu(ever_run_vcpu, pcpu_id) = vcpu;
 
+    if (is_lapic_pt_configured(vm)) {
+        per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_INIT;
+    } else {
+        per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_IPI;
+    }
+
     /* Initialize the parent VM reference */
     vcpu->vm = vm;
@@ -790,14 +793,8 @@ void kick_vcpu(struct acrn_vcpu *vcpu)
 {
     uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);
 
-    if ((get_pcpu_id() != pcpu_id) &&
-        (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
-        if (is_lapic_pt_enabled(vcpu)) {
-            /* For lapic-pt vCPUs */
-            send_single_init(pcpu_id);
-        } else {
-            send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
-        }
+    if ((get_pcpu_id() != pcpu_id) && (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) {
+        kick_pcpu(pcpu_id);
     }
 }
@@ -970,7 +967,6 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
     vcpu->thread_obj.sched_ctl = &per_cpu(sched_ctl, pcpu_id);
     vcpu->thread_obj.thread_entry = vcpu_thread;
     vcpu->thread_obj.pcpu_id = pcpu_id;
-    /* vcpu->thread_obj.notify_mode is initialized in vcpu_reset_internal() when create vcpu */
     vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
     vcpu->thread_obj.switch_out = context_switch_out;
     vcpu->thread_obj.switch_in = context_switch_in;


@@ -1245,7 +1245,7 @@ void make_shutdown_vm_request(uint16_t pcpu_id)
 {
     bitmap_set_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
     if (get_pcpu_id() != pcpu_id) {
-        send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+        kick_pcpu(pcpu_id);
     }
 }


@@ -631,12 +631,6 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
         update_msr_bitmap_x2apic_passthru(vcpu);
 
-        /*
-         * After passthroughing lapic to guest, we should use INIT signal to
-         * notify vcpu thread instead of IPI. Because the IPI will be delivered
-         * the guest directly without vmexit.
-         */
-        vcpu->thread_obj.notify_mode = SCHED_NOTIFY_INIT;
     } else {
         value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
         value32 &= ~VMX_PROCBASED_CTLS2_VAPIC;


@@ -12,6 +12,7 @@
 #include <asm/cpu_caps.h>
 #include <asm/lapic.h>
 #include <asm/apicreg.h>
+#include <asm/irq.h>
 #include <delay.h>
 
 /* intr_lapic_icr_delivery_mode */
@@ -294,3 +295,12 @@ void send_single_init(uint16_t pcpu_id)
 
     msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
 }
+
+void kick_pcpu(uint16_t pcpu_id)
+{
+    if (per_cpu(mode_to_kick_pcpu, pcpu_id) == DEL_MODE_INIT) {
+        send_single_init(pcpu_id);
+    } else {
+        send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
+    }
+}
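The comment deleted from switch_apicv_mode_x2apic() above captures why two delivery modes exist: once the LAPIC is passed through to the guest, a notification IPI would be delivered straight to the guest without a VM exit, so only an INIT signal can pull that pCPU back into the hypervisor. With this hunk, kick_pcpu() hides that choice behind the per-cpu mode_to_kick_pcpu field instead of forcing every caller to pick a delivery mode.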


@@ -139,11 +139,11 @@ static void sched_tick_handler(void *param)
         if (!is_idle_thread(current)) {
             data->run_countdown -= 1U;
             if (data->run_countdown == 0U) {
-                make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+                make_reschedule_request(pcpu_id);
             }
         } else {
             if (!list_empty(&bvt_ctl->runqueue)) {
-                make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+                make_reschedule_request(pcpu_id);
             }
         }
     }


@@ -94,7 +94,7 @@ static void sched_tick_handler(void *param)
         }
         /* make reschedule request if current ran out of its cycles */
         if (is_idle_thread(current) || data->left_cycles <= 0) {
-            make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+            make_reschedule_request(pcpu_id);
         }
     }
 }


@@ -127,23 +127,13 @@ struct thread_object *sched_get_current(uint16_t pcpu_id)
 /**
  * @pre delmode == DEL_MODE_IPI || delmode == DEL_MODE_INIT
  */
-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
+void make_reschedule_request(uint16_t pcpu_id)
 {
     struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
 
     bitmap_set_lock(NEED_RESCHEDULE, &ctl->flags);
     if (get_pcpu_id() != pcpu_id) {
-        switch (delmode) {
-        case DEL_MODE_IPI:
-            send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
-            break;
-        case DEL_MODE_INIT:
-            send_single_init(pcpu_id);
-            break;
-        default:
-            ASSERT(false, "Unknown delivery mode %u for pCPU%u", delmode, pcpu_id);
-            break;
-        }
+        kick_pcpu(pcpu_id);
     }
 }
@@ -202,11 +192,7 @@ void sleep_thread(struct thread_object *obj)
         scheduler->sleep(obj);
     }
     if (is_running(obj)) {
-        if (obj->notify_mode == SCHED_NOTIFY_INIT) {
-            make_reschedule_request(pcpu_id, DEL_MODE_INIT);
-        } else {
-            make_reschedule_request(pcpu_id, DEL_MODE_IPI);
-        }
+        make_reschedule_request(pcpu_id);
         obj->be_blocking = true;
     } else {
         set_thread_status(obj, THREAD_STS_BLOCKED);
@@ -236,7 +222,7 @@ void wake_thread(struct thread_object *obj)
     }
     if (is_blocked(obj)) {
         set_thread_status(obj, THREAD_STS_RUNNABLE);
-        make_reschedule_request(pcpu_id, DEL_MODE_IPI);
+        make_reschedule_request(pcpu_id);
     }
     obj->be_blocking = false;
 }
@@ -245,7 +231,7 @@ void wake_thread(struct thread_object *obj)
 
 void yield_current(void)
 {
-    make_reschedule_request(get_pcpu_id(), DEL_MODE_IPI);
+    make_reschedule_request(get_pcpu_id());
 }
 
 void run_thread(struct thread_object *obj)


@@ -126,4 +126,6 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector);
  */
 void send_single_init(uint16_t pcpu_id);
 
+void kick_pcpu(uint16_t pcpu_id);
+
 #endif /* ARCH_X86_LAPIC_H */


@@ -54,6 +54,7 @@ struct per_cpu_region {
     uint32_t lapic_id;
     uint32_t lapic_ldr;
     uint32_t softirq_servicing;
+    uint32_t mode_to_kick_pcpu;
     struct smp_call_info_data smp_call_info;
     struct list_head softirq_dev_entry_list;
 #ifdef PROFILING_ON


@@ -23,11 +23,6 @@ enum thread_object_state {
     THREAD_STS_BLOCKED
 };
 
-enum sched_notify_mode {
-    SCHED_NOTIFY_INIT,
-    SCHED_NOTIFY_IPI
-};
-
 /* Tools can configure a VM to use PRIO_LOW or PRIO_HIGH */
 enum thread_priority {
     PRIO_IDLE = 0,
@@ -46,7 +41,6 @@ struct thread_object {
     thread_entry_t thread_entry;
     volatile enum thread_object_state status;
     bool be_blocking;
-    enum sched_notify_mode notify_mode;
     uint64_t host_sp;
 
     switch_t switch_out;
@@ -126,7 +120,7 @@ void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
 void init_thread_data(struct thread_object *obj);
 void deinit_thread_data(struct thread_object *obj);
 
-void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
+void make_reschedule_request(uint16_t pcpu_id);
 bool need_reschedule(uint16_t pcpu_id);
 void run_thread(struct thread_object *obj);