hv: schedule: vCPU schedule state setting doesn't need to be atomic
vCPU schedule state changes happen under schedule lock protection, so the accesses don't need to be atomic.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
parent 8af334cbb2
commit b39526f759
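Why dropping the atomics is safe: every writer of vcpu->running executes with the per-pCPU schedule lock held, and the lock's acquire/release semantics already order those plain stores against lock-holding readers. A minimal, self-contained sketch of the pattern, with a pthread mutex standing in for the schedule lock (the names here are illustrative, not ACRN's API):

/* A flag that is only accessed with the lock held needs no atomic
 * helpers: the lock itself orders every load and store of the flag.
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static bool running = false;   /* plays the role of vcpu->running */

static void set_running(bool val)
{
	pthread_mutex_lock(&sched_lock);
	running = val;         /* plain store, protected by the lock */
	pthread_mutex_unlock(&sched_lock);
}

static bool query_running(void)
{
	bool val;

	pthread_mutex_lock(&sched_lock);
	val = running;         /* plain load, protected by the lock */
	pthread_mutex_unlock(&sched_lock);
	return val;
}

int main(void)
{
	set_running(true);
	printf("running: %d\n", query_running());
	return 0;
}

Under that discipline the atomic_load32()/atomic_store32() wrappers add no extra safety, which is why the <atomic.h> include can be dropped along with them, as the diff below does.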
@@ -13,7 +13,6 @@
 #include <cpu_caps.h>
 #include <per_cpu.h>
 #include <init.h>
-#include <atomic.h>
 #include <vm.h>
 #include <vmcs.h>
 #include <mmu.h>
@@ -416,7 +415,7 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
 	*rtn_vcpu_handle = vcpu;
 
 	vcpu->launched = false;
-	vcpu->running = 0U;
+	vcpu->running = false;
 	vcpu->arch.nr_sipi = 0U;
 	vcpu->state = VCPU_INIT;
 
@@ -587,7 +586,7 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->state = VCPU_INIT;
 
 	vcpu->launched = false;
-	vcpu->running = 0U;
+	vcpu->running = false;
 	vcpu->arch.nr_sipi = 0U;
 
 	vcpu->arch.exception_info.exception = VECTOR_INVALID;
@@ -619,7 +618,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->prev_state = vcpu->state;
 	vcpu->state = new_state;
 
-	if (atomic_load32(&vcpu->running) == 1U) {
+	if (vcpu->running) {
 		remove_from_cpu_runqueue(&vcpu->sched_obj);
 
 		if (is_lapic_pt_enabled(vcpu)) {
@@ -631,7 +630,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 		release_schedule_lock(vcpu->pcpu_id);
 
 		if (vcpu->pcpu_id != pcpu_id) {
-			while (atomic_load32(&vcpu->running) == 1U) {
+			while (vcpu->running) {
				asm_pause();
			}
		}
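Note that one reader above stays outside the lock: when pause_vcpu() runs on a different pCPU, it releases the schedule lock and then busy-waits for the target pCPU's context_switch_out() to clear running, calling asm_pause() (the x86 PAUSE hint) in each iteration. In strictly portable C, a lock-free spin like that is written with an atomic load; a hedged C11 sketch of the equivalent wait loop, with illustrative names (the real code instead relies on running being a naturally aligned bool and, presumably, on asm_pause()'s volatile asm keeping the compiler from caching the flag in a register):

#include <stdatomic.h>
#include <immintrin.h>         /* _mm_pause(), x86 */

static atomic_bool vcpu_running;

/* Spin until the remote pCPU's context switch clears the flag. */
static void wait_until_descheduled(void)
{
	while (atomic_load_explicit(&vcpu_running, memory_order_acquire)) {
		_mm_pause();   /* CPU relax hint, like asm_pause() */
	}
}

int main(void)
{
	atomic_store(&vcpu_running, false);   /* nothing to wait for */
	wait_until_descheduled();
	return 0;
}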
@@ -659,7 +658,7 @@ static void context_switch_out(struct sched_object *prev)
 {
 	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj);
 
-	atomic_store32(&vcpu->running, 0U);
+	vcpu->running = false;
 	/* do prev vcpu context switch out */
 	/* For now, we don't need to invalid ept.
 	 * But if we have more than one vcpu on one pcpu,
@@ -671,7 +670,7 @@ static void context_switch_in(struct sched_object *next)
 {
 	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, sched_obj);
 
-	atomic_store32(&vcpu->running, 1U);
+	vcpu->running = true;
 	/* FIXME:
 	 * Now, we don't need to load new vcpu VMCS because
 	 * we only do switch between vcpu loop and idle loop.
@@ -358,7 +358,7 @@ struct acrn_vcpu {
 
 	struct sched_object sched_obj;
 	bool launched; /* Whether the vcpu is launched on target pcpu */
-	uint32_t running; /* vcpu is picked up and run? */
+	bool running; /* vcpu is picked up and run? */
 
 	struct instr_emul_ctxt inst_ctxt;
 	struct io_request req; /* used by io/ept emulation */