Mirror of https://github.com/projectacrn/acrn-hypervisor.git
Merge pull request #5054 from mingqiangchi/add_vm_vcpu_state_lock

hv: add per-VM lock for VM and vCPU state changes

Commit 28c9e35771
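This pull request replaces the single global hypercall lock with a per-VM spinlock, vm_state_lock, that serializes VM and vCPU state transitions. The lock becomes the first field of struct acrn_vm, is taken through the new get_vm_lock()/put_vm_lock() helpers around S3/S5 entry, INIT/SIPI processing, triple-fault shutdown, reset-register writes, the fatal-error paths in vcpu_thread(), and the VM lifecycle hypercall handlers, and create_vm() is adjusted so re-creating a VM no longer wipes the lock. As a minimal sketch of the bracket pattern only (stand-in types and a pthread mutex instead of ACRN's spinlock_t; names such as fake_vm are hypothetical):

/* Illustrative only: stand-in types, not the ACRN definitions. */
#include <pthread.h>

struct fake_vm {
    pthread_mutex_t vm_state_lock;   /* stands in for ACRN's spinlock_t */
    int state;                       /* e.g. CREATED / RUNNING / PAUSED / POWERED_OFF */
};

static void get_vm_lock(struct fake_vm *vm)
{
    pthread_mutex_lock(&vm->vm_state_lock);
}

static void put_vm_lock(struct fake_vm *vm)
{
    pthread_mutex_unlock(&vm->vm_state_lock);
}

/* Every state change is bracketed by the per-VM lock, so two paths that
 * target the same VM (say, a shutdown hypercall and a triple-fault handler)
 * cannot interleave their check-then-transition sequences. */
static void shutdown_if_running(struct fake_vm *vm)
{
    get_vm_lock(vm);
    if (vm->state == 1 /* RUNNING */) {
        vm->state = 3 /* POWERED_OFF */;
    }
    put_vm_lock(vm);
}

The real helpers in the diff below are just thin wrappers around spinlock_obtain()/spinlock_release() on vm->vm_state_lock.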
@@ -152,6 +152,7 @@ static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
 
 static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 {
+    get_vm_lock(vm);
     /*
      * It's possible that ACRN come here from SOS and pre-launched VM. Currently, we
      * assume SOS has full ACPI power management stack. That means the value from SOS
@@ -162,12 +163,14 @@ static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
     }
     pause_vm(vm);
     (void)shutdown_vm(vm);
+    put_vm_lock(vm);
 }
 
 static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
 {
     uint32_t guest_wakeup_vec32;
 
+    get_vm_lock(vm);
     /* Save the wakeup vec set by guest OS. Will return to guest
      * with this wakeup vec as entry.
      */
@@ -178,6 +181,7 @@ static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t
     pause_vm(vm); /* pause sos_vm before suspend system */
     host_enter_s3(vm->pm.sx_state_data, pm1a_cnt_val, pm1b_cnt_val);
     resume_vm_from_s3(vm, guest_wakeup_vec32); /* jump back to vm */
+    put_vm_lock(vm);
 }
 
 /**
@@ -1101,6 +1101,7 @@ vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast
 static void
 vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t icr_low)
 {
+    get_vm_lock(target_vcpu->vm);
     if (mode == APIC_DELMODE_INIT) {
         if ((icr_low & APIC_LEVEL_MASK) != APIC_LEVEL_DEASSERT) {
 
@@ -1144,6 +1145,7 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t
     } else {
         /* No other state currently, do nothing */
     }
+    put_vm_lock(target_vcpu->vm);
     return;
 }
 
@@ -394,7 +394,8 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 
     /* Allocate memory for virtual machine */
     vm = &vm_array[vm_id];
-    (void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
+    /* the vm_state lock field need to remain unchanged in vm data structure */
+    (void)memset((void *)&vm->arch_vm, 0U, (sizeof(struct acrn_vm) - sizeof(spinlock_t)));
     vm->vm_id = vm_id;
     vm->hw.created_vcpus = 0U;
 
@@ -437,7 +438,6 @@ int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *v
 
     if (status == 0) {
         prepare_epc_vm_memmap(vm);
 
         spinlock_init(&vm->vlapic_mode_lock);
         spinlock_init(&vm->ept_lock);
         spinlock_init(&vm->emul_mmio_lock);
@@ -891,3 +891,18 @@ bool need_shutdown_vm(uint16_t pcpu_id)
 {
     return bitmap_test_and_clear_lock(NEED_SHUTDOWN_VM, &per_cpu(pcpu_flag, pcpu_id));
 }
+
+/*
+ * @pre vm != NULL
+ */
+void get_vm_lock(struct acrn_vm *vm)
+{
+    spinlock_obtain(&vm->vm_state_lock);
+}
+/*
+ * @pre vm != NULL
+ */
+void put_vm_lock(struct acrn_vm *vm)
+{
+    spinlock_release(&vm->vm_state_lock);
+}
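The create_vm() hunk above is the subtle part of this change: hcall_create_vm() now takes the target slot's vm_state_lock before calling create_vm(), so create_vm() must not zero the lock it is running under. Because the lock is the first field of struct acrn_vm and is initialized once by BSS clearing, the memset can simply start at the second field and shorten the length by sizeof(spinlock_t). A hedged sketch of that layout trick with stand-in types; the _Static_asserts are illustrative guards, not something the diff adds:

/* Sketch with stand-in types; field names mirror the diff, sizes do not. */
#include <stddef.h>
#include <string.h>

typedef struct { unsigned int head, tail; } fake_spinlock_t;

struct fake_vm {
    fake_spinlock_t vm_state_lock;  /* must stay first and survive re-creation */
    struct { int dummy; } arch_vm;  /* first field that is safe to clear */
    unsigned short vm_id;
};

/* Compile-time guards for the layout assumption the partial memset relies on
 * (illustrative only; not part of the ACRN change). */
_Static_assert(offsetof(struct fake_vm, vm_state_lock) == 0,
               "lock must be the first field");
_Static_assert(offsetof(struct fake_vm, arch_vm) == sizeof(fake_spinlock_t),
               "arch_vm must directly follow the lock");

static void reinit_vm_slot(struct fake_vm *vm, unsigned short vm_id)
{
    /* Clear everything except the leading lock, exactly like the diff:
     * start at &vm->arch_vm and subtract the lock size from the length. */
    (void)memset((void *)&vm->arch_vm, 0,
                 sizeof(struct fake_vm) - sizeof(fake_spinlock_t));
    vm->vm_id = vm_id;
}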
@@ -38,15 +38,19 @@ void triple_fault_shutdown_vm(struct acrn_vcpu *vcpu)
         for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
             struct acrn_vm *pl_vm = get_vm_from_vmid(vm_id);
 
+            get_vm_lock(pl_vm);
             if (!is_poweroff_vm(pl_vm) && is_postlaunched_vm(pl_vm) && !is_rt_vm(pl_vm)) {
                 pause_vm(pl_vm);
                 (void)shutdown_vm(pl_vm);
             }
+            put_vm_lock(pl_vm);
         }
     }
 
     /* Either SOS or pre-launched VMs */
+    get_vm_lock(vm);
     pause_vm(vm);
+    put_vm_lock(vm);
 
     per_cpu(shutdown_vm_id, pcpuid_from_vcpu(vcpu)) = vm->vm_id;
     make_shutdown_vm_request(pcpuid_from_vcpu(vcpu));
@@ -83,6 +87,7 @@ static bool handle_common_reset_reg_write(struct acrn_vm *vm, bool reset)
 {
     bool ret = true;
 
+    get_vm_lock(vm);
     if (reset) {
         if (get_highest_severity_vm(true) == vm) {
             reset_host();
@@ -115,6 +120,7 @@ static bool handle_common_reset_reg_write(struct acrn_vm *vm, bool reset)
          * equivalent to hide this port from guests.
          */
     }
+    put_vm_lock(vm);
 
     return ret;
 }
@@ -14,11 +14,6 @@
 #include <trace.h>
 #include <logmsg.h>
 
-static spinlock_t vmm_hypercall_lock = {
-    .head = 0U,
-    .tail = 0U,
-};
-
 static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 {
     struct acrn_vm *sos_vm = vcpu->vm;
@@ -36,9 +31,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
 
     switch (hypcall_id) {
     case HC_SOS_OFFLINE_CPU:
-        spinlock_obtain(&vmm_hypercall_lock);
         ret = hcall_sos_offline_cpu(sos_vm, param1);
-        spinlock_release(&vmm_hypercall_lock);
         break;
     case HC_GET_API_VERSION:
         ret = hcall_get_api_version(sos_vm, param1);
@@ -54,44 +47,34 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
         break;
 
     case HC_CREATE_VM:
-        spinlock_obtain(&vmm_hypercall_lock);
         ret = hcall_create_vm(sos_vm, param1);
-        spinlock_release(&vmm_hypercall_lock);
         break;
 
     case HC_DESTROY_VM:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_destroy_vm(vm_id);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
     case HC_START_VM:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_start_vm(vm_id);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
     case HC_RESET_VM:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_reset_vm(vm_id);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
     case HC_PAUSE_VM:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_pause_vm(vm_id);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
@@ -102,9 +85,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
     case HC_SET_VCPU_REGS:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_set_vcpu_regs(sos_vm, vm_id, param2);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
@@ -126,9 +107,7 @@ static int32_t dispatch_sos_hypercall(const struct acrn_vcpu *vcpu)
     case HC_SET_IOREQ_BUFFER:
         /* param1: relative vmid to sos, vm_id: absolute vmid */
         if (vmid_is_valid) {
-            spinlock_obtain(&vmm_hypercall_lock);
             ret = hcall_set_ioreq_buffer(sos_vm, vm_id, param2);
-            spinlock_release(&vmm_hypercall_lock);
         }
         break;
 
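With the per-VM lock taken inside the individual hcall_* handlers (see the hunks further down), the coarse vmm_hypercall_lock that used to serialize every VM-management hypercall in the dispatcher becomes redundant, so it is deleted: requests against different VMs can now proceed concurrently while requests against the same VM are still serialized. A rough sketch of the two locking shapes, using stand-in names rather than the ACRN dispatcher:

/* Stand-in sketch: coarse global lock vs. per-target lock. */
#include <pthread.h>

struct fake_vm { pthread_mutex_t vm_state_lock; int state; };

static pthread_mutex_t global_hypercall_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: every state-changing hypercall contends on one global lock,
 * even when the calls target unrelated VMs. */
static int destroy_with_global_lock(struct fake_vm *target)
{
    pthread_mutex_lock(&global_hypercall_lock);
    int ret = (target->state == 2 /* PAUSED */) ? 0 : -1;
    pthread_mutex_unlock(&global_hypercall_lock);
    return ret;
}

/* After: each handler locks only the VM it operates on. */
static int destroy_with_per_vm_lock(struct fake_vm *target)
{
    pthread_mutex_lock(&target->vm_state_lock);
    int ret = (target->state == 2 /* PAUSED */) ? 0 : -1;
    pthread_mutex_unlock(&target->vm_state_lock);
    return ret;
}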
@@ -35,7 +35,9 @@ void vcpu_thread(struct thread_object *obj)
         ret = acrn_handle_pending_request(vcpu);
         if (ret < 0) {
             pr_fatal("vcpu handling pending request fail");
+            get_vm_lock(vcpu->vm);
             zombie_vcpu(vcpu, VCPU_ZOMBIE);
+            put_vm_lock(vcpu->vm);
             /* Fatal error happened (triple fault). Stop the vcpu running. */
             continue;
         }
@@ -47,7 +49,9 @@ void vcpu_thread(struct thread_object *obj)
         ret = run_vcpu(vcpu);
         if (ret != 0) {
             pr_fatal("vcpu resume failed");
+            get_vm_lock(vcpu->vm);
             zombie_vcpu(vcpu, VCPU_ZOMBIE);
+            put_vm_lock(vcpu->vm);
             /* Fatal error happened (resume vcpu failed). Stop the vcpu running. */
             continue;
         }
@@ -55,6 +55,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid)
     uint16_t i;
     int32_t ret = 0;
 
+    get_vm_lock(vm);
     pr_info("sos offline cpu with lapicid %ld", lapicid);
 
     foreach_vcpu(i, vm, vcpu) {
@@ -68,6 +69,7 @@ int32_t hcall_sos_offline_cpu(struct acrn_vm *vm, uint64_t lapicid)
             offline_vcpu(vcpu);
         }
     }
+    put_vm_lock(vm);
 
     return ret;
 }
@@ -159,45 +161,50 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
 
     if (copy_from_gpa(vm, &cv, param, sizeof(cv)) == 0) {
         vm_id = get_vmid_by_uuid(&cv.uuid[0]);
-        if ((vm_id > vm->vm_id) && (vm_id < CONFIG_MAX_VM_NUM)
-            && (is_poweroff_vm(get_vm_from_vmid(vm_id)))) {
-            vm_config = get_vm_config(vm_id);
-
-            /* Filter out the bits should not set by DM and then assign it to guest_flags */
-            vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK);
-
-            /* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */
-            if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) {
-                /* By default launch VM with all the configured pCPUs */
-                uint64_t pcpu_bitmap = vm_config->cpu_affinity;
-
-                if (cv.cpu_affinity != 0UL) {
-                    /* overwrite the statically configured CPU affinity */
-                    pcpu_bitmap = cv.cpu_affinity;
-                }
-
-                /*
-                 * GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH
-                 * set in guest_flags
-                 */
-                if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL)
-                    && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) {
-                    pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
-                } else {
-                    if (create_vm(vm_id, pcpu_bitmap, vm_config, &target_vm) == 0) {
-                        /* return a relative vm_id from SOS view */
-                        cv.vmid = vmid_2_rel_vmid(vm->vm_id, vm_id);
-                        cv.vcpu_num = target_vm->hw.created_vcpus;
-                    } else {
-                        dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
-                        cv.vmid = ACRN_INVALID_VMID;
-                    }
-
-                    ret = copy_to_gpa(vm, &cv, param, sizeof(cv));
-                }
-            } else {
-                pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).", vm_id, cv.cpu_affinity);
-            }
+        if ((vm_id > vm->vm_id) && (vm_id < CONFIG_MAX_VM_NUM)) {
+            get_vm_lock(get_vm_from_vmid(vm_id));
+            if (is_poweroff_vm(get_vm_from_vmid(vm_id))) {
+
+                vm_config = get_vm_config(vm_id);
+
+                /* Filter out the bits should not set by DM and then assign it to guest_flags */
+                vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK);
+
+                /* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */
+                if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) {
+                    /* By default launch VM with all the configured pCPUs */
+                    uint64_t pcpu_bitmap = vm_config->cpu_affinity;
+
+                    if (cv.cpu_affinity != 0UL) {
+                        /* overwrite the statically configured CPU affinity */
+                        pcpu_bitmap = cv.cpu_affinity;
+                    }
+
+                    /*
+                     * GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH
+                     * set in guest_flags
+                     */
+                    if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0UL)
+                        && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) {
+                        pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
+                    } else {
+                        if (create_vm(vm_id, pcpu_bitmap, vm_config, &target_vm) == 0) {
+                            /* return a relative vm_id from SOS view */
+                            cv.vmid = vmid_2_rel_vmid(vm->vm_id, vm_id);
+                            cv.vcpu_num = target_vm->hw.created_vcpus;
+                        } else {
+                            dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
+                            cv.vmid = ACRN_INVALID_VMID;
+                        }
+
+                        ret = copy_to_gpa(vm, &cv, param, sizeof(cv));
+                    }
+                } else {
+                    pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).",
+                        vm_id, cv.cpu_affinity);
+                }
+            }
+            put_vm_lock(get_vm_from_vmid(vm_id));
         }
     }
 
@@ -219,11 +226,12 @@ int32_t hcall_destroy_vm(uint16_t vmid)
     int32_t ret = -1;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
+    get_vm_lock(target_vm);
     if (is_paused_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         ret = shutdown_vm(target_vm);
     }
-
+    put_vm_lock(target_vm);
     return ret;
 }
 
@@ -243,11 +251,13 @@ int32_t hcall_start_vm(uint16_t vmid)
     int32_t ret = -1;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
+    get_vm_lock(target_vm);
     if ((is_created_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
         /* TODO: check target_vm guest_flags */
         start_vm(target_vm);
         ret = 0;
     }
+    put_vm_lock(target_vm);
 
     return ret;
 }
@@ -268,12 +278,13 @@ int32_t hcall_pause_vm(uint16_t vmid)
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
+    get_vm_lock(target_vm);
     if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         pause_vm(target_vm);
         ret = 0;
     }
-
+    put_vm_lock(target_vm);
     return ret;
 }
 
@@ -294,10 +305,12 @@ int32_t hcall_reset_vm(uint16_t vmid)
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
+    get_vm_lock(target_vm);
     if (is_paused_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         ret = reset_vm(target_vm);
     }
+    put_vm_lock(target_vm);
     return ret;
 }
 
@@ -323,6 +336,7 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     struct acrn_vcpu *vcpu;
     int32_t ret = -1;
 
+    get_vm_lock(target_vm);
     /* Only allow setup init ctx while target_vm is inactive */
     if ((!is_poweroff_vm(target_vm)) && (param != 0U) && (is_postlaunched_vm(target_vm)) &&
             (target_vm->state != VM_RUNNING)) {
@@ -337,6 +351,7 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
             }
         }
     }
+    put_vm_lock(target_vm);
 
     return ret;
 }
@@ -508,6 +523,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
     uint16_t i;
     int32_t ret = -1;
 
+    get_vm_lock(target_vm);
     if (is_created_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         struct acrn_set_ioreq_buffer iobuf;
 
@@ -529,6 +545,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
             }
         }
     }
+    put_vm_lock(target_vm);
 
     return ret;
 }
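Note how hcall_create_vm() splits the old compound condition: the vm_id range check stays outside, but is_poweroff_vm() is now evaluated only after get_vm_lock() on the target slot, and the same lock-check-act shape appears in hcall_destroy_vm(), hcall_start_vm(), hcall_pause_vm(), hcall_reset_vm(), hcall_set_vcpu_regs() and hcall_set_ioreq_buffer(). Checking the state before taking the lock would leave a window in which another path could change the VM's state between the check and the action. A minimal sketch of that difference, with stand-in types and hypothetical names:

/* Stand-in sketch of check-then-act under the per-VM lock. */
#include <pthread.h>
#include <stdbool.h>

struct fake_vm { pthread_mutex_t vm_state_lock; int state; };

enum { VM_POWERED_OFF = 0, VM_CREATED = 1 };

/* Racy: the state could change between the check and the transition. */
static bool create_unlocked(struct fake_vm *vm)
{
    if (vm->state != VM_POWERED_OFF) {
        return false;
    }
    vm->state = VM_CREATED;   /* may clobber a concurrent transition */
    return true;
}

/* As in the diff: take the target VM's lock first, then check, then act. */
static bool create_locked(struct fake_vm *vm)
{
    bool ok = false;

    pthread_mutex_lock(&vm->vm_state_lock);
    if (vm->state == VM_POWERED_OFF) {
        vm->state = VM_CREATED;
        ok = true;
    }
    pthread_mutex_unlock(&vm->vm_state_lock);
    return ok;
}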
@@ -119,6 +119,10 @@ struct vm_arch {
 } __aligned(PAGE_SIZE);
 
 struct acrn_vm {
+    /* vm_state_lock MUST be the first field in VM structure it will not clear zero when creating VM
+     * this lock initialization depends on the clear BSS section
+     */
+    spinlock_t vm_state_lock;/* Spin-lock used to protect vm/vcpu state transition for a VM */
     struct vm_arch arch_vm; /* Reference to this VM's arch information */
     struct vm_hw_info hw; /* Reference to this VM's HW information */
     struct vm_sw_info sw; /* Reference to SW associated with this VM */
@@ -132,7 +136,6 @@ struct acrn_vm {
     struct iommu_domain *iommu; /* iommu domain of this VM */
     spinlock_t vlapic_mode_lock; /* Spin-lock used to protect vlapic_mode modifications for a VM */
     spinlock_t ept_lock; /* Spin-lock used to protect ept add/modify/remove for a VM */
-
     spinlock_t emul_mmio_lock; /* Used to protect emulation mmio_node concurrent access for a VM */
     uint16_t max_emul_mmio_regions; /* max index of the emulated mmio_region */
     struct mem_io_node emul_mmio[CONFIG_MAX_EMULATED_MMIO_REGIONS];
@@ -152,7 +155,6 @@ struct acrn_vm {
     uint32_t vcpuid_entry_nr, vcpuid_level, vcpuid_xlevel;
     struct vcpuid_entry vcpuid_entries[MAX_VM_VCPUID_ENTRIES];
     struct acrn_vpci vpci;
-
     uint8_t vrtc_offset;
 
     uint64_t intr_inject_delay_delta; /* delay of intr injection */
@@ -246,6 +248,15 @@ struct acrn_vm *get_highest_severity_vm(bool runtime);
 bool vm_hide_mtrr(const struct acrn_vm *vm);
 void update_vm_vlapic_state(struct acrn_vm *vm);
 enum vm_vlapic_mode check_vm_vlapic_mode(const struct acrn_vm *vm);
+/*
+ * @pre vm != NULL
+ */
+void get_vm_lock(struct acrn_vm *vm);
+
+/*
+ * @pre vm != NULL
+ */
+void put_vm_lock(struct acrn_vm *vm);
 #endif /* !ASSEMBLER */
 
 #endif /* VM_H_ */
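The struct comment says the lock's initialization "depends on the clear BSS section". The removed vmm_hypercall_lock in the dispatcher hunk hints at why that is enough: ACRN's spinlock_t is a {head, tail} ticket pair, and the all-zero value (head == tail == 0) is already the unlocked state, so a BSS-cleared vm_state_lock needs no spinlock_init() call and, as create_vm() now respects, must not be zeroed again while it may be held. A small illustrative ticket lock with that zero-is-unlocked property (not ACRN's implementation):

/* Illustrative ticket lock where the all-zero state means "unlocked". */
#include <stdatomic.h>

typedef struct {
    atomic_uint head;   /* next ticket to serve */
    atomic_uint tail;   /* next ticket to hand out */
} ticket_lock_t;

/* A BSS/zero-initialized lock has head == tail == 0, i.e. it is unlocked,
 * so no explicit init call is required before first use. */

static void ticket_lock(ticket_lock_t *l)
{
    unsigned int my = atomic_fetch_add_explicit(&l->tail, 1U, memory_order_relaxed);

    while (atomic_load_explicit(&l->head, memory_order_acquire) != my) {
        /* spin until our ticket is served */
    }
}

static void ticket_unlock(ticket_lock_t *l)
{
    atomic_fetch_add_explicit(&l->head, 1U, memory_order_release);
}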