Mirror of https://github.com/projectacrn/acrn-hypervisor.git
hv: Rename NORMAL_VM to POST_LAUNCHED_VM
The name NORMAL_VM does not clearly convey that these VMs are launched "later". POST_LAUNCHED_VM reflects the fact that these VMs are launched later, by one of the VMs that ACRN itself launches.

Tracked-On: #3034
Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 536bc5bd12
commit 9214c84600
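For orientation before the hunks: the substance of this commit is one enum value and one predicate rename; everything else below is a mechanical update of callers, comments, and documentation. The fragment here is a condensed, self-contained sketch of those two pieces for illustration only; struct acrn_vm, struct acrn_vm_config, get_vm_config(), and the two-entry config table are simplified stand-ins, not the real hypervisor definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The renamed VM type; comments condensed from the enum hunk further below. */
enum acrn_vm_type {
    UNDEFINED_VM = 0,
    PRE_LAUNCHED_VM,    /* launched by the ACRN hypervisor, with LAPIC_PT */
    SOS_VM,             /* launched by the ACRN hypervisor, without LAPIC_PT */
    POST_LAUNCHED_VM    /* launched by the device model running in SOS_VM */
};

/* Simplified stand-ins for the real acrn_vm / acrn_vm_config structures. */
struct acrn_vm_config { enum acrn_vm_type type; };
struct acrn_vm        { uint16_t vm_id; };

static struct acrn_vm_config vm_configs[2] = {
    { .type = SOS_VM },
    { .type = POST_LAUNCHED_VM },
};

static struct acrn_vm_config *get_vm_config(uint16_t vm_id)
{
    return &vm_configs[vm_id];    /* stub lookup, no bounds check */
}

/* Renamed from is_normal_vm(): true only for VMs launched by the device model. */
static bool is_postlaunched_vm(const struct acrn_vm *vm)
{
    return (get_vm_config(vm->vm_id)->type == POST_LAUNCHED_VM);
}

int main(void)
{
    struct acrn_vm sos = { .vm_id = 0U }, uos = { .vm_id = 1U };

    printf("sos post-launched? %d\n", is_postlaunched_vm(&sos));    /* prints 0 */
    printf("uos post-launched? %d\n", is_postlaunched_vm(&uos));    /* prints 1 */
    return 0;
}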
@@ -982,7 +982,7 @@ ACPI Emulation
 An alternative ACPI resource abstraction option is for the SOS (SOS_VM) to
-own all devices and emulate a set of virtual devices for the UOS (NORMAL_VM).
+own all devices and emulate a set of virtual devices for the UOS (POST_LAUNCHED_VM).
 This is the most popular ACPI resource model for virtualization,
 as shown in the picture below. ACRN currently
 uses device emulation plus some device passthrough for UOS.
@@ -63,7 +63,7 @@ The ACRN hypervisor can support both Linux\* VM and Android\* VM as a
 User OS, with the User OS managed by the ACRN hypervisor. Developers and
 OEMs can use this reference stack to run their own VMs, together with
 IC, IVI, and RSE VMs. The Service OS runs as SOS_VM (also known as Dom0 in
-other hypervisors) and the User OS runs as NORMAL_VM, (also known as DomU).
+other hypervisors) and the User OS runs as POST_LAUNCHED_VM, (also known as DomU).
 
 :numref:`ivi-block` shows an example block diagram of using the ACRN
 hypervisor.
@@ -8,7 +8,7 @@ config SDC
     bool "Software Defined Cockpit"
     help
       SDC (Software Defined Cockpit) is a typical scenario that ACRN supported.
-      SDC will have one pre-launched SOS VM and one post-launched NORMAL VM.
+      SDC will have one pre-launched SOS VM and one post-launched VM.
 
 config LOGICAL_PARTITION
     bool "Logical Partition VMs"
@@ -106,8 +106,8 @@ bool sanitize_vm_config(void)
             vm_config->pcpu_bitmap = sos_pcpu_bitmap;
         }
         break;
-    case NORMAL_VM:
-        /* Nothing to do here for a NORMAL_VM, break directly. */
+    case POST_LAUNCHED_VM:
+        /* Nothing to do here for a POST_LAUNCHED_VM, break directly. */
         break;
     default:
         /* Nothing to do for a UNDEFINED_VM, break directly. */
@@ -374,7 +374,7 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
     case 0x0bU:
         /* Patching X2APIC */
         if (is_lapic_pt(vcpu->vm)) {
-            /* for VM with LAPIC_PT, eg. PRE_LAUNCHED_VM or NORMAL_VM with LAPIC_PT*/
+            /* for VM with LAPIC_PT, eg. PRE_LAUNCHED_VM or POST_LAUNCHED_VM with LAPIC_PT*/
             cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
         } else if (is_sos_vm(vcpu->vm)) {
             cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
@@ -66,9 +66,9 @@ bool is_sos_vm(const struct acrn_vm *vm)
  * @pre vm != NULL
  * @pre vm->vmid < CONFIG_MAX_VM_NUM
  */
-bool is_normal_vm(const struct acrn_vm *vm)
+bool is_postlaunched_vm(const struct acrn_vm *vm)
 {
-    return (get_vm_config(vm->vm_id)->type == NORMAL_VM);
+    return (get_vm_config(vm->vm_id)->type == POST_LAUNCHED_VM);
 }
 
 /**
@@ -363,7 +363,7 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
         }
 
     } else {
-        /* For PRE_LAUNCHED_VM and NORMAL_VM */
+        /* For PRE_LAUNCHED_VM and POST_LAUNCHED_VM */
         if ((vm_config->guest_flags & GUEST_FLAG_SECURE_WORLD_ENABLED) != 0U) {
             vm->sworld_control.flag.supported = 1U;
         }
@@ -429,7 +429,7 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
     /* Populate return VM handle */
     *rtn_vm = vm;
     vm->sw.io_shared_page = NULL;
-    if ((vm_config->type == NORMAL_VM) && (vm_config->guest_flags & GUEST_FLAG_IO_COMPLETION_POLLING) != 0U) {
+    if ((vm_config->type == POST_LAUNCHED_VM) && (vm_config->guest_flags & GUEST_FLAG_IO_COMPLETION_POLLING) != 0U) {
         /* enable IO completion polling mode per its guest flags in vm_config. */
         vm->sw.is_completion_polling = true;
     }
@@ -206,7 +206,7 @@ int32_t hcall_destroy_vm(uint16_t vmid)
     int32_t ret = -1;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         ret = shutdown_vm(target_vm);
     }
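The hypercall hunks that follow all repeat the same mechanical substitution inside this guard: the handler acts only when the target VM exists and is post-launched. A compact, self-contained sketch of that pattern, with the hypervisor types and helpers replaced by trivial stubs for illustration (hcall_destroy_vm_sketch and the stub bodies are not the real implementations):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Trivial stand-ins for the real hypervisor types and helpers. */
struct acrn_vm { uint16_t vm_id; };

static struct acrn_vm *get_vm_from_vmid(uint16_t vmid)    { (void)vmid; return NULL; }  /* stub lookup */
static bool is_valid_vm(const struct acrn_vm *vm)         { return (vm != NULL); }      /* stub check  */
static bool is_postlaunched_vm(const struct acrn_vm *vm)  { (void)vm; return true; }    /* stub check  */
static int32_t shutdown_vm(struct acrn_vm *vm)            { (void)vm; return 0; }       /* stub action */

/* The guard shared by the post-launched-VM hypercalls after this commit:
 * honor the request only for a valid target that was launched by the device
 * model (POST_LAUNCHED_VM), never for SOS_VM or a PRE_LAUNCHED_VM. */
static int32_t hcall_destroy_vm_sketch(uint16_t vmid)
{
    int32_t ret = -1;
    struct acrn_vm *target_vm = get_vm_from_vmid(vmid);

    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
        ret = shutdown_vm(target_vm);
    }

    return ret;
}

int main(void)
{
    /* With the stub lookup returning NULL, the guard rejects the request (-1). */
    return (hcall_destroy_vm_sketch(1U) == -1) ? 0 : 1;
}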
@@ -230,7 +230,7 @@ int32_t hcall_start_vm(uint16_t vmid)
     int32_t ret = -1;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if ((is_valid_vm(target_vm)) && (is_normal_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
+    if ((is_valid_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
         /* TODO: check target_vm guest_flags */
         start_vm(target_vm);
         ret = 0;
@@ -255,7 +255,7 @@ int32_t hcall_pause_vm(uint16_t vmid)
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         pause_vm(target_vm);
         ret = 0;
@@ -286,7 +286,7 @@ int32_t hcall_create_vcpu(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     struct acrn_create_vcpu cv;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm) && (param != 0U)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm) && (param != 0U)) {
         if (copy_from_gpa(vm, &cv, param, sizeof(cv)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
         } else {
@@ -319,7 +319,7 @@ int32_t hcall_reset_vm(uint16_t vmid)
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* TODO: check target_vm guest_flags */
         ret = reset_vm(target_vm);
     }
@@ -348,7 +348,7 @@ int32_t hcall_set_vcpu_regs(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     int32_t ret = -1;
 
     /* Only allow setup init ctx while target_vm is inactive */
-    if ((is_valid_vm(target_vm)) && (param != 0U) && (is_normal_vm(target_vm)) && (target_vm->state != VM_STARTED)) {
+    if ((is_valid_vm(target_vm)) && (param != 0U) && (is_postlaunched_vm(target_vm)) && (target_vm->state != VM_STARTED)) {
         if (copy_from_gpa(vm, &vcpu_regs, param, sizeof(vcpu_regs)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
         } else if (vcpu_regs.vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
@@ -386,7 +386,7 @@ int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid,
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         if (ops->gsi < vioapic_pincount(vm)) {
             if (ops->gsi < vpic_pincount()) {
                 /*
@@ -473,7 +473,7 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     struct acrn_msi_entry msi;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         (void)memset((void *)&msi, 0U, sizeof(msi));
         if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
@@ -514,7 +514,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
     int32_t ret = -1;
 
     (void)memset((void *)&iobuf, 0U, sizeof(iobuf));
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
             pr_err("%p %s: Unable copy param to vm\n", target_vm, __func__);
         } else {
@@ -557,7 +557,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
     int32_t ret = -1;
 
     /* make sure we have set req_buf */
-    if ((is_valid_vm(target_vm)) && (is_normal_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
+    if ((is_valid_vm(target_vm)) && (is_postlaunched_vm(target_vm)) && (target_vm->sw.io_shared_page != NULL)) {
         dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
             vmid, vcpu_id);
 
@@ -696,7 +696,7 @@ int32_t hcall_set_vm_memory_regions(struct acrn_vm *vm, uint64_t param)
     if (regions.vmid < CONFIG_MAX_VM_NUM) {
         target_vm = get_vm_from_vmid(regions.vmid);
     }
-    if ((target_vm != NULL) && is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if ((target_vm != NULL) && is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         idx = 0U;
         while (idx < regions.mr_num) {
             if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
@@ -773,7 +773,7 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
     int32_t ret = -1;
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         (void)memset((void *)&wp, 0U, sizeof(wp));
 
         if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
@@ -846,7 +846,7 @@ int32_t hcall_assign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     bool bdf_valid = true;
     bool iommu_valid = true;
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         if (param < 0x10000UL) {
             bdf = (uint16_t) param;
         } else {
@@ -906,7 +906,7 @@ int32_t hcall_deassign_ptdev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     bool bdf_valid = true;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         if (param < 0x10000UL) {
             bdf = (uint16_t) param;
         } else {
@@ -943,7 +943,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
     (void)memset((void *)&irq, 0U, sizeof(irq));
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
             pr_err("%s: Unable copy param to vm\n", __func__);
         } else {
@@ -985,7 +985,7 @@ hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     struct hc_ptdev_irq irq;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         (void)memset((void *)&irq, 0U, sizeof(irq));
 
         if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
@@ -1037,7 +1037,7 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
     if (target_vm_id < CONFIG_MAX_VM_NUM) {
         target_vm = get_vm_from_vmid(target_vm_id);
     }
-    if ((target_vm != NULL) && (is_valid_vm(target_vm)) && (is_normal_vm(target_vm))) {
+    if ((target_vm != NULL) && (is_valid_vm(target_vm)) && (is_postlaunched_vm(target_vm))) {
         switch (cmd & PMCMD_TYPE_MASK) {
         case PMCMD_GET_PX_CNT: {
 
@@ -1151,7 +1151,7 @@ int32_t hcall_vm_intr_monitor(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
     uint64_t hpa;
     struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-    if (is_valid_vm(target_vm) && is_normal_vm(target_vm)) {
+    if (is_valid_vm(target_vm) && is_postlaunched_vm(target_vm)) {
         /* the param for this hypercall is page aligned */
         hpa = gpa2hpa(vm, param);
         if (hpa != INVALID_HPA) {
@@ -545,7 +545,7 @@ emulate_io(struct acrn_vcpu *vcpu, struct io_request *io_req)
         break;
     }
 
-    if ((status == -ENODEV) && (vm_config->type == NORMAL_VM)) {
+    if ((status == -ENODEV) && (vm_config->type == POST_LAUNCHED_VM)) {
         /*
          * No handler from HV side, search from VHM in Dom0
          *
@@ -205,7 +205,7 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config);
 void launch_vms(uint16_t pcpu_id);
 bool is_valid_vm(const struct acrn_vm *vm);
 bool is_sos_vm(const struct acrn_vm *vm);
-bool is_normal_vm(const struct acrn_vm *vm);
+bool is_postlaunched_vm(const struct acrn_vm *vm);
 bool is_prelaunched_vm(const struct acrn_vm *vm);
 uint16_t get_vmid_by_uuid(const uint8_t *uuid);
 struct acrn_vm *get_vm_from_vmid(uint16_t vm_id);
@@ -20,13 +20,13 @@
 /*
  * PRE_LAUNCHED_VM is launched by ACRN hypervisor, with LAPIC_PT;
  * SOS_VM is launched by ACRN hypervisor, without LAPIC_PT;
- * NORMAL_VM is launched by ACRN devicemodel, with/without LAPIC_PT depends on usecases.
+ * POST_LAUNCHED_VM is launched by ACRN devicemodel, with/without LAPIC_PT depends on usecases.
  */
 enum acrn_vm_type {
     UNDEFINED_VM = 0,
     PRE_LAUNCHED_VM,
     SOS_VM,
-    NORMAL_VM /* Post-launched VM */
+    POST_LAUNCHED_VM /* Launched by Devicemodel in SOS_VM */
 };
 
 struct acrn_vm_mem_config {
@@ -36,7 +36,7 @@ struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
         }
     },
     {
-        .type = NORMAL_VM,
+        .type = POST_LAUNCHED_VM,
         .uuid = {0xd2U, 0x79U, 0x54U, 0x38U, 0x25U, 0xd6U, 0x11U, 0xe8U, \
                  0x86U, 0x4eU, 0xcbU, 0x7aU, 0x18U, 0xb3U, 0x46U, 0x43U},
         /* d2795438-25d6-11e8-864e-cb7a18b34643 */