mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-28 16:27:01 +00:00)
hv:fix MISRA-C violations in create_vm
-- fix "more than one exit point" and "goto detected" violations
-- change prepare_vm0_memmap to void type
-- Add free_vm_id when create vm failed

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
commit 81a9de6067 (parent bb47184f3c)
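The shape of the fix, reduced to a minimal self-contained sketch (step_a(), step_b() and undo_step_a() are placeholder helpers, not ACRN APIs): errors are funneled into a single status variable, cleanup is deferred behind a need_cleanup flag, and the function returns exactly once instead of jumping to an err: label.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int32_t step_a(void) { return 0; }   /* stand-in for a step that succeeds */
static int32_t step_b(void) { return -1; }  /* stand-in for a step that fails */
static void undo_step_a(void) { puts("cleanup"); }

static int32_t create_something(void)
{
	int32_t status;
	bool need_cleanup = false;

	status = step_a();
	if (status == 0) {
		status = step_b();
		if (status != 0) {
			need_cleanup = true;	/* remember to undo step_a() */
		}
	}

	if (need_cleanup) {
		undo_step_a();		/* analogous to free_vm_id() in the diff below */
	}

	return status;			/* single exit point, no goto */
}

int main(void)
{
	printf("status = %d\n", (int)create_something());
	return 0;
}

Compared with the previous goto err; flow, no path can skip the cleanup check, which is exactly what the flagged MISRA-C rules require.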
@@ -463,7 +463,7 @@ int32_t copy_to_gva(struct acrn_vcpu *vcpu, void *h_ptr, uint64_t gva,
  * @pre vm != NULL
  * @pre is_vm0(vm) == true
  */
-int32_t prepare_vm0_memmap(struct acrn_vm *vm)
+void prepare_vm0_memmap(struct acrn_vm *vm)
 {
 	uint32_t i;
 	uint64_t attr_uc = (EPT_RWX | EPT_UNCACHED);
@@ -507,5 +507,4 @@ int32_t prepare_vm0_memmap(struct acrn_vm *vm)
 	 */
 	hv_hpa = get_hv_image_base();
 	ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
-	return 0;
 }

@@ -26,9 +26,7 @@ static inline uint16_t alloc_vm_id(void)
 		}
 		id = ffz64(vmid_bitmap);
 	}
-
-	id = (id >= CONFIG_MAX_VM_NUM) ? INVALID_VM_ID : id;
-	return id;
+	return (id < CONFIG_MAX_VM_NUM) ? id : INVALID_VM_ID;
 }
 
 static inline void free_vm_id(const struct acrn_vm *vm)
@@ -62,9 +60,10 @@ struct acrn_vm *get_vm_from_vmid(uint16_t vm_id)
  */
 int32_t create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
 {
-	struct acrn_vm *vm;
-	int32_t status;
+	struct acrn_vm *vm = NULL;
+	int32_t status = 0;
 	uint16_t vm_id;
+	bool need_cleanup = false;
 
 #ifdef CONFIG_PARTITION_MODE
 	vm_id = vm_desc->vm_id;
@@ -72,129 +71,131 @@ int32_t create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
 #else
 	vm_id = alloc_vm_id();
 #endif
-	if (vm_id >= CONFIG_MAX_VM_NUM) {
-		pr_err("%s, vm id is invalid!\n", __func__);
-		return -ENODEV;
-	}
-
-	/* Allocate memory for virtual machine */
-	vm = &vm_array[vm_id];
-	(void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
-	vm->vm_id = vm_id;
+
+	if (vm_id < CONFIG_MAX_VM_NUM) {
+		/* Allocate memory for virtual machine */
+		vm = &vm_array[vm_id];
+		(void)memset((void *)vm, 0U, sizeof(struct acrn_vm));
+		vm->vm_id = vm_id;
 #ifdef CONFIG_PARTITION_MODE
-	/* Map Virtual Machine to its VM Description */
-	vm->vm_desc = vm_desc;
+		/* Map Virtual Machine to its VM Description */
+		vm->vm_desc = vm_desc;
 #endif
-	vm->hw.created_vcpus = 0U;
-	vm->emul_mmio_regions = 0U;
-	vm->snoopy_mem = true;
-
-	/* gpa_lowtop are used for system start up */
-	vm->hw.gpa_lowtop = 0UL;
-
-	init_ept_mem_ops(vm);
-	vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info);
-	sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
-
-	/* Only for SOS: Configure VM software information */
-	/* For UOS: This VM software information is configure in DM */
-	if (is_vm0(vm)) {
-		vm->snoopy_mem = false;
-		rebuild_vm0_e820();
-		status = prepare_vm0_memmap(vm);
-		if (status != 0) {
-			goto err;
-		}
+		vm->hw.created_vcpus = 0U;
+		vm->emul_mmio_regions = 0U;
+		vm->snoopy_mem = true;
+
+		/* gpa_lowtop are used for system start up */
+		vm->hw.gpa_lowtop = 0UL;
+
+		init_ept_mem_ops(vm);
+		vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info);
+		sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
+
+		/* Only for SOS: Configure VM software information */
+		/* For UOS: This VM software information is configure in DM */
+		if (is_vm0(vm)) {
+			vm->snoopy_mem = false;
+			rebuild_vm0_e820();
+			prepare_vm0_memmap(vm);
+
 #ifndef CONFIG_EFI_STUB
-		status = init_vm_boot_info(vm);
-		if (status != 0) {
-			goto err;
-		}
+			status = init_vm_boot_info(vm);
 #endif
-		init_iommu_vm0_domain(vm);
-	} else {
-		/* populate UOS vm fields according to vm_desc */
-		vm->sworld_control.flag.supported = vm_desc->sworld_supported;
-		if (vm->sworld_control.flag.supported != 0UL) {
-			struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
-
-			ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-				hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
-				TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
-		}
-
-		(void)memcpy_s(&vm->GUID[0], sizeof(vm->GUID),
-			&vm_desc->GUID[0], sizeof(vm_desc->GUID));
+			if (status == 0) {
+				init_iommu_vm0_domain(vm);
+			} else {
+				need_cleanup = true;
+			}
+
+		} else {
+			/* populate UOS vm fields according to vm_desc */
+			vm->sworld_control.flag.supported = vm_desc->sworld_supported;
+			if (vm->sworld_control.flag.supported != 0UL) {
+				struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
+
+				ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+					hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
+					TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
+			}
+
+			(void)memcpy_s(&vm->GUID[0], sizeof(vm->GUID),
+				&vm_desc->GUID[0], sizeof(vm_desc->GUID));
 #ifdef CONFIG_PARTITION_MODE
-		ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-				vm_desc->start_hpa, 0UL, vm_desc->mem_size,
-				EPT_RWX|EPT_WB);
-		init_vm_boot_info(vm);
+			ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+					vm_desc->start_hpa, 0UL, vm_desc->mem_size,
+					EPT_RWX|EPT_WB);
+			init_vm_boot_info(vm);
 #endif
-	}
-
-	enable_iommu();
-
-	INIT_LIST_HEAD(&vm->softirq_dev_entry_list);
-	spinlock_init(&vm->softirq_dev_lock);
-	vm->intr_inject_delay_delta = 0UL;
-
-	/* Set up IO bit-mask such that VM exit occurs on
-	 * selected IO ranges
-	 */
-	setup_io_bitmap(vm);
-
-	vm_setup_cpu_state(vm);
-
-	if (is_vm0(vm)) {
-		/* Load pm S state data */
-		if (vm_load_pm_s_state(vm) == 0) {
-			register_pm1ab_handler(vm);
-		}
-
-		/* Create virtual uart */
-		vuart_init(vm);
-	}
-	vpic_init(vm);
-
+		}
+
+		if (status == 0) {
+			enable_iommu();
+
+			INIT_LIST_HEAD(&vm->softirq_dev_entry_list);
+			spinlock_init(&vm->softirq_dev_lock);
+			vm->intr_inject_delay_delta = 0UL;
+
+			/* Set up IO bit-mask such that VM exit occurs on
+			 * selected IO ranges
+			 */
+			setup_io_bitmap(vm);
+
+			vm_setup_cpu_state(vm);
+
+			if (is_vm0(vm)) {
+				/* Load pm S state data */
+				if (vm_load_pm_s_state(vm) == 0) {
+					register_pm1ab_handler(vm);
+				}
+
+				/* Create virtual uart */
+				vuart_init(vm);
+			}
+			vpic_init(vm);
+
 #ifdef CONFIG_PARTITION_MODE
-	/* Create virtual uart */
-	if (vm_desc->vm_vuart) {
-		vuart_init(vm);
-	}
-	vrtc_init(vm);
+			/* Create virtual uart */
+			if (vm_desc->vm_vuart) {
+				vuart_init(vm);
+			}
+			vrtc_init(vm);
 #endif
-
-	vpci_init(vm);
-
-	/* vpic wire_mode default is INTR */
-	vm->wire_mode = VPIC_WIRE_INTR;
-
-	/* Init full emulated vIOAPIC instance */
-	vioapic_init(vm);
-
-	/* Populate return VM handle */
-	*rtn_vm = vm;
-	vm->sw.io_shared_page = NULL;
+
+			vpci_init(vm);
+
+			/* vpic wire_mode default is INTR */
+			vm->wire_mode = VPIC_WIRE_INTR;
+
+			/* Init full emulated vIOAPIC instance */
+			vioapic_init(vm);
+
+			/* Populate return VM handle */
+			*rtn_vm = vm;
+			vm->sw.io_shared_page = NULL;
#ifdef CONFIG_IOREQ_POLLING
-	/* Now, enable IO completion polling mode for all VMs with CONFIG_IOREQ_POLLING. */
-	vm->sw.is_completion_polling = true;
+			/* Now, enable IO completion polling mode for all VMs with CONFIG_IOREQ_POLLING. */
+			vm->sw.is_completion_polling = true;
 #endif
-
-	status = set_vcpuid_entries(vm);
-	if (status != 0) {
-		goto err;
-	}
-
-	vm->state = VM_CREATED;
-
-	return 0;
-
-err:
-
-	if (vm->arch_vm.nworld_eptp != NULL) {
-		(void)memset(vm->arch_vm.nworld_eptp, 0U, PAGE_SIZE);
-	}
+			status = set_vcpuid_entries(vm);
+			if (status == 0) {
+				vm->state = VM_CREATED;
+			} else {
+				need_cleanup = true;
+			}
+		}
+
+	} else {
+		pr_err("%s, vm id is invalid!\n", __func__);
+		status = -ENODEV;
+	}
+
+	if (need_cleanup && (vm != NULL)) {
+		if (vm->arch_vm.nworld_eptp != NULL) {
+			(void)memset(vm->arch_vm.nworld_eptp, 0U, PAGE_SIZE);
+		}
+		free_vm_id(vm);
+	}
 
 	return status;
 }
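With the rework above, a caller sees every failure of create_vm() through the returned status (for example -ENODEV when no VM id is available), and any partially initialized state has already been cleaned up inside create_vm() itself. A hypothetical call site, assuming only the declarations visible in this diff (launch_guest() and desc are illustrative names, not part of this commit):

static int32_t launch_guest(struct vm_description *desc)
{
	struct acrn_vm *vm = NULL;
	int32_t err = create_vm(desc, &vm);

	if (err != 0) {
		/* -ENODEV for an invalid vm id, or the error propagated from
		 * init_vm_boot_info()/set_vcpuid_entries(); no manual cleanup needed here. */
		pr_err("%s: create_vm failed (%d)", __func__, err);
	} else {
		/* vm now points into vm_array[] and is in the VM_CREATED state */
	}

	return err;
}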

@@ -70,7 +70,7 @@
 #define LDTR_AR (0x0082U) /* LDT, type must be 2, refer to SDM Vol3 26.3.1.2 */
 #define TR_AR (0x008bU) /* TSS (busy), refer to SDM Vol3 26.3.1.2 */
 
-int32_t prepare_vm0_memmap(struct acrn_vm *vm);
+void prepare_vm0_memmap(struct acrn_vm *vm);
 
 /* Definition for a mem map lookup */
 struct vm_lu_mem_map {
|