diff --git a/hypervisor/arch/x86/configs/vm_config.c b/hypervisor/arch/x86/configs/vm_config.c
index c4c45fd61..1f63ef449 100644
--- a/hypervisor/arch/x86/configs/vm_config.c
+++ b/hypervisor/arch/x86/configs/vm_config.c
@@ -101,8 +101,9 @@ static bool check_vm_clos_config(uint16_t vm_id)
 	uint16_t i;
 	bool ret = true;
 	struct acrn_vm_config *vm_config = get_vm_config(vm_id);
+	uint16_t vcpu_num = bitmap_weight(vm_config->cpu_affinity);
 
-	for (i = 0U; i < vm_config->vcpu_num; i++) {
+	for (i = 0U; i < vcpu_num; i++) {
 		if (vm_config->clos[i] >= valid_clos_num) {
 			pr_err("vm%u: vcpu%u clos(%u) exceed the max clos(%u).",
 				vm_id, i, vm_config->clos[i], valid_clos_num);
@@ -124,24 +125,22 @@ bool sanitize_vm_config(void)
 	struct acrn_vm_config *vm_config;
 
 	/* All physical CPUs except ocuppied by Pre-launched VMs are all
-	 * belong to SOS_VM. i.e. The cpu_affinity_bitmap of a SOS_VM is decided
-	 * by cpu_affinity_bitmap status in PRE_LAUNCHED_VMs.
+	 * belong to SOS_VM. i.e. The cpu_affinity of a SOS_VM is decided
+	 * by cpu_affinity status in PRE_LAUNCHED_VMs.
 	 * We need to setup a rule, that the vm_configs[] array should follow
 	 * the order of PRE_LAUNCHED_VM first, and then SOS_VM.
 	 */
 	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
 		vm_config = get_vm_config(vm_id);
-		vm_config->vcpu_num = bitmap_weight(vm_config->cpu_affinity_bitmap);
-		if ((vm_config->cpu_affinity_bitmap & ~ALL_CPUS_MASK) != 0UL) {
-			pr_err("%s: vm%u assigns invalid PCPU (pcpu bitmap: 0x%llx)", __func__, vm_id,
-				vm_config->cpu_affinity_bitmap);
+		if ((vm_config->cpu_affinity & ~ALL_CPUS_MASK) != 0UL) {
+			pr_err("%s: vm%u assigns invalid PCPU (0x%llx)", __func__, vm_id, vm_config->cpu_affinity);
 			ret = false;
 		}
 
 		switch (vm_config->load_order) {
 		case PRE_LAUNCHED_VM:
-			if (vm_config->vcpu_num == 0U) {
+			if (vm_config->cpu_affinity == 0UL) {
 				ret = false;
 			/* GUEST_FLAG_RT must be set if we have GUEST_FLAG_LAPIC_PASSTHROUGH set in guest_flags */
 			} else if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0U)
@@ -152,21 +151,20 @@ bool sanitize_vm_config(void)
 			} else if (is_safety_vm_uuid(vm_config->uuid) && (vm_config->severity != (uint8_t)SEVERITY_SAFETY_VM)) {
 				ret = false;
 			} else {
-				pre_launch_pcpu_bitmap |= vm_config->cpu_affinity_bitmap;
+				pre_launch_pcpu_bitmap |= vm_config->cpu_affinity;
 			}
 			break;
 		case SOS_VM:
 			/* Deduct pcpus of PRE_LAUNCHED_VMs */
-			vm_config->cpu_affinity_bitmap = ALL_CPUS_MASK ^ pre_launch_pcpu_bitmap;
-			vm_config->vcpu_num = bitmap_weight(vm_config->cpu_affinity_bitmap);
-			if ((vm_config->vcpu_num == 0U) || (vm_config->severity != (uint8_t)SEVERITY_SOS) ||
+			vm_config->cpu_affinity = ALL_CPUS_MASK ^ pre_launch_pcpu_bitmap;
+			if ((vm_config->cpu_affinity == 0UL) || (vm_config->severity != (uint8_t)SEVERITY_SOS) ||
 				((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0U)) {
 				ret = false;
 			}
 			break;
 		case POST_LAUNCHED_VM:
-			if ((vm_config->vcpu_num == 0U) ||
-				((vm_config->cpu_affinity_bitmap & pre_launch_pcpu_bitmap) != 0UL)) {
+			if ((vm_config->cpu_affinity == 0UL) ||
+				((vm_config->cpu_affinity & pre_launch_pcpu_bitmap) != 0UL)) {
 				pr_err("%s: Post-launch VM has no pcpus or share pcpu with Pre-launch VM!", __func__);
 				ret = false;
 			}
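
The vm_config.c hunks above drop the cached vm_config->vcpu_num in favor of an on-demand bitmap_weight() over cpu_affinity, and derive the SOS affinity as the complement of the pCPUs claimed by pre-launched VMs. Below is a minimal standalone sketch of that partition rule, assuming a hypothetical 4-pCPU platform; NUM_PCPUS, the sample bitmap, and main() are illustrative, not hypervisor code.

/* Sketch only: the affinity-partition rule from sanitize_vm_config(),
 * with NUM_PCPUS and the sample bitmap as assumed values.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PCPUS	4U
#define ALL_CPUS_MASK	((1UL << NUM_PCPUS) - 1UL)

int main(void)
{
	uint64_t pre_launch_pcpu_bitmap = 0UL;
	uint64_t pre_vm_affinity = 0x3UL;	/* pre-launched VM owns pCPU0-1 */

	pre_launch_pcpu_bitmap |= pre_vm_affinity;

	/* SOS receives every pCPU the pre-launched VMs did not claim */
	uint64_t sos_affinity = ALL_CPUS_MASK ^ pre_launch_pcpu_bitmap;

	printf("SOS cpu_affinity: 0x%" PRIx64 "\n", sos_affinity);	/* 0xc -> pCPU2-3 */
	return 0;
}

This complement is also why vm_configs[] must list PRE_LAUNCHED_VM entries before SOS_VM: pre_launch_pcpu_bitmap has to be complete before the SOS case computes its share.
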
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index 1eeeadf7c..1e61f9edc 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -214,14 +214,13 @@ struct acrn_vm *get_sos_vm(void)
 /**
  * @pre vm_config != NULL
  */
-static inline uint16_t get_vm_bsp_pcpu_id(const struct acrn_vm_config *vm_config)
+static inline uint16_t get_configured_bsp_pcpu_id(const struct acrn_vm_config *vm_config)
 {
-	uint16_t cpu_id = INVALID_CPU_ID;
-
-	/* The set least significant bit represents the pCPU ID for BSP */
-	cpu_id = ffs64(vm_config->cpu_affinity_bitmap);
-
-	return (cpu_id < get_pcpu_nums()) ? cpu_id : INVALID_CPU_ID;
+	/*
+	 * The least significant set bit represents the pCPU ID for the BSP.
+	 * vm_config->cpu_affinity has been sanitized to contain valid pCPU IDs.
+	 */
+	return ffs64(vm_config->cpu_affinity);
 }
 
 /**
@@ -398,7 +397,7 @@ static uint64_t lapic_pt_enabled_pcpu_bitmap(struct acrn_vm *vm)
  * @pre vm_id < CONFIG_MAX_VM_NUM && vm_config != NULL && rtn_vm != NULL
  * @pre vm->state == VM_POWERED_OFF
  */
-int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
+int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm)
 {
 	struct acrn_vm *vm = NULL;
 	int32_t status = 0;
@@ -504,12 +503,14 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
 	if (status == 0) {
 		/* We have assumptions:
 		 * 1) vcpus used by SOS has been offlined by DM before UOS re-use it.
-		 * 2) cpu_affinity_bitmap passed sanitization is OK for vcpu creating.
+		 * 2) pcpu_bitmap passed sanitization is OK for vcpu creating.
 		 */
-		uint64_t pcpu_bitmap = vm_config->cpu_affinity_bitmap;
-		while (pcpu_bitmap != 0UL) {
-			pcpu_id = ffs64(pcpu_bitmap);
-			bitmap_clear_nolock(pcpu_id, &pcpu_bitmap);
+		vm->hw.cpu_affinity = pcpu_bitmap;
+
+		uint64_t tmp64 = pcpu_bitmap;
+		while (tmp64 != 0UL) {
+			pcpu_id = ffs64(tmp64);
+			bitmap_clear_nolock(pcpu_id, &tmp64);
 			status = prepare_vcpu(vm, pcpu_id);
 			if (status != 0) {
 				break;
@@ -745,7 +746,8 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
 	int32_t err = 0;
 	struct acrn_vm *vm = NULL;
 
-	err = create_vm(vm_id, vm_config, &vm);
+	/* SOS and pre-launched VMs launch on all pCPUs defined in vm_config->cpu_affinity */
+	err = create_vm(vm_id, vm_config->cpu_affinity, vm_config, &vm);
 
 	if (err == 0) {
 		if (is_prelaunched_vm(vm)) {
@@ -766,7 +768,7 @@ void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config)
  */
 void launch_vms(uint16_t pcpu_id)
 {
-	uint16_t vm_id, bsp_id;
+	uint16_t vm_id;
 	struct acrn_vm_config *vm_config;
 
 	for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
@@ -776,8 +778,7 @@ void launch_vms(uint16_t pcpu_id)
 			sos_vm_ptr = &vm_array[vm_id];
 		}
 
-		bsp_id = get_vm_bsp_pcpu_id(vm_config);
-		if (pcpu_id == bsp_id) {
+		if (pcpu_id == get_configured_bsp_pcpu_id(vm_config)) {
 			prepare_vm(vm_id, vm_config);
 		}
 	}
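
With the vm.c changes, create_vm() receives the runtime pcpu_bitmap explicitly, records it in vm->hw.cpu_affinity, and peels pCPU IDs off the bitmap lowest-bit-first; the first pCPU peeled off hosts the BSP, which is why get_configured_bsp_pcpu_id() can simply return ffs64() of the sanitized affinity. Below is a rough sketch of that walk, with compiler builtins standing in for the hypervisor's ffs64()/bitmap_clear_nolock() and an arbitrary example bitmap.

/* Sketch only: the vCPU-creation walk from create_vm(), using
 * __builtin_ctzll() in place of ffs64() and a bitmask clear in
 * place of bitmap_clear_nolock(). 0x0d is an assumed bitmap.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmp64 = 0x0dUL;	/* pCPU0, pCPU2, pCPU3 */

	while (tmp64 != 0UL) {
		/* index of the lowest set bit == next pCPU ID; the first is the BSP */
		uint16_t pcpu_id = (uint16_t)__builtin_ctzll(tmp64);

		tmp64 &= (tmp64 - 1UL);	/* clear that bit */
		printf("prepare vCPU on pCPU%u\n", (unsigned int)pcpu_id);
	}
	return 0;
}
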
diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c
index d52cb3c12..2ad316d95 100644
--- a/hypervisor/common/hypercall.c
+++ b/hypervisor/common/hypercall.c
@@ -166,14 +166,14 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
 			/* Filter out the bits should not set by DM and then assign it to guest_flags */
 			vm_config->guest_flags |= (cv.vm_flag & DM_OWNED_GUEST_FLAG_MASK);
 
-			/* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity_bitmap only */
-			if ((cv.cpu_affinity & ~(vm_config->cpu_affinity_bitmap)) != 0UL) {
-				pr_err("%s: Post-launch VM can't share PCPU with Pre-launch VM!", __func__);
-			} else {
-				/* DM could overwrite the statically configured PCPU bitmap */
-				if (bitmap_weight(cv.cpu_affinity) != 0U) {
-					vm_config->vcpu_num = bitmap_weight(cv.cpu_affinity);
-					vm_config->cpu_affinity_bitmap = cv.cpu_affinity;
+			/* post-launched VM is allowed to choose pCPUs from vm_config->cpu_affinity only */
+			if ((cv.cpu_affinity & ~(vm_config->cpu_affinity)) == 0UL) {
+				/* By default launch VM with all the configured pCPUs */
+				uint64_t pcpu_bitmap = vm_config->cpu_affinity;
+
+				if (cv.cpu_affinity != 0UL) {
+					/* overwrite the statically configured CPU affinity */
+					pcpu_bitmap = cv.cpu_affinity;
 				}
 
 				/*
@@ -184,17 +184,19 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
 				 && ((vm_config->guest_flags & GUEST_FLAG_RT) == 0UL)) {
 					pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
 				} else {
-					if (create_vm(vm_id, vm_config, &target_vm) != 0) {
-						dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
-						cv.vmid = ACRN_INVALID_VMID;
-					} else {
+					if (create_vm(vm_id, pcpu_bitmap, vm_config, &target_vm) == 0) {
 						/* return a relative vm_id from SOS view */
 						cv.vmid = vmid_2_rel_vmid(vm->vm_id, vm_id);
-						cv.vcpu_num = vm_config->vcpu_num;
+						cv.vcpu_num = target_vm->hw.created_vcpus;
+					} else {
+						dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
+						cv.vmid = ACRN_INVALID_VMID;
 					}
 
 					ret = copy_to_gpa(vm, &cv, param, sizeof(cv));
 				}
+			} else {
+				pr_err("Post-launched VM%u chooses invalid pCPUs(0x%llx).", vm_id, cv.cpu_affinity);
 			}
 		}
 	}
diff --git a/hypervisor/include/arch/x86/guest/vm.h b/hypervisor/include/arch/x86/guest/vm.h
index fb30c3b28..deb948ae1 100644
--- a/hypervisor/include/arch/x86/guest/vm.h
+++ b/hypervisor/include/arch/x86/guest/vm.h
@@ -42,6 +42,7 @@ struct vm_hw_info {
 	/* vcpu array of this VM */
 	struct acrn_vcpu vcpu_array[MAX_VCPUS_PER_VM];
 	uint16_t created_vcpus;	/* Number of created vcpus */
+	uint64_t cpu_affinity;	/* Actual pCPUs this VM runs on. The set bits represent the pCPU IDs */
 } __aligned(PAGE_SIZE);
 
 struct sw_module_info {
@@ -239,7 +240,7 @@ void pause_vm(struct acrn_vm *vm);
 void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec);
 void start_vm(struct acrn_vm *vm);
 int32_t reset_vm(struct acrn_vm *vm);
-int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm);
+int32_t create_vm(uint16_t vm_id, uint64_t pcpu_bitmap, struct acrn_vm_config *vm_config, struct acrn_vm **rtn_vm);
 void prepare_vm(uint16_t vm_id, struct acrn_vm_config *vm_config);
 void launch_vms(uint16_t pcpu_id);
 bool is_poweroff_vm(const struct acrn_vm *vm);
diff --git a/hypervisor/include/arch/x86/vm_config.h b/hypervisor/include/arch/x86/vm_config.h
index d2490f645..5ae0a0544 100644
--- a/hypervisor/include/arch/x86/vm_config.h
+++ b/hypervisor/include/arch/x86/vm_config.h
@@ -145,9 +145,11 @@ struct acrn_vm_config {
 	enum acrn_vm_load_order load_order;	/* specify the load order of VM */
 	char name[MAX_VM_OS_NAME_LEN];		/* VM name identifier, useful for debug. */
 	const uint8_t uuid[16];			/* UUID of the VM */
-	uint16_t vcpu_num;			/* Number of vCPUs for the VM */
+	uint8_t reserved[2];			/* Temporarily reserved so that we don't need to update
+						 * the users of get_platform_info frequently.
+						 */
 	uint8_t severity;			/* severity of the VM */
-	uint64_t cpu_affinity_bitmap;		/* The set bits represent the pCPUs the vCPUs of
+	uint64_t cpu_affinity;			/* The set bits represent the pCPUs the vCPUs of
 						 * the VM may run on.
 						 */
 	uint64_t guest_flags;			/* VM flags that we want to configure for guest
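
Taken together, hcall_create_vm() now accepts a DM-supplied affinity only when it is a subset of the statically configured one, treats zero as "keep the configured default", and passes the chosen bitmap into create_vm() instead of writing it back into vm_config. Below is a condensed sketch of that selection rule; pick_pcpu_bitmap() is a hypothetical helper for illustration, not part of this patch.

/* Sketch only: the affinity-selection rule from hcall_create_vm(),
 * folded into a hypothetical helper.
 */
#include <stdbool.h>
#include <stdint.h>

static bool pick_pcpu_bitmap(uint64_t configured, uint64_t requested, uint64_t *pcpu_bitmap)
{
	/* requested pCPUs must be a subset of the configured affinity */
	bool valid = ((requested & ~configured) == 0UL);

	if (valid) {
		/* zero means "launch on all configured pCPUs" */
		*pcpu_bitmap = (requested != 0UL) ? requested : configured;
	}
	return valid;
}

For example, with configured = 0xc a request of 0x4 narrows the VM to pCPU2, a request of 0x0 keeps pCPU2-3, and a request of 0x3 is rejected because it names pCPUs outside the configured set.
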