diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index fb0bd6b94..51f527179 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -14,7 +14,7 @@
 #include
 
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
-uint16_t phys_cpu_num = 0U;
+static uint16_t phys_cpu_num = 0U;
 static uint64_t pcpu_sync = 0UL;
 static uint16_t up_count = 0U;
 static uint64_t startup_paddr = 0UL;
@@ -68,6 +68,15 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
     per_cpu(boot_state, pcpu_id) = state;
 }
 
+uint16_t get_pcpu_nums(void)
+{
+    return phys_cpu_num;
+}
+
+bool is_pcpu_active(uint16_t pcpu_id)
+{
+    return bitmap_test(pcpu_id, &pcpu_active_bitmap);
+}
 void init_cpu_pre(uint16_t pcpu_id_args)
 {
     uint16_t pcpu_id = pcpu_id_args;
@@ -219,7 +228,7 @@ static void start_cpu(uint16_t pcpu_id)
      * configured time-out has expired
      */
 
     timeout = (uint32_t)CONFIG_CPU_UP_TIMEOUT * 1000U;
-    while ((bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) && (timeout != 0U)) {
+    while (!is_pcpu_active(pcpu_id) && (timeout != 0U)) {
         /* Delay 10us */
         udelay(10U);
@@ -228,7 +237,7 @@
     }
 
     /* Check to see if expected CPU is actually up */
-    if (bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) {
+    if (!is_pcpu_active(pcpu_id)) {
         /* Print error */
         pr_fatal("Secondary CPUs failed to come up");
 
diff --git a/hypervisor/arch/x86/cpu_caps.c b/hypervisor/arch/x86/cpu_caps.c
index f25576704..d36c4edc4 100644
--- a/hypervisor/arch/x86/cpu_caps.c
+++ b/hypervisor/arch/x86/cpu_caps.c
@@ -405,8 +405,8 @@ int32_t detect_hardware_support(void)
     } else if (is_vmx_disabled()) {
         pr_fatal("%s, VMX can not be enabled\n", __func__);
         ret = -ENODEV;
-    } else if (phys_cpu_num > CONFIG_MAX_PCPU_NUM) {
-        pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, phys_cpu_num);
+    } else if (get_pcpu_nums() > CONFIG_MAX_PCPU_NUM) {
+        pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, get_pcpu_nums());
         ret = -ENODEV;
     } else {
         ret = check_vmx_mmu_cap();
diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c
index 8b9ce666d..dbcd46f22 100644
--- a/hypervisor/arch/x86/guest/vlapic.c
+++ b/hypervisor/arch/x86/guest/vlapic.c
@@ -125,7 +125,7 @@ static uint16_t vm_apicid2vcpu_id(struct acrn_vm *vm, uint8_t lapicid)
 
     pr_err("%s: bad lapicid %hhu", __func__, lapicid);
 
-    return phys_cpu_num;
+    return get_pcpu_nums();
 }
 
 static uint64_t
@@ -1713,13 +1713,11 @@ vlapic_reset(struct acrn_vlapic *vlapic)
 
 /**
  * @pre vlapic->vm != NULL
+ * @pre vlapic->vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM
  */
 void
 vlapic_init(struct acrn_vlapic *vlapic)
 {
-    ASSERT(vlapic->vcpu->vcpu_id < phys_cpu_num,
-        "%s: vcpu_id is not initialized", __func__);
-
     vlapic_init_timer(vlapic);
 
     vlapic_reset(vlapic);
diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c
index e42af5c82..14b936382 100644
--- a/hypervisor/arch/x86/guest/vm.c
+++ b/hypervisor/arch/x86/guest/vm.c
@@ -398,7 +398,7 @@ static int32_t prepare_vm0(void)
     struct vm_description vm0_desc;
 
     (void)memset((void *)&vm0_desc, 0U, sizeof(vm0_desc));
-    vm0_desc.vm_hw_num_cores = phys_cpu_num;
+    vm0_desc.vm_hw_num_cores = get_pcpu_nums();
 
     err = create_vm(&vm0_desc, &vm);
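The cpu.c hunks above are the heart of the patch: phys_cpu_num becomes static, so everything outside cpu.c must go through get_pcpu_nums() and is_pcpu_active() instead of reading the global or testing pcpu_active_bitmap directly. A minimal caller-side sketch of the intended pattern (count_active_pcpus() is a hypothetical helper, not part of this patch):

static uint16_t count_active_pcpus(void)
{
    uint16_t i;
    uint16_t cnt = 0U;
    uint16_t pcpu_nums = get_pcpu_nums();    /* snapshot the pCPU count once */

    for (i = 0U; i < pcpu_nums; i++) {
        if (is_pcpu_active(i)) {             /* wraps bitmap_test() on pcpu_active_bitmap */
            cnt++;
        }
    }
    return cnt;
}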
diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c
index 77fae8bc6..4f082f45a 100644
--- a/hypervisor/arch/x86/lapic.c
+++ b/hypervisor/arch/x86/lapic.c
@@ -256,7 +256,7 @@ void send_dest_ipi_mask(uint32_t dest_mask, uint32_t vector)
     while (pcpu_id != INVALID_BIT_INDEX) {
         bitmap32_clear_nolock(pcpu_id, &mask);
-        if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+        if (is_pcpu_active(pcpu_id)) {
             icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
             msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
         } else {
@@ -270,7 +270,7 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
 {
     union apic_icr icr;
 
-    if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+    if (is_pcpu_active(pcpu_id)) {
         /* Set the destination field to the target processor. */
         icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
diff --git a/hypervisor/arch/x86/notify.c b/hypervisor/arch/x86/notify.c
index a951867e1..80eb1e386 100644
--- a/hypervisor/arch/x86/notify.c
+++ b/hypervisor/arch/x86/notify.c
@@ -39,7 +39,7 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
     pcpu_id = ffs64(mask);
     while (pcpu_id != INVALID_BIT_INDEX) {
         bitmap_clear_nolock(pcpu_id, &mask);
-        if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+        if (is_pcpu_active(pcpu_id)) {
             smp_call = &per_cpu(smp_call_info, pcpu_id);
             smp_call->func = func;
             smp_call->data = data;
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 8ca797c85..eac7a7b4c 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -14,8 +14,9 @@ void init_scheduler(void)
 {
     struct sched_context *ctx;
     uint32_t i;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         ctx = &per_cpu(sched_ctx, i);
 
         spinlock_init(&ctx->runqueue_lock);
@@ -42,8 +43,9 @@ uint16_t allocate_pcpu(void)
 {
     uint16_t i;
     uint16_t ret = INVALID_CPU_ID;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         if (bitmap_test_and_set_lock(i, &pcpu_used_bitmap) == 0) {
             ret = i;
             break;
diff --git a/hypervisor/debug/hypercall.c b/hypervisor/debug/hypercall.c
index fbc055615..7acb69a49 100644
--- a/hypervisor/debug/hypercall.c
+++ b/hypervisor/debug/hypercall.c
@@ -141,7 +141,7 @@ static int32_t hcall_get_hw_info(struct acrn_vm *vm, uint64_t param)
 
     (void)memset((void *)&hw_info, 0U, sizeof(hw_info));
-    hw_info.cpu_num = phys_cpu_num;
+    hw_info.cpu_num = get_pcpu_nums();
     ret = copy_to_gpa(vm, &hw_info, param, sizeof(hw_info));
     if (ret != 0) {
         pr_err("%s: Unable to copy param to vm", __func__);
diff --git a/hypervisor/debug/npk_log.c b/hypervisor/debug/npk_log.c
index 3a05b1f26..a2c7ccea7 100644
--- a/hypervisor/debug/npk_log.c
+++ b/hypervisor/debug/npk_log.c
@@ -66,6 +66,7 @@ static inline int32_t npk_write(const char *value, void *addr, size_t sz)
 void npk_log_setup(struct hv_npk_log_param *param)
 {
     uint16_t i;
+    uint16_t pcpu_nums;
 
     pr_info("HV_NPK_LOG: cmd %d param 0x%llx\n", param->cmd,
         param->mmio_addr);
@@ -90,7 +91,8 @@ void npk_log_setup(struct hv_npk_log_param *param)
     }
     if ((base != 0UL) && (param->cmd == HV_NPK_LOG_CMD_ENABLE)) {
         if (!npk_log_enabled) {
-            for (i = 0U; i < phys_cpu_num; i++) {
+            pcpu_nums = get_pcpu_nums();
+            for (i = 0U; i < pcpu_nums; i++) {
                 per_cpu(npk_log_ref, i) = 0U;
             }
         }
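send_dest_ipi_mask() and smp_call_function() share the same mask-walk idiom, so their conversion is mechanical: only the active-CPU test changes. The 64-bit variant of the idiom, sketched as a hypothetical helper (act_on_pcpu() is a placeholder):

static void for_each_masked_pcpu(uint64_t dest_mask)
{
    uint64_t mask = dest_mask;              /* caller-supplied destination set */
    uint16_t pcpu_id = ffs64(mask);         /* lowest set bit; INVALID_BIT_INDEX when empty */

    while (pcpu_id != INVALID_BIT_INDEX) {
        bitmap_clear_nolock(pcpu_id, &mask);    /* consume this bit */
        if (is_pcpu_active(pcpu_id)) {
            act_on_pcpu(pcpu_id);           /* placeholder: write the ICR, queue a smp_call, ... */
        }
        pcpu_id = ffs64(mask);              /* advance to the next set bit */
    }
}

send_dest_ipi_mask() walks a 32-bit mask with bitmap32_clear_nolock() while smp_call_function() walks 64 bits with bitmap_clear_nolock(); the shape is otherwise identical.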
diff --git a/hypervisor/debug/profiling.c b/hypervisor/debug/profiling.c
index 306f07a09..38730fc6e 100644
--- a/hypervisor/debug/profiling.c
+++ b/hypervisor/debug/profiling.c
@@ -718,6 +718,7 @@ reconfig:
 static void profiling_start_pmu(void)
 {
     uint16_t i;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
@@ -725,7 +726,7 @@ static void profiling_start_pmu(void)
         return;
     }
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         if (per_cpu(profiling_info.sep_state, i).pmu_state != PMU_SETUP) {
             pr_err("%s: invalid pmu_state %u on cpu%d", __func__,
                 get_cpu_var(profiling_info.sep_state).pmu_state, i);
@@ -733,7 +734,7 @@ static void profiling_start_pmu(void)
         }
     }
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_START;
         per_cpu(profiling_info.sep_state, i).samples_logged = 0U;
         per_cpu(profiling_info.sep_state, i).samples_dropped = 0U;
@@ -759,11 +760,12 @@ static void profiling_start_pmu(void)
 static void profiling_stop_pmu(void)
 {
     uint16_t i;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
 
     if (in_pmu_profiling) {
-        for (i = 0U; i < phys_cpu_num; i++) {
+        for (i = 0U; i < pcpu_nums; i++) {
             per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_STOP;
             if (per_cpu(profiling_info.sep_state, i).pmu_state == PMU_RUNNING) {
                 per_cpu(profiling_info.sep_state, i).pmu_state = PMU_SETUP;
@@ -812,7 +814,8 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
 {
     uint16_t i;
-    struct profiling_msr_ops_list msr_list[phys_cpu_num];
+    uint16_t pcpu_nums = get_pcpu_nums();
+    struct profiling_msr_ops_list msr_list[pcpu_nums];
 
     (void)memset((void *)&msr_list, 0U, sizeof(msr_list));
@@ -823,7 +826,7 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
         return -EINVAL;
     }
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP;
         per_cpu(profiling_info.msr_node, i) = &(msr_list[i]);
     }
@@ -849,6 +852,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
     int32_t vm_idx;
     uint16_t i, j;
     struct profiling_vm_info_list vm_info_list;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     (void)memset((void *)&vm_info_list, 0U, sizeof(vm_info_list));
@@ -862,7 +866,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
     vm_idx = 0;
     vm_info_list.vm_list[vm_idx].vm_id_num = -1;
     (void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].vm_name, 4U, "VMM\0", 4U);
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = (int32_t)i;
         vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = (int32_t)i;
         vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id
@@ -985,7 +989,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
     uint64_t old_switch;
     uint64_t new_switch;
     uint16_t i;
-
+    uint16_t pcpu_nums = get_pcpu_nums();
     struct profiling_control prof_control;
 
     (void)memset((void *)&prof_control, 0U, sizeof(prof_control));
@@ -1062,7 +1066,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
                 }
             }
         }
-        for (i = 0U; i < phys_cpu_num; i++) {
+        for (i = 0U; i < pcpu_nums; i++) {
            per_cpu(profiling_info.soc_state, i) = SW_RUNNING;
         }
@@ -1070,7 +1074,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
         dev_dbg(ACRN_DBG_PROFILING,
             "%s: socwatch stop collection invoked or collection switch not set!",
             __func__);
-        for (i = 0U; i < phys_cpu_num; i++) {
+        for (i = 0U; i < pcpu_nums; i++) {
             per_cpu(profiling_info.soc_state, i) = SW_STOPPED;
         }
@@ -1099,6 +1103,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
 {
     uint16_t i;
     struct profiling_pmi_config pmi_config;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     (void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
@@ -1109,7 +1114,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
         return -EINVAL;
     }
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
             PMU_INITIALIZED) ||
             (per_cpu(profiling_info.sep_state, i).pmu_state ==
@@ -1127,7 +1132,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
         return -EINVAL;
     }
 
-    for (i = 0U; i < phys_cpu_num; i++) {
+    for (i = 0U; i < pcpu_nums; i++) {
         per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
         per_cpu(profiling_info.sep_state, i).num_pmi_groups
             = pmi_config.num_groups;
@@ -1177,6 +1182,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
     uint16_t i;
     int32_t ret = 0;
     struct profiling_vmsw_config vmsw_config;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     (void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
@@ -1189,7 +1195,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
     switch (vmsw_config.collector_id) {
     case COLLECT_PROFILE_DATA:
-        for (i = 0U; i < phys_cpu_num; i++) {
+        for (i = 0U; i < pcpu_nums; i++) {
             per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
 
             (void)memcpy_s(
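Every profiling.c function touched here follows one pattern: take a single local snapshot, uint16_t pcpu_nums = get_pcpu_nums(), and loop against that instead of re-reading a global in each loop header; in profiling_msr_ops_all_cpus() the snapshot also sizes the VLA. Condensed sketch of the body pattern as applied there:

    uint16_t i;
    uint16_t pcpu_nums = get_pcpu_nums();               /* loop-invariant, read once */
    struct profiling_msr_ops_list msr_list[pcpu_nums];  /* VLA sized by the runtime pCPU count */

    (void)memset((void *)&msr_list, 0U, sizeof(msr_list));
    for (i = 0U; i < pcpu_nums; i++) {
        per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP;
        per_cpu(profiling_info.msr_node, i) = &(msr_list[i]);
    }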
diff --git a/hypervisor/debug/sbuf.c b/hypervisor/debug/sbuf.c
index 72527d345..9a5053959 100644
--- a/hypervisor/debug/sbuf.c
+++ b/hypervisor/debug/sbuf.c
@@ -106,8 +106,7 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 int32_t sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
 {
-    if ((pcpu_id >= phys_cpu_num) ||
-        (sbuf_id >= ACRN_SBUF_ID_MAX)) {
+    if ((pcpu_id >= get_pcpu_nums()) || (sbuf_id >= ACRN_SBUF_ID_MAX)) {
         return -EINVAL;
     }
diff --git a/hypervisor/debug/shell.c b/hypervisor/debug/shell.c
index fb081e29e..5faf74261 100644
--- a/hypervisor/debug/shell.c
+++ b/hypervisor/debug/shell.c
@@ -849,6 +849,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
     uint16_t pcpu_id;
     uint32_t irq, vector;
     size_t len, size = str_max;
+    uint16_t pcpu_nums = get_pcpu_nums();
 
     len = snprintf(str, size, "\r\nIRQ\tVECTOR");
     if (len >= size) {
@@ -857,7 +858,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
     size -= len;
     str += len;
 
-    for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
+    for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {
         len = snprintf(str, size, "\tCPU%d", pcpu_id);
         if (len >= size) {
             goto overflow;
@@ -878,7 +879,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
         size -= len;
         str += len;
 
-        for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
+        for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {
             len = snprintf(str, size, "\t%d", per_cpu(irq_count, pcpu_id)[irq]);
             if (len >= size) {
                 goto overflow;
diff --git a/hypervisor/include/arch/x86/cpu.h b/hypervisor/include/arch/x86/cpu.h
index 662d65928..6b05a8520 100644
--- a/hypervisor/include/arch/x86/cpu.h
+++ b/hypervisor/include/arch/x86/cpu.h
@@ -462,6 +462,8 @@ static inline void clac(void)
     asm volatile ("clac" : : : "memory");
 }
 
+uint16_t get_pcpu_nums(void);
+bool is_pcpu_active(uint16_t pcpu_id);
 #else /* ASSEMBLER defined */
 
 #endif /* ASSEMBLER defined */
diff --git a/hypervisor/include/arch/x86/guest/vlapic.h b/hypervisor/include/arch/x86/guest/vlapic.h
index 3a96ef07f..621940e2c 100644
--- a/hypervisor/include/arch/x86/guest/vlapic.h
+++ b/hypervisor/include/arch/x86/guest/vlapic.h
@@ -270,6 +270,10 @@ int32_t vlapic_create(struct acrn_vcpu *vcpu);
  * @pre vcpu != NULL
  */
 void vlapic_free(struct acrn_vcpu *vcpu);
+/**
+ * @pre vlapic->vm != NULL
+ * @pre vlapic->vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM
+ */
 void vlapic_init(struct acrn_vlapic *vlapic);
 void vlapic_reset(struct acrn_vlapic *vlapic);
 void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs);
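With the runtime ASSERT removed from vlapic_init(), the bound becomes a documented precondition, restated here in vlapic.h and now checked against CONFIG_MAX_VCPUS_PER_VM rather than the pCPU count, since a vcpu_id is a per-VM index. Callers must uphold it themselves; an illustrative guard, not code from this patch (vcpu_vlapic() is assumed from ACRN's vcpu API):

    /* Hypothetical caller-side guard reflecting the new @pre contract */
    if (vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM) {
        vlapic_init(vcpu_vlapic(vcpu));
    }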
diff --git a/hypervisor/include/arch/x86/irq.h b/hypervisor/include/arch/x86/irq.h
index e47bae980..710fd9412 100644
--- a/hypervisor/include/arch/x86/irq.h
+++ b/hypervisor/include/arch/x86/irq.h
@@ -50,7 +50,7 @@
 #define DEFAULT_DEST_MODE       IOAPIC_RTE_DESTLOG
 #define DEFAULT_DELIVERY_MODE   IOAPIC_RTE_DELLOPRI
-#define ALL_CPUS_MASK           ((1UL << (uint64_t)phys_cpu_num) - 1UL)
+#define ALL_CPUS_MASK           ((1UL << (uint64_t)get_pcpu_nums()) - 1UL)
 
 #define IRQ_ALLOC_BITMAP_SIZE   INT_DIV_ROUNDUP(NR_IRQS, 64U)
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index de0b0e4c1..21fbc752b 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -56,7 +56,6 @@ struct per_cpu_region {
 } __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */
 
 extern struct per_cpu_region per_cpu_data[];
-extern uint16_t phys_cpu_num;
 extern uint64_t pcpu_active_bitmap;
 /*
  * get percpu data for pcpu_id.
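ALL_CPUS_MASK now expands to a function call at each use site, so the macro keeps working after phys_cpu_num goes static. Worked example: with get_pcpu_nums() == 4 it evaluates to ((1UL << 4) - 1UL) == 0xFUL, i.e. bits 0-3 set, one per pCPU. A one-line usage sketch (VECTOR_TEST is a placeholder vector, not from this patch):

    /* Broadcast to every pCPU counted at boot: mask 0xF on a 4-pCPU system */
    send_dest_ipi_mask((uint32_t)ALL_CPUS_MASK, VECTOR_TEST);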