hv: Change phys_cpu_num to static

-- change phys_cpu_num to static
-- add get_pcpu_nums() and is_pcpu_active() APIs
-- replace phys_cpu_num with get_pcpu_nums() everywhere except in cpu.c

Tracked-On: #1842
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Authored by Mingqiang Chi on 2018-12-26 13:54:05 +08:00; committed by wenlingz
parent 59e2de4805
commit 682824de6d
16 changed files with 57 additions and 35 deletions
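In short: phys_cpu_num keeps internal linkage inside cpu.c, every other module reads the pcpu count through get_pcpu_nums(), and the recurring bitmap_test(pcpu_id, &pcpu_active_bitmap) idiom collapses into is_pcpu_active(). A minimal, self-contained sketch of the resulting pattern (not the actual ACRN sources: walk_active_pcpus() is a hypothetical caller, the stand-in values are made up, and the bitmap test is open-coded because the hypervisor's bitmap helpers are not pulled in here):

#include <stdbool.h>
#include <stdint.h>

/* file-local state, formerly "extern uint16_t phys_cpu_num;" */
static uint16_t phys_cpu_num = 4U;            /* stand-in value */
static uint64_t pcpu_active_bitmap = 0xFUL;   /* stand-in value */

uint16_t get_pcpu_nums(void)
{
	return phys_cpu_num;
}

bool is_pcpu_active(uint16_t pcpu_id)
{
	/* open-coded equivalent of bitmap_test(pcpu_id, &pcpu_active_bitmap) */
	return ((pcpu_active_bitmap >> pcpu_id) & 1UL) != 0UL;
}

/* hypothetical caller: iterate via the accessors, not the raw globals */
static void walk_active_pcpus(void (*fn)(uint16_t))
{
	uint16_t i;
	uint16_t pcpu_nums = get_pcpu_nums();   /* cached once, as the diff does */

	for (i = 0U; i < pcpu_nums; i++) {
		if (is_pcpu_active(i)) {
			fn(i);
		}
	}
}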

View File

@@ -14,7 +14,7 @@
 #include <security.h>
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
-uint16_t phys_cpu_num = 0U;
+static uint16_t phys_cpu_num = 0U;
 static uint64_t pcpu_sync = 0UL;
 static uint16_t up_count = 0U;
 static uint64_t startup_paddr = 0UL;
@@ -68,6 +68,15 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 	per_cpu(boot_state, pcpu_id) = state;
 }
+uint16_t get_pcpu_nums(void)
+{
+	return phys_cpu_num;
+}
+
+bool is_pcpu_active(uint16_t pcpu_id)
+{
+	return bitmap_test(pcpu_id, &pcpu_active_bitmap);
+}
 void init_cpu_pre(uint16_t pcpu_id_args)
 {
 	uint16_t pcpu_id = pcpu_id_args;
@@ -219,7 +228,7 @@ static void start_cpu(uint16_t pcpu_id)
 	 * configured time-out has expired
 	 */
 	timeout = (uint32_t)CONFIG_CPU_UP_TIMEOUT * 1000U;
-	while ((bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) && (timeout != 0U)) {
+	while (!is_pcpu_active(pcpu_id) && (timeout != 0U)) {
 		/* Delay 10us */
 		udelay(10U);
@@ -228,7 +237,7 @@ static void start_cpu(uint16_t pcpu_id)
 	}
 	/* Check to see if expected CPU is actually up */
-	if (bitmap_test(pcpu_id, &pcpu_active_bitmap) == false) {
+	if (!is_pcpu_active(pcpu_id)) {
 		/* Print error */
 		pr_fatal("Secondary CPUs failed to come up");

View File

@@ -405,8 +405,8 @@ int32_t detect_hardware_support(void)
 	} else if (is_vmx_disabled()) {
 		pr_fatal("%s, VMX can not be enabled\n", __func__);
 		ret = -ENODEV;
-	} else if (phys_cpu_num > CONFIG_MAX_PCPU_NUM) {
-		pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, phys_cpu_num);
+	} else if (get_pcpu_nums() > CONFIG_MAX_PCPU_NUM) {
+		pr_fatal("%s, pcpu number(%d) is out of range\n", __func__, get_pcpu_nums());
 		ret = -ENODEV;
 	} else {
 		ret = check_vmx_mmu_cap();

View File

@@ -125,7 +125,7 @@ static uint16_t vm_apicid2vcpu_id(struct acrn_vm *vm, uint8_t lapicid)
 	pr_err("%s: bad lapicid %hhu", __func__, lapicid);
-	return phys_cpu_num;
+	return get_pcpu_nums();
 }
 static uint64_t
@@ -1713,13 +1713,11 @@ vlapic_reset(struct acrn_vlapic *vlapic)
 /**
  * @pre vlapic->vm != NULL
  * @pre vlapic->vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM
  */
 void
 vlapic_init(struct acrn_vlapic *vlapic)
 {
-	ASSERT(vlapic->vcpu->vcpu_id < phys_cpu_num,
-		"%s: vcpu_id is not initialized", __func__);
 	vlapic_init_timer(vlapic);
 	vlapic_reset(vlapic);

View File

@@ -398,7 +398,7 @@ static int32_t prepare_vm0(void)
 	struct vm_description vm0_desc;
 	(void)memset((void *)&vm0_desc, 0U, sizeof(vm0_desc));
-	vm0_desc.vm_hw_num_cores = phys_cpu_num;
+	vm0_desc.vm_hw_num_cores = get_pcpu_nums();
 	err = create_vm(&vm0_desc, &vm);

View File

@@ -256,7 +256,7 @@ void send_dest_ipi_mask(uint32_t dest_mask, uint32_t vector)
 	while (pcpu_id != INVALID_BIT_INDEX) {
 		bitmap32_clear_nolock(pcpu_id, &mask);
-		if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+		if (is_pcpu_active(pcpu_id)) {
 			icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
 			msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
 		} else {
@@ -270,7 +270,7 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
 {
 	union apic_icr icr;
-	if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+	if (is_pcpu_active(pcpu_id)) {
 		/* Set the destination field to the target processor. */
 		icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);

View File

@@ -39,7 +39,7 @@ void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
 	pcpu_id = ffs64(mask);
 	while (pcpu_id != INVALID_BIT_INDEX) {
 		bitmap_clear_nolock(pcpu_id, &mask);
-		if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
+		if (is_pcpu_active(pcpu_id)) {
 			smp_call = &per_cpu(smp_call_info, pcpu_id);
 			smp_call->func = func;
 			smp_call->data = data;

View File

@@ -14,8 +14,9 @@ void init_scheduler(void)
 {
 	struct sched_context *ctx;
 	uint32_t i;
+	uint16_t pcpu_nums = get_pcpu_nums();
 
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		ctx = &per_cpu(sched_ctx, i);
 		spinlock_init(&ctx->runqueue_lock);
@@ -42,8 +43,9 @@ uint16_t allocate_pcpu(void)
 {
 	uint16_t i;
 	uint16_t ret = INVALID_CPU_ID;
+	uint16_t pcpu_nums = get_pcpu_nums();
 
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		if (bitmap_test_and_set_lock(i, &pcpu_used_bitmap) == 0) {
 			ret = i;
 			break;
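A pattern worth noting here and in the files below: instead of calling get_pcpu_nums() in the loop condition, callers cache it once in a local pcpu_nums. Both forms are correct, since the count is fixed after boot; caching just keeps the loop bound visibly constant and avoids re-evaluating the call on every iteration. A sketch of the two forms (the touch_each_pcpu_* names are hypothetical):

#include <stdint.h>

uint16_t get_pcpu_nums(void);   /* from cpu.h after this commit */

void touch_each_pcpu_naive(void)
{
	uint16_t i;

	for (i = 0U; i < get_pcpu_nums(); i++) {   /* condition re-evaluated each pass */
		/* ... per-cpu work ... */
	}
}

void touch_each_pcpu_cached(void)
{
	uint16_t i;
	uint16_t pcpu_nums = get_pcpu_nums();      /* evaluated once */

	for (i = 0U; i < pcpu_nums; i++) {
		/* ... per-cpu work ... */
	}
}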

View File

@@ -141,7 +141,7 @@ static int32_t hcall_get_hw_info(struct acrn_vm *vm, uint64_t param)
 	(void)memset((void *)&hw_info, 0U, sizeof(hw_info));
-	hw_info.cpu_num = phys_cpu_num;
+	hw_info.cpu_num = get_pcpu_nums();
 	ret = copy_to_gpa(vm, &hw_info, param, sizeof(hw_info));
 	if (ret != 0) {
 		pr_err("%s: Unable to copy param to vm", __func__);

View File

@@ -66,6 +66,7 @@ static inline int32_t npk_write(const char *value, void *addr, size_t sz)
 void npk_log_setup(struct hv_npk_log_param *param)
 {
 	uint16_t i;
+	uint16_t pcpu_nums;
 	pr_info("HV_NPK_LOG: cmd %d param 0x%llx\n", param->cmd,
 		param->mmio_addr);
@@ -90,7 +91,8 @@ void npk_log_setup(struct hv_npk_log_param *param)
 	}
 	if ((base != 0UL) && (param->cmd == HV_NPK_LOG_CMD_ENABLE)) {
 		if (!npk_log_enabled) {
-			for (i = 0U; i < phys_cpu_num; i++) {
+			pcpu_nums = get_pcpu_nums();
+			for (i = 0U; i < pcpu_nums; i++) {
 				per_cpu(npk_log_ref, i) = 0U;
 			}
 		}

View File

@@ -718,6 +718,7 @@ reconfig:
 static void profiling_start_pmu(void)
 {
 	uint16_t i;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
@@ -725,7 +726,7 @@ static void profiling_start_pmu(void)
 		return;
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		if (per_cpu(profiling_info.sep_state, i).pmu_state != PMU_SETUP) {
 			pr_err("%s: invalid pmu_state %u on cpu%d",
 				__func__, get_cpu_var(profiling_info.sep_state).pmu_state, i);
@@ -733,7 +734,7 @@ static void profiling_start_pmu(void)
 		}
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_START;
 		per_cpu(profiling_info.sep_state, i).samples_logged = 0U;
 		per_cpu(profiling_info.sep_state, i).samples_dropped = 0U;
@@ -759,11 +760,12 @@ static void profiling_start_pmu(void)
 static void profiling_stop_pmu(void)
 {
 	uint16_t i;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
 	if (in_pmu_profiling) {
-		for (i = 0U; i < phys_cpu_num; i++) {
+		for (i = 0U; i < pcpu_nums; i++) {
 			per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_STOP;
 			if (per_cpu(profiling_info.sep_state, i).pmu_state == PMU_RUNNING) {
 				per_cpu(profiling_info.sep_state, i).pmu_state = PMU_SETUP;
@@ -812,7 +814,8 @@ static void profiling_stop_pmu(void)
 int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
 {
 	uint16_t i;
-	struct profiling_msr_ops_list msr_list[phys_cpu_num];
+	uint16_t pcpu_nums = get_pcpu_nums();
+	struct profiling_msr_ops_list msr_list[pcpu_nums];
 	(void)memset((void *)&msr_list, 0U, sizeof(msr_list));
@@ -823,7 +826,7 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr)
 		return -EINVAL;
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP;
 		per_cpu(profiling_info.msr_node, i) = &(msr_list[i]);
 	}
@@ -849,6 +852,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 	int32_t vm_idx;
 	uint16_t i, j;
 	struct profiling_vm_info_list vm_info_list;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	(void)memset((void *)&vm_info_list, 0U, sizeof(vm_info_list));
@@ -862,7 +866,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 	vm_idx = 0;
 	vm_info_list.vm_list[vm_idx].vm_id_num = -1;
 	(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].vm_name, 4U, "VMM\0", 4U);
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = (int32_t)i;
 		vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = (int32_t)i;
 		vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id
@@ -985,7 +989,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
 	uint64_t old_switch;
 	uint64_t new_switch;
 	uint16_t i;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	struct profiling_control prof_control;
 	(void)memset((void *)&prof_control, 0U, sizeof(prof_control));
@@ -1062,7 +1066,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
 			}
 		}
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		per_cpu(profiling_info.soc_state, i)
 			= SW_RUNNING;
 	}
@@ -1070,7 +1074,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr)
 	dev_dbg(ACRN_DBG_PROFILING,
 		"%s: socwatch stop collection invoked or collection switch not set!",
 		__func__);
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		per_cpu(profiling_info.soc_state, i)
 			= SW_STOPPED;
 	}
@@ -1099,6 +1103,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
 {
 	uint16_t i;
 	struct profiling_pmi_config pmi_config;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	(void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
@@ -1109,7 +1114,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
 		return -EINVAL;
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
 				PMU_INITIALIZED) ||
 			(per_cpu(profiling_info.sep_state, i).pmu_state ==
@@ -1127,7 +1132,7 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr)
 		return -EINVAL;
 	}
-	for (i = 0U; i < phys_cpu_num; i++) {
+	for (i = 0U; i < pcpu_nums; i++) {
 		per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
 		per_cpu(profiling_info.sep_state, i).num_pmi_groups
 			= pmi_config.num_groups;
@@ -1177,6 +1182,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
 	uint16_t i;
 	int32_t ret = 0;
 	struct profiling_vmsw_config vmsw_config;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	(void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
@@ -1189,7 +1195,7 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr)
 	switch (vmsw_config.collector_id) {
 	case COLLECT_PROFILE_DATA:
-		for (i = 0U; i < phys_cpu_num; i++) {
+		for (i = 0U; i < pcpu_nums; i++) {
 			per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
 			(void)memcpy_s(

View File

@@ -106,8 +106,7 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 int32_t sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
 {
-	if ((pcpu_id >= phys_cpu_num) ||
-		(sbuf_id >= ACRN_SBUF_ID_MAX)) {
+	if ((pcpu_id >= get_pcpu_nums()) || (sbuf_id >= ACRN_SBUF_ID_MAX)) {
 		return -EINVAL;
 	}

View File

@@ -849,6 +849,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
 	uint16_t pcpu_id;
 	uint32_t irq, vector;
 	size_t len, size = str_max;
+	uint16_t pcpu_nums = get_pcpu_nums();
 	len = snprintf(str, size, "\r\nIRQ\tVECTOR");
 	if (len >= size) {
@@ -857,7 +858,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
 	size -= len;
 	str += len;
-	for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
+	for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {
 		len = snprintf(str, size, "\tCPU%d", pcpu_id);
 		if (len >= size) {
 			goto overflow;
@@ -878,7 +879,7 @@ static void get_cpu_interrupt_info(char *str_arg, size_t str_max)
 		size -= len;
 		str += len;
-		for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
+		for (pcpu_id = 0U; pcpu_id < pcpu_nums; pcpu_id++) {
 			len = snprintf(str, size, "\t%d", per_cpu(irq_count, pcpu_id)[irq]);
 			if (len >= size) {
 				goto overflow;

View File

@@ -462,6 +462,8 @@ static inline void clac(void)
 	asm volatile ("clac" : : : "memory");
 }
+uint16_t get_pcpu_nums(void);
+bool is_pcpu_active(uint16_t pcpu_id);
 #else /* ASSEMBLER defined */
 #endif /* ASSEMBLER defined */

View File

@@ -270,6 +270,10 @@ int32_t vlapic_create(struct acrn_vcpu *vcpu);
  * @pre vcpu != NULL
  */
 void vlapic_free(struct acrn_vcpu *vcpu);
+/**
+ * @pre vlapic->vm != NULL
+ * @pre vlapic->vcpu->vcpu_id < CONFIG_MAX_VCPUS_PER_VM
+ */
 void vlapic_init(struct acrn_vlapic *vlapic);
 void vlapic_reset(struct acrn_vlapic *vlapic);
 void vlapic_restore(struct acrn_vlapic *vlapic, const struct lapic_regs *regs);

View File

@@ -50,7 +50,7 @@
 #define DEFAULT_DEST_MODE	IOAPIC_RTE_DESTLOG
 #define DEFAULT_DELIVERY_MODE	IOAPIC_RTE_DELLOPRI
-#define ALL_CPUS_MASK		((1UL << (uint64_t)phys_cpu_num) - 1UL)
+#define ALL_CPUS_MASK		((1UL << (uint64_t)get_pcpu_nums()) - 1UL)
 #define IRQ_ALLOC_BITMAP_SIZE	INT_DIV_ROUNDUP(NR_IRQS, 64U)
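One consequence of the ALL_CPUS_MASK change: the macro now expands to a function call, so it is no longer an integer constant expression; it cannot size an array or initialize an object with static storage duration, though run-time uses in the interrupt code are unaffected. A hedged illustration (the pcpu count of 4 is a made-up value):

#include <stdint.h>

uint16_t get_pcpu_nums(void);   /* declared in cpu.h after this commit */
#define ALL_CPUS_MASK ((1UL << (uint64_t)get_pcpu_nums()) - 1UL)

void example(void)
{
	/* With get_pcpu_nums() == 4U: (1UL << 4) - 1UL == 0xFUL, i.e. CPUs 0-3. */
	uint64_t mask = ALL_CPUS_MASK;   /* fine: evaluated at run time */
	(void)mask;
	/* static uint64_t m = ALL_CPUS_MASK;  -- would no longer compile */
}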

View File

@@ -56,7 +56,6 @@ struct per_cpu_region {
 } __aligned(PAGE_SIZE);	/* per_cpu_region size aligned with PAGE_SIZE */
 extern struct per_cpu_region per_cpu_data[];
-extern uint16_t phys_cpu_num;
 extern uint64_t pcpu_active_bitmap;
 /*
  * get percpu data for pcpu_id.