Rename phy_cpu_num to phys_cpu_num

phys_cpu_num is a more widely used name than phy_cpu_num; update all
occurrences via a bulk rename command.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Xiangyang Wu
2018-06-22 13:07:27 +08:00
committed by lijinxia
parent 3892bd0455
commit c585172492
14 changed files with 34 additions and 34 deletions

View File

@@ -23,7 +23,7 @@ spinlock_t up_count_spinlock = {
};
struct per_cpu_region *per_cpu_data_base_ptr;
uint16_t phy_cpu_num = 0U;
uint16_t phys_cpu_num = 0U;
unsigned long pcpu_sync = 0;
volatile uint32_t up_count = 0;
@@ -226,7 +226,7 @@ static int hardware_detect_support(void)
static void alloc_phy_cpu_data(uint16_t pcpu_num)
{
phy_cpu_num = pcpu_num;
phys_cpu_num = pcpu_num;
per_cpu_data_base_ptr = calloc(pcpu_num, sizeof(struct per_cpu_region));
ASSERT(per_cpu_data_base_ptr != NULL, "");
@@ -606,7 +606,7 @@ int cpu_find_logical_id(uint32_t lapic_id)
{
int i;
for (i = 0; i < phy_cpu_num; i++) {
for (i = 0; i < phys_cpu_num; i++) {
if (per_cpu(lapic_id, i) == lapic_id)
return i;
}
@@ -690,7 +690,7 @@ void start_cpus()
/* Set flag showing number of CPUs expected to be up to all
* cpus
*/
expected_up = phy_cpu_num;
expected_up = phys_cpu_num;
/* Broadcast IPIs to all other CPUs */
send_startup_ipi(INTR_CPU_STARTUP_ALL_EX_SELF,
@@ -725,7 +725,7 @@ void stop_cpus()
uint32_t timeout, expected_up;
timeout = CONFIG_CPU_UP_TIMEOUT * 1000;
for (i = 0; i < phy_cpu_num; i++) {
for (i = 0; i < phys_cpu_num; i++) {
if (get_cpu_id() == i) /* avoid offline itself */
continue;

View File

@@ -137,7 +137,7 @@ static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
pr_err("%s: bad lapicid %d", __func__, lapicid);
return phy_cpu_num;
return phys_cpu_num;
}
static uint64_t
@@ -895,7 +895,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
*/
*dmask = 0;
vcpu_id = vm_apicid2vcpu_id(vm, dest);
if (vcpu_id < phy_cpu_num)
if (vcpu_id < phys_cpu_num)
bitmap_set(vcpu_id, dmask);
} else {
/*
@@ -1537,7 +1537,7 @@ vlapic_init(struct vlapic *vlapic)
{
ASSERT(vlapic->vm != NULL, "%s: vm is not initialized", __func__);
ASSERT(vlapic->vcpu->vcpu_id >= 0 &&
vlapic->vcpu->vcpu_id < phy_cpu_num,
vlapic->vcpu->vcpu_id < phys_cpu_num,
"%s: vcpu_id is not initialized", __func__);
ASSERT(vlapic->apic_page != NULL,
"%s: apic_page is not initialized", __func__);
@@ -1765,7 +1765,7 @@ vlapic_set_local_intr(struct vm *vm, int vcpu_id, uint32_t vector)
uint64_t dmask = 0;
int error;
if (vcpu_id < -1 || vcpu_id >= phy_cpu_num)
if (vcpu_id < -1 || vcpu_id >= phys_cpu_num)
return -EINVAL;
if (vcpu_id == -1)

View File

@@ -31,7 +31,7 @@ static void init_vm(struct vm_description *vm_desc,
/* Populate VM attributes from VM description */
if (is_vm0(vm_handle)) {
/* Allocate all cpus to vm0 at the beginning */
vm_handle->hw.num_vcpus = phy_cpu_num;
vm_handle->hw.num_vcpus = phys_cpu_num;
vm_handle->hw.exp_num_vcpus = vm_desc->vm_hw_num_cores;
} else {
vm_handle->hw.num_vcpus = vm_desc->vm_hw_num_cores;
@@ -88,7 +88,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
INIT_LIST_HEAD(&vm->mmio_list);
if (vm->hw.num_vcpus == 0)
vm->hw.num_vcpus = phy_cpu_num;
vm->hw.num_vcpus = phys_cpu_num;
vm->hw.vcpu_array =
calloc(1, sizeof(struct vcpu *) * vm->hw.num_vcpus);
@@ -299,7 +299,7 @@ int prepare_vm0(void)
return ret;
/* Allocate all cpus to vm0 at the beginning */
for (i = 0; i < phy_cpu_num; i++)
for (i = 0; i < phys_cpu_num; i++)
prepare_vcpu(vm, i);
/* start vm0 BSP automatically */

View File

@@ -177,7 +177,7 @@ static void _irq_desc_free_vector(uint32_t irq)
if (vector_to_irq[vr] == irq)
vector_to_irq[vr] = IRQ_INVALID;
for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++)
for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++)
per_cpu(irq_count, pcpu_id)[irq] = 0;
}
@@ -699,7 +699,7 @@ void get_cpu_interrupt_info(char *str, int str_max)
len = snprintf(str, size, "\r\nIRQ\tVECTOR");
size -= len;
str += len;
for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++) {
for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++) {
len = snprintf(str, size, "\tCPU%d", pcpu_id);
size -= len;
str += len;
@@ -716,7 +716,7 @@ void get_cpu_interrupt_info(char *str, int str_max)
len = snprintf(str, size, "\r\n%d\t0x%X", irq, vector);
size -= len;
str += len;
for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++) {
for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++) {
len = snprintf(str, size, "\t%d",
per_cpu(irq_count, pcpu_id)[irq]);
size -= len;

View File

@@ -413,9 +413,9 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
* TODO: add shootdown APs operation if MMU will be
* modified after AP start in the future.
*/
if ((phy_cpu_num != 0) &&
if ((phys_cpu_num != 0) &&
((pcpu_active_bitmap &
((1UL << phy_cpu_num) - 1))
((1UL << phys_cpu_num) - 1))
!= (1UL << CPU_BOOT_ID))) {
panic("need shootdown for invlpg");
}

View File

@@ -20,7 +20,7 @@ void init_softirq(void)
{
uint16_t cpu_id;
for (cpu_id = 0; cpu_id < phy_cpu_num; cpu_id++) {
for (cpu_id = 0; cpu_id < phys_cpu_num; cpu_id++) {
per_cpu(softirq_pending, cpu_id) = 0;
bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
}
@@ -31,7 +31,7 @@ void raise_softirq(int softirq_id)
uint16_t cpu_id = get_cpu_id();
uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
if (cpu_id >= phy_cpu_num)
if (cpu_id >= phys_cpu_num)
return;
bitmap_set(softirq_id, bitmap);
@@ -44,7 +44,7 @@ void exec_softirq(void)
int softirq_id;
if (cpu_id >= phy_cpu_num)
if (cpu_id >= phys_cpu_num)
return;
if (((*bitmap) & SOFTIRQ_MASK) == 0UL)

View File

@@ -105,7 +105,7 @@ static int request_timer_irq(uint16_t pcpu_id,
{
struct dev_handler_node *node = NULL;
if (pcpu_id >= phy_cpu_num)
if (pcpu_id >= phys_cpu_num)
return -EINVAL;
if (per_cpu(timer_node, pcpu_id) != NULL) {