mirror of https://github.com/projectacrn/acrn-hypervisor.git (synced 2025-06-26 07:21:37 +00:00)
Rename phy_cpu_num to phys_cpu_num
phys_cpu_num is more common than phy_cpu_num; update all occurrences with a scripted rename.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent 3892bd0455
commit c585172492
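The message notes the rename was scripted; the exact invocation is not recorded in the commit, but a word-boundary-aware rename along these lines (a hypothetical reconstruction, GNU sed syntax) would produce the diff below while leaving identifiers such as alloc_phy_cpu_data() untouched:

    # \b keeps compound identifiers like alloc_phy_cpu_data() intact
    git grep -l '\bphy_cpu_num\b' | xargs sed -i 's/\bphy_cpu_num\b/phys_cpu_num/g'
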
@@ -23,7 +23,7 @@ spinlock_t up_count_spinlock = {
 };
 
 struct per_cpu_region *per_cpu_data_base_ptr;
-uint16_t phy_cpu_num = 0U;
+uint16_t phys_cpu_num = 0U;
 unsigned long pcpu_sync = 0;
 volatile uint32_t up_count = 0;
 
@@ -226,7 +226,7 @@ static int hardware_detect_support(void)
 
 static void alloc_phy_cpu_data(uint16_t pcpu_num)
 {
-	phy_cpu_num = pcpu_num;
+	phys_cpu_num = pcpu_num;
 
 	per_cpu_data_base_ptr = calloc(pcpu_num, sizeof(struct per_cpu_region));
 	ASSERT(per_cpu_data_base_ptr != NULL, "");
@@ -606,7 +606,7 @@ int cpu_find_logical_id(uint32_t lapic_id)
 {
 	int i;
 
-	for (i = 0; i < phy_cpu_num; i++) {
+	for (i = 0; i < phys_cpu_num; i++) {
 		if (per_cpu(lapic_id, i) == lapic_id)
 			return i;
 	}
@@ -690,7 +690,7 @@ void start_cpus()
 	/* Set flag showing number of CPUs expected to be up to all
 	 * cpus
 	 */
-	expected_up = phy_cpu_num;
+	expected_up = phys_cpu_num;
 
 	/* Broadcast IPIs to all other CPUs */
 	send_startup_ipi(INTR_CPU_STARTUP_ALL_EX_SELF,
@@ -725,7 +725,7 @@ void stop_cpus()
 	uint32_t timeout, expected_up;
 
 	timeout = CONFIG_CPU_UP_TIMEOUT * 1000;
-	for (i = 0; i < phy_cpu_num; i++) {
+	for (i = 0; i < phys_cpu_num; i++) {
 		if (get_cpu_id() == i) /* avoid offline itself */
 			continue;
 
@@ -137,7 +137,7 @@ static uint16_t vm_apicid2vcpu_id(struct vm *vm, uint8_t lapicid)
 
 	pr_err("%s: bad lapicid %d", __func__, lapicid);
 
-	return phy_cpu_num;
+	return phys_cpu_num;
 }
 
 static uint64_t
@@ -895,7 +895,7 @@ vlapic_calcdest(struct vm *vm, uint64_t *dmask, uint32_t dest,
 		 */
 		*dmask = 0;
 		vcpu_id = vm_apicid2vcpu_id(vm, dest);
-		if (vcpu_id < phy_cpu_num)
+		if (vcpu_id < phys_cpu_num)
 			bitmap_set(vcpu_id, dmask);
 	} else {
 		/*
@@ -1537,7 +1537,7 @@ vlapic_init(struct vlapic *vlapic)
 {
 	ASSERT(vlapic->vm != NULL, "%s: vm is not initialized", __func__);
 	ASSERT(vlapic->vcpu->vcpu_id >= 0 &&
-	       vlapic->vcpu->vcpu_id < phy_cpu_num,
+	       vlapic->vcpu->vcpu_id < phys_cpu_num,
 	       "%s: vcpu_id is not initialized", __func__);
 	ASSERT(vlapic->apic_page != NULL,
 	       "%s: apic_page is not initialized", __func__);
@@ -1765,7 +1765,7 @@ vlapic_set_local_intr(struct vm *vm, int vcpu_id, uint32_t vector)
 	uint64_t dmask = 0;
 	int error;
 
-	if (vcpu_id < -1 || vcpu_id >= phy_cpu_num)
+	if (vcpu_id < -1 || vcpu_id >= phys_cpu_num)
 		return -EINVAL;
 
 	if (vcpu_id == -1)
@@ -31,7 +31,7 @@ static void init_vm(struct vm_description *vm_desc,
 	/* Populate VM attributes from VM description */
 	if (is_vm0(vm_handle)) {
 		/* Allocate all cpus to vm0 at the beginning */
-		vm_handle->hw.num_vcpus = phy_cpu_num;
+		vm_handle->hw.num_vcpus = phys_cpu_num;
 		vm_handle->hw.exp_num_vcpus = vm_desc->vm_hw_num_cores;
 	} else {
 		vm_handle->hw.num_vcpus = vm_desc->vm_hw_num_cores;
@@ -88,7 +88,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 	INIT_LIST_HEAD(&vm->mmio_list);
 
 	if (vm->hw.num_vcpus == 0)
-		vm->hw.num_vcpus = phy_cpu_num;
+		vm->hw.num_vcpus = phys_cpu_num;
 
 	vm->hw.vcpu_array =
 		calloc(1, sizeof(struct vcpu *) * vm->hw.num_vcpus);
@@ -299,7 +299,7 @@ int prepare_vm0(void)
 		return ret;
 
 	/* Allocate all cpus to vm0 at the beginning */
-	for (i = 0; i < phy_cpu_num; i++)
+	for (i = 0; i < phys_cpu_num; i++)
 		prepare_vcpu(vm, i);
 
 	/* start vm0 BSP automatically */
@@ -177,7 +177,7 @@ static void _irq_desc_free_vector(uint32_t irq)
 	if (vector_to_irq[vr] == irq)
 		vector_to_irq[vr] = IRQ_INVALID;
 
-	for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++)
+	for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++)
 		per_cpu(irq_count, pcpu_id)[irq] = 0;
 }
 
@@ -699,7 +699,7 @@ void get_cpu_interrupt_info(char *str, int str_max)
 	len = snprintf(str, size, "\r\nIRQ\tVECTOR");
 	size -= len;
 	str += len;
-	for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++) {
+	for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++) {
 		len = snprintf(str, size, "\tCPU%d", pcpu_id);
 		size -= len;
 		str += len;
@@ -716,7 +716,7 @@ void get_cpu_interrupt_info(char *str, int str_max)
 		len = snprintf(str, size, "\r\n%d\t0x%X", irq, vector);
 		size -= len;
 		str += len;
-		for (pcpu_id = 0; pcpu_id < phy_cpu_num; pcpu_id++) {
+		for (pcpu_id = 0; pcpu_id < phys_cpu_num; pcpu_id++) {
 			len = snprintf(str, size, "\t%d",
 				per_cpu(irq_count, pcpu_id)[irq]);
 			size -= len;
@@ -413,9 +413,9 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
 		 * TODO: add shootdown APs operation if MMU will be
 		 * modified after AP start in the future.
 		 */
-		if ((phy_cpu_num != 0) &&
+		if ((phys_cpu_num != 0) &&
 			((pcpu_active_bitmap &
-			((1UL << phy_cpu_num) - 1))
+			((1UL << phys_cpu_num) - 1))
 			!= (1UL << CPU_BOOT_ID))) {
 			panic("need shootdown for invlpg");
 		}
@@ -20,7 +20,7 @@ void init_softirq(void)
 {
 	uint16_t cpu_id;
 
-	for (cpu_id = 0; cpu_id < phy_cpu_num; cpu_id++) {
+	for (cpu_id = 0; cpu_id < phys_cpu_num; cpu_id++) {
 		per_cpu(softirq_pending, cpu_id) = 0;
 		bitmap_set(SOFTIRQ_ATOMIC, &per_cpu(softirq_pending, cpu_id));
 	}
@@ -31,7 +31,7 @@ void raise_softirq(int softirq_id)
 	uint16_t cpu_id = get_cpu_id();
 	uint64_t *bitmap = &per_cpu(softirq_pending, cpu_id);
 
-	if (cpu_id >= phy_cpu_num)
+	if (cpu_id >= phys_cpu_num)
 		return;
 
 	bitmap_set(softirq_id, bitmap);
@@ -44,7 +44,7 @@ void exec_softirq(void)
 
 	int softirq_id;
 
-	if (cpu_id >= phy_cpu_num)
+	if (cpu_id >= phys_cpu_num)
 		return;
 
 	if (((*bitmap) & SOFTIRQ_MASK) == 0UL)
@@ -105,7 +105,7 @@ static int request_timer_irq(uint16_t pcpu_id,
 {
 	struct dev_handler_node *node = NULL;
 
-	if (pcpu_id >= phy_cpu_num)
+	if (pcpu_id >= phys_cpu_num)
 		return -EINVAL;
 
 	if (per_cpu(timer_node, pcpu_id) != NULL) {
@@ -111,9 +111,9 @@ int hv_main(uint16_t cpu_id)
 	pr_info("%s, Starting common entry point for CPU %d",
 			__func__, cpu_id);
 
-	if (cpu_id >= phy_cpu_num) {
+	if (cpu_id >= phys_cpu_num) {
 		pr_err("%s, cpu_id %d out of range %d\n",
-			__func__, cpu_id, phy_cpu_num);
+			__func__, cpu_id, phys_cpu_num);
 		return -EINVAL;
 	}
 
@@ -154,7 +154,7 @@ void get_vmexit_profile(char *str, int str_max)
 	size -= len;
 	str += len;
 
-	for (cpu = 0; cpu < phy_cpu_num; cpu++) {
+	for (cpu = 0; cpu < phys_cpu_num; cpu++) {
 		len = snprintf(str, size, "\t CPU%d\t US", cpu);
 		size -= len;
 		str += len;
@@ -164,7 +164,7 @@ void get_vmexit_profile(char *str, int str_max)
 		len = snprintf(str, size, "\r\n0x%x", i);
 		size -= len;
 		str += len;
-		for (cpu = 0; cpu < phy_cpu_num; cpu++) {
+		for (cpu = 0; cpu < phys_cpu_num; cpu++) {
 			len = snprintf(str, size, "\t%10lld\t%10lld",
 				per_cpu(vmexit_cnt, cpu)[i],
 				TICKS_TO_US(per_cpu(vmexit_time, cpu)[i]));
@@ -13,7 +13,7 @@ void init_scheduler(void)
 {
 	int i;
 
-	for (i = 0; i < phy_cpu_num; i++) {
+	for (i = 0; i < phys_cpu_num; i++) {
 		spinlock_init(&per_cpu(sched_ctx, i).runqueue_lock);
 		spinlock_init(&per_cpu(sched_ctx, i).scheduler_lock);
 		INIT_LIST_HEAD(&per_cpu(sched_ctx, i).runqueue);
@@ -36,7 +36,7 @@ int allocate_pcpu(void)
 {
 	int i;
 
-	for (i = 0; i < phy_cpu_num; i++) {
+	for (i = 0; i < phys_cpu_num; i++) {
 		if (bitmap_test_and_set(i, &pcpu_used_bitmap) == 0)
 			return i;
 	}
@@ -23,7 +23,7 @@ static struct logmsg logmsg;
 static inline void alloc_earlylog_sbuf(uint32_t cpu_id)
 {
 	uint32_t ele_size = LOG_ENTRY_SIZE;
-	uint32_t ele_num = ((HVLOG_BUF_SIZE >> 1) / phy_cpu_num
+	uint32_t ele_num = ((HVLOG_BUF_SIZE >> 1) / phys_cpu_num
 			- SBUF_HEAD_SIZE) / ele_size;
 
 	per_cpu(earlylog_sbuf, cpu_id) = sbuf_allocate(ele_num, ele_size);
@@ -75,7 +75,7 @@ void init_logmsg(__unused uint32_t mem_size, uint32_t flags)
 	logmsg.seq = 0;
 
 	/* allocate sbuf for log before sos booting */
-	for (idx = 0; idx < phy_cpu_num; idx++)
+	for (idx = 0; idx < phys_cpu_num; idx++)
 		alloc_earlylog_sbuf(idx);
 }
 
@@ -168,7 +168,7 @@ void print_logmsg_buffer(uint32_t cpu_id)
 	struct shared_buf **sbuf;
 	int is_earlylog = 0;
 
-	if (cpu_id >= (uint32_t)phy_cpu_num)
+	if (cpu_id >= (uint32_t)phys_cpu_num)
 		return;
 
 	if (per_cpu(earlylog_sbuf, cpu_id) != NULL) {
|
||||
|
||||
int sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva)
|
||||
{
|
||||
if (pcpu_id >= phy_cpu_num ||
|
||||
if (pcpu_id >= phys_cpu_num ||
|
||||
sbuf_id >= ACRN_SBUF_ID_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
|
@@ -31,7 +31,7 @@
 
 #define DEFAULT_DEST_MODE	IOAPIC_RTE_DESTLOG
 #define DEFAULT_DELIVERY_MODE	IOAPIC_RTE_DELLOPRI
-#define ALL_CPUS_MASK		((1U << phy_cpu_num) - 1U)
+#define ALL_CPUS_MASK		((1U << phys_cpu_num) - 1U)
 
 struct irq_desc;
 
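A note on the hunk above: ALL_CPUS_MASK expands to an expression over the runtime variable phys_cpu_num, so the mask is computed at each use site rather than at compile time, and the 1U shift implicitly assumes fewer than 32 physical CPUs. A minimal standalone illustration of the expansion:

    #include <stdint.h>

    extern uint16_t phys_cpu_num;
    #define ALL_CPUS_MASK ((1U << phys_cpu_num) - 1U)

    /* With phys_cpu_num == 4, ALL_CPUS_MASK evaluates to 0xF,
     * i.e. a bitmap covering CPUs 0 through 3. */
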
@@ -41,7 +41,7 @@ struct per_cpu_region {
 } __aligned(CPU_PAGE_SIZE); //per_cpu_region size aligned with CPU_PAGE_SIZE
 
 extern struct per_cpu_region *per_cpu_data_base_ptr;
-extern uint16_t phy_cpu_num;
+extern uint16_t phys_cpu_num;
 extern uint64_t pcpu_active_bitmap;
 /*
  * get percpu data for pcpu_id.
@@ -75,7 +75,7 @@ struct trace_entry {
 static inline bool
 trace_check(uint16_t cpu_id, __unused int evid)
 {
-	if (cpu_id >= phy_cpu_num)
+	if (cpu_id >= phys_cpu_num)
 		return false;
 
 	if (per_cpu(sbuf, cpu_id)[ACRN_TRACE] == NULL)