HV: Add prefix 'p' before 'cpu' to physical cpu related functions

This patch adds the prefix 'p' before 'cpu' in the names of physical-CPU-related functions, making explicit that they operate on physical CPUs rather than virtual ones (for example, get_cpu_id() becomes get_pcpu_id(), cpu_has_cap() becomes pcpu_has_cap(), and start_cpus()/stop_cpus() become start_pcpus()/stop_pcpus()). There is no change in code logic.

Tracked-On: #2991
Signed-off-by: Kaige Fu <kaige.fu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Kaige Fu, 2019-04-21 21:52:12 +00:00; committed by wenlingz
parent 25741b62db
commit a85d11ca7a
38 changed files with 177 additions and 177 deletions

View File

@@ -162,7 +162,7 @@ after:
mov %eax,%gs // Was 32bit POC CLS
/* continue with chipset level initialization */
-call init_primary_cpu
+call init_primary_pcpu
loop:
jmp loop

View File

@@ -33,7 +33,7 @@
* the macros involved are changed.
*/
-.extern init_secondary_cpu
+.extern init_secondary_pcpu
.section .trampoline_reset,"ax"
@@ -160,7 +160,7 @@ trampoline_start64:
.align 8
.global main_entry
main_entry:
-.quad init_secondary_cpu /* default entry is AP start entry */
+.quad init_secondary_pcpu /* default entry is AP start entry */
.global secondary_cpu_stack
secondary_cpu_stack:

View File

@@ -22,7 +22,7 @@ int32_t init_cat_cap_info(void)
uint32_t eax = 0U, ebx = 0U, ecx = 0U, edx = 0U;
int32_t ret = 0;
-if (cpu_has_cap(X86_FEATURE_CAT)) {
+if (pcpu_has_cap(X86_FEATURE_CAT)) {
cpuid_subleaf(CPUID_RSD_ALLOCATION, 0, &eax, &ebx, &ecx, &edx);
/* If support L3 CAT, EBX[1] is set */
if ((ebx & 2U) != 0U) {

View File

@@ -41,10 +41,10 @@ static uint64_t startup_paddr = 0UL;
/* physical cpu active bitmap, support up to 64 cpus */
static uint64_t pcpu_active_bitmap = 0UL;
-static void cpu_xsave_init(void);
-static void set_current_cpu_id(uint16_t pcpu_id);
+static void pcpu_xsave_init(void);
+static void set_current_pcpu_id(uint16_t pcpu_id);
static void print_hv_banner(void);
-static uint16_t get_cpu_id_from_lapic_id(uint32_t lapic_id);
+static uint16_t get_pcpu_id_from_lapic_id(uint32_t lapic_id);
static uint64_t start_tsc __attribute__((__section__(".bss_noinit")));
static void init_percpu_lapic_id(void)
@@ -67,7 +67,7 @@ static void init_percpu_lapic_id(void)
}
}
-static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
+static void pcpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
{
/* Check if state is initializing */
if (state == PCPU_STATE_INITIALIZING) {
@@ -75,7 +75,7 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
atomic_inc16(&up_count);
/* Save this CPU's logical ID to the TSC AUX MSR */
-set_current_cpu_id(pcpu_id);
+set_current_pcpu_id(pcpu_id);
}
/* If cpu is dead, decrement CPU up count */
@@ -102,7 +102,7 @@ uint64_t get_active_pcpu_bitmap(void)
return pcpu_active_bitmap;
}
-void init_cpu_pre(uint16_t pcpu_id_args)
+void init_pcpu_pre(uint16_t pcpu_id_args)
{
uint16_t pcpu_id = pcpu_id_args;
int32_t ret;
@@ -116,19 +116,19 @@ void init_cpu_pre(uint16_t pcpu_id_args)
/* Get CPU capabilities thru CPUID, including the physical address bit
* limit which is required for initializing paging.
*/
-init_cpu_capabilities();
+init_pcpu_capabilities();
init_firmware_operations();
-init_cpu_model_name();
+init_pcpu_model_name();
-load_cpu_state_data();
+load_pcpu_state_data();
/* Initialize the hypervisor paging */
init_e820();
init_paging();
-if (!cpu_has_cap(X86_FEATURE_X2APIC)) {
+if (!pcpu_has_cap(X86_FEATURE_X2APIC)) {
panic("x2APIC is not present!");
}
@@ -154,7 +154,7 @@ void init_cpu_pre(uint16_t pcpu_id_args)
early_init_lapic();
-pcpu_id = get_cpu_id_from_lapic_id(get_cur_lapic_id());
+pcpu_id = get_pcpu_id_from_lapic_id(get_cur_lapic_id());
if (pcpu_id >= CONFIG_MAX_PCPU_NUM) {
panic("Invalid pCPU ID!");
}
@@ -163,10 +163,10 @@ void init_cpu_pre(uint16_t pcpu_id_args)
bitmap_set_nolock(pcpu_id, &pcpu_active_bitmap);
/* Set state for this CPU to initializing */
-cpu_set_current_state(pcpu_id, PCPU_STATE_INITIALIZING);
+pcpu_set_current_state(pcpu_id, PCPU_STATE_INITIALIZING);
}
-void init_cpu_post(uint16_t pcpu_id)
+void init_pcpu_post(uint16_t pcpu_id)
{
#ifdef STACK_PROTECTOR
set_fs_base();
@@ -177,7 +177,7 @@ void init_cpu_post(uint16_t pcpu_id)
enable_smap();
-cpu_xsave_init();
+pcpu_xsave_init();
if (pcpu_id == BOOT_CPU_ID) {
/* Print Hypervisor Banner */
@@ -195,7 +195,7 @@ void init_cpu_post(uint16_t pcpu_id)
pr_acrnlog("API version %u.%u",
HV_API_MAJOR_VERSION, HV_API_MINOR_VERSION);
-pr_acrnlog("Detect processor: %s", (get_cpu_info())->model_name);
+pr_acrnlog("Detect processor: %s", (get_pcpu_info())->model_name);
pr_dbg("Core %hu is up", BOOT_CPU_ID);
@@ -231,11 +231,11 @@ void init_cpu_post(uint16_t pcpu_id)
/* Start all secondary cores */
startup_paddr = prepare_trampoline();
-if (!start_cpus(AP_MASK)) {
+if (!start_pcpus(AP_MASK)) {
panic("Failed to start all secondary cores!");
}
-ASSERT(get_cpu_id() == BOOT_CPU_ID, "");
+ASSERT(get_pcpu_id() == BOOT_CPU_ID, "");
} else {
pr_dbg("Core %hu is up", pcpu_id);
@@ -251,7 +251,7 @@ void init_cpu_post(uint16_t pcpu_id)
setup_clos(pcpu_id);
}
-static uint16_t get_cpu_id_from_lapic_id(uint32_t lapic_id)
+static uint16_t get_pcpu_id_from_lapic_id(uint32_t lapic_id)
{
uint16_t i;
uint16_t pcpu_id = INVALID_CPU_ID;
@@ -266,7 +266,7 @@ static uint16_t get_cpu_id_from_lapic_id(uint32_t lapic_id)
return pcpu_id;
}
-static void start_cpu(uint16_t pcpu_id)
+static void start_pcpu(uint16_t pcpu_id)
{
uint32_t timeout;
@@ -292,7 +292,7 @@ static void start_cpu(uint16_t pcpu_id)
/* Check to see if expected CPU is actually up */
if (!is_pcpu_active(pcpu_id)) {
pr_fatal("Secondary CPU%hu failed to come up", pcpu_id);
-cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
+pcpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
}
}
@@ -305,10 +305,10 @@ static void start_cpu(uint16_t pcpu_id)
* @return true if all cpus set in mask are started
* @return false if there are any cpus set in mask aren't started
*/
-bool start_cpus(uint64_t mask)
+bool start_pcpus(uint64_t mask)
{
uint16_t i;
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
uint64_t expected_start_mask = mask;
/* secondary cpu start up will wait for pcpu_sync -> 0UL */
@@ -322,7 +322,7 @@ bool start_cpus(uint64_t mask)
continue; /* Avoid start itself */
}
-start_cpu(i);
+start_pcpu(i);
i = ffs64(expected_start_mask);
}
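For reference, a typical call site of the renamed start_pcpus() — the same pattern appears in the boot path above and again in the S3 resume path further down in this patch:

/* Bring up all application processors and fail hard if any of them
 * does not come online within start_pcpu()'s timeout. */
if (!start_pcpus(AP_MASK)) {
	panic("Failed to start all secondary cores!");
}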
@@ -343,13 +343,13 @@ void wait_pcpus_offline(uint64_t mask)
}
}
-void stop_cpus(void)
+void stop_pcpus(void)
{
uint16_t pcpu_id;
uint64_t mask = 0UL;
for (pcpu_id = 0U; pcpu_id < phys_cpu_num; pcpu_id++) {
-if (get_cpu_id() == pcpu_id) { /* avoid offline itself */
+if (get_pcpu_id() == pcpu_id) { /* avoid offline itself */
continue;
}
@@ -379,7 +379,7 @@ void cpu_dead(void)
* us to modify the value using a JTAG probe and resume if needed.
*/
int32_t halt = 1;
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
/* clean up native stuff */
@@ -387,7 +387,7 @@ void cpu_dead(void)
cache_flush_invalidate_all();
/* Set state to show CPU is dead */
-cpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
+pcpu_set_current_state(pcpu_id, PCPU_STATE_DEAD);
bitmap_clear_nolock(pcpu_id, &pcpu_active_bitmap);
/* Halt the CPU */
@@ -399,7 +399,7 @@ void cpu_dead(void)
}
}
-static void set_current_cpu_id(uint16_t pcpu_id)
+static void set_current_pcpu_id(uint16_t pcpu_id)
{
/* Write TSC AUX register */
msr_write(MSR_IA32_TSC_AUX, (uint64_t) pcpu_id);
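The ID written here is what the renamed get_pcpu_id() (see cpu.h below) reads back: RDTSCP returns the TSC in EDX:EAX and the current value of IA32_TSC_AUX in ECX. A minimal sketch of the read side, assuming GCC-style inline assembly (the real get_pcpu_id() in cpu.h uses the same tsl/tsh/cpu_id locals):

/* Sketch only: recover the physical CPU ID stored in IA32_TSC_AUX by
 * set_current_pcpu_id() above, without touching memory. */
static inline uint16_t sketch_get_pcpu_id(void)
{
	uint32_t tsl, tsh, cpu_id;

	asm volatile ("rdtscp" : "=a" (tsl), "=d" (tsh), "=c" (cpu_id));
	return (uint16_t)cpu_id;
}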
@@ -442,23 +442,23 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync)
}
}
-static void cpu_xsave_init(void)
+static void pcpu_xsave_init(void)
{
uint64_t val64;
struct cpuinfo_x86 *cpu_info;
-if (cpu_has_cap(X86_FEATURE_XSAVE)) {
+if (pcpu_has_cap(X86_FEATURE_XSAVE)) {
CPU_CR_READ(cr4, &val64);
val64 |= CR4_OSXSAVE;
CPU_CR_WRITE(cr4, val64);
-if (get_cpu_id() == BOOT_CPU_ID) {
+if (get_pcpu_id() == BOOT_CPU_ID) {
uint32_t ecx, unused;
cpuid(CPUID_FEATURES, &unused, &unused, &ecx, &unused);
/* if set, update it */
if ((ecx & CPUID_ECX_OSXSAVE) != 0U) {
-cpu_info = get_cpu_info();
+cpu_info = get_pcpu_info();
cpu_info->cpuid_leaves[FEAT_1_ECX] |= CPUID_ECX_OSXSAVE;
}
}
@@ -478,7 +478,7 @@ void msr_write_pcpu(uint32_t msr_index, uint64_t value64, uint16_t pcpu_id)
struct msr_data_struct msr = {0};
uint64_t mask = 0UL;
-if (pcpu_id == get_cpu_id()) {
+if (pcpu_id == get_pcpu_id()) {
msr_write(msr_index, value64);
} else {
msr.msr_index = msr_index;
@@ -501,7 +501,7 @@ uint64_t msr_read_pcpu(uint32_t msr_index, uint16_t pcpu_id)
uint64_t mask = 0UL;
uint64_t ret = 0;
-if (pcpu_id == get_cpu_id()) {
+if (pcpu_id == get_pcpu_id()) {
ret = msr_read(msr_index);
} else {
msr.msr_index = msr_index;
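A hypothetical call site for the cross-CPU MSR helpers above (the target pcpu number is illustrative): when the caller is already on the target pcpu the access is a plain msr_read()/msr_write(), otherwise it is delegated to that pcpu through the SMP-call mechanism used in these functions.

/* Read IA32_TSC_AUX on pcpu 3, regardless of which CPU runs this. */
uint64_t aux = msr_read_pcpu(MSR_IA32_TSC_AUX, 3U);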

View File

@@ -42,7 +42,7 @@ static struct cpu_capability {
static struct cpuinfo_x86 boot_cpu_data;
-bool cpu_has_cap(uint32_t bit)
+bool pcpu_has_cap(uint32_t bit)
{
uint32_t feat_idx = bit >> 5U;
uint32_t feat_bit = bit & 0x1fU;
@@ -61,7 +61,7 @@ bool has_monitor_cap(void)
{
bool ret = false;
-if (cpu_has_cap(X86_FEATURE_MONITOR)) {
+if (pcpu_has_cap(X86_FEATURE_MONITOR)) {
/* don't use monitor for CPU (family: 0x6 model: 0x5c)
* in hypervisor, but still expose it to the guests and
* let them handle it correctly
@@ -82,7 +82,7 @@ static inline bool is_fast_string_erms_supported_and_enabled(void)
if ((misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) == 0U) {
pr_fatal("%s, fast string is not enabled\n", __func__);
} else {
-if (!cpu_has_cap(X86_FEATURE_ERMS)) {
+if (!pcpu_has_cap(X86_FEATURE_ERMS)) {
pr_fatal("%s, enhanced rep movsb/stosb not supported\n", __func__);
} else {
ret = true;
@@ -179,7 +179,7 @@ static void detect_vmx_mmu_cap(void)
cpu_caps.vmx_vpid = (uint32_t) (val >> 32U);
}
-static void detect_cpu_cap(void)
+static void detect_pcpu_cap(void)
{
detect_apicv_cap();
detect_ept_cap();
@@ -191,7 +191,7 @@ static uint64_t get_address_mask(uint8_t limit)
return ((1UL << limit) - 1UL) & PAGE_MASK;
}
-void init_cpu_capabilities(void)
+void init_pcpu_capabilities(void)
{
uint32_t eax, unused;
uint32_t family, model;
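A worked example of get_address_mask() from the hunk above, as a standalone program; the PAGE_MASK value is an assumption here (4 KiB pages, low 12 bits cleared), as is a 64-bit host:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_MASK (~0xfffUL)	/* assumed: 4 KiB page alignment */

/* Same arithmetic as get_address_mask(boot_cpu_data.phys_bits). */
static uint64_t address_mask(uint8_t limit)
{
	return ((1UL << limit) - 1UL) & PAGE_MASK;
}

int main(void)
{
	/* A CPU reporting 39 physical address bits: bits 12..38 set. */
	printf("0x%016" PRIx64 "\n", address_mask(39U));	/* 0x0000007ffffff000 */
	return 0;
}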
@@ -245,7 +245,7 @@ void init_cpu_capabilities(void)
get_address_mask(boot_cpu_data.phys_bits);
}
-detect_cpu_cap();
+detect_pcpu_cap();
}
static bool is_ept_supported(void)
@@ -263,17 +263,17 @@ bool is_apicv_advanced_feature_supported(void)
return ((cpu_caps.apicv_features & APICV_ADVANCED_FEATURE) == APICV_ADVANCED_FEATURE);
}
-bool cpu_has_vmx_ept_cap(uint32_t bit_mask)
+bool pcpu_has_vmx_ept_cap(uint32_t bit_mask)
{
return ((cpu_caps.vmx_ept & bit_mask) != 0U);
}
-bool cpu_has_vmx_vpid_cap(uint32_t bit_mask)
+bool pcpu_has_vmx_vpid_cap(uint32_t bit_mask)
{
return ((cpu_caps.vmx_vpid & bit_mask) != 0U);
}
-void init_cpu_model_name(void)
+void init_pcpu_model_name(void)
{
cpuid(CPUID_EXTEND_FUNCTION_2,
(uint32_t *)(boot_cpu_data.model_name),
@@ -311,7 +311,7 @@ static inline bool is_vmx_disabled(void)
return ret;
}
-static inline bool cpu_has_vmx_unrestricted_guest_cap(void)
+static inline bool pcpu_has_vmx_unrestricted_guest_cap(void)
{
return ((msr_read(MSR_IA32_VMX_MISC) & VMX_SUPPORT_UNRESTRICTED_GUEST)
!= 0UL);
@@ -321,15 +321,15 @@ static int32_t check_vmx_mmu_cap(void)
{
int32_t ret = 0;
-if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT)) {
+if (!pcpu_has_vmx_ept_cap(VMX_EPT_INVEPT)) {
pr_fatal("%s, invept not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_vmx_vpid_cap(VMX_VPID_INVVPID) ||
-!cpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_SINGLE_CONTEXT) ||
-!cpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_GLOBAL_CONTEXT)) {
+} else if (!pcpu_has_vmx_vpid_cap(VMX_VPID_INVVPID) ||
+!pcpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_SINGLE_CONTEXT) ||
+!pcpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_GLOBAL_CONTEXT)) {
pr_fatal("%s, invvpid not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE)) {
+} else if (!pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE)) {
pr_fatal("%s, ept not support 1GB large page\n", __func__);
ret = -ENODEV;
} else {
@@ -350,41 +350,41 @@ int32_t detect_hardware_support(void)
int32_t ret;
/* Long Mode (x86-64, 64-bit support) */
-if (!cpu_has_cap(X86_FEATURE_LM)) {
+if (!pcpu_has_cap(X86_FEATURE_LM)) {
pr_fatal("%s, LM not supported\n", __func__);
ret = -ENODEV;
} else if ((boot_cpu_data.phys_bits == 0U) ||
(boot_cpu_data.virt_bits == 0U)) {
pr_fatal("%s, can't detect Linear/Physical Address size\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_TSC_DEADLINE)) {
+} else if (!pcpu_has_cap(X86_FEATURE_TSC_DEADLINE)) {
/* lapic TSC deadline timer */
pr_fatal("%s, TSC deadline not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_NX)) {
+} else if (!pcpu_has_cap(X86_FEATURE_NX)) {
/* Execute Disable */
pr_fatal("%s, NX not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_SMEP)) {
+} else if (!pcpu_has_cap(X86_FEATURE_SMEP)) {
/* Supervisor-Mode Execution Prevention */
pr_fatal("%s, SMEP not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_SMAP)) {
+} else if (!pcpu_has_cap(X86_FEATURE_SMAP)) {
/* Supervisor-Mode Access Prevention */
pr_fatal("%s, SMAP not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_MTRR)) {
+} else if (!pcpu_has_cap(X86_FEATURE_MTRR)) {
pr_fatal("%s, MTRR not supported\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_PAGE1GB)) {
+} else if (!pcpu_has_cap(X86_FEATURE_PAGE1GB)) {
pr_fatal("%s, not support 1GB page\n", __func__);
ret = -ENODEV;
-} else if (!cpu_has_cap(X86_FEATURE_VMX)) {
+} else if (!pcpu_has_cap(X86_FEATURE_VMX)) {
pr_fatal("%s, vmx not supported\n", __func__);
ret = -ENODEV;
} else if (!is_fast_string_erms_supported_and_enabled()) {
ret = -ENODEV;
-} else if (!cpu_has_vmx_unrestricted_guest_cap()) {
+} else if (!pcpu_has_vmx_unrestricted_guest_cap()) {
pr_fatal("%s, unrestricted guest not supported\n", __func__);
ret = -ENODEV;
} else if (!is_ept_supported()) {
@@ -412,7 +412,7 @@ int32_t detect_hardware_support(void)
return ret;
}
-struct cpuinfo_x86 *get_cpu_info(void)
+struct cpuinfo_x86 *get_pcpu_info(void)
{
return &boot_cpu_data;
}

View File

@@ -131,11 +131,11 @@ struct cpu_state_info *get_cpu_pm_state_info(void)
return &cpu_pm_state_info;
}
-void load_cpu_state_data(void)
+void load_pcpu_state_data(void)
{
int32_t tbl_idx;
const struct cpu_state_info *state_info;
-struct cpuinfo_x86 *cpu_info = get_cpu_info();
+struct cpuinfo_x86 *cpu_info = get_pcpu_info();
(void)memset(&cpu_pm_state_info, 0U, sizeof(struct cpu_state_info));

View File

@@ -37,7 +37,7 @@ uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size)
const uint64_t *pgentry;
uint64_t pg_size = 0UL;
void *eptp;
-struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_cpu_id());
+struct acrn_vcpu *vcpu = vcpu_from_pid(vm, get_pcpu_id());
if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {
eptp = vm->arch_vm.sworld_eptp;

View File

@@ -596,7 +596,7 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
pr_dbg("vcpu%hu paused, new state: %d",
vcpu->vcpu_id, new_state);

View File

@@ -136,7 +136,7 @@ static void init_vcpuid_entry(uint32_t leaf, uint32_t subleaf,
break;
case 0x16U:
-cpu_info = get_cpu_info();
+cpu_info = get_pcpu_info();
if (cpu_info->cpuid_level >= 0x16U) {
/* call the cpuid when 0x16 is supported */
cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
@@ -199,7 +199,7 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
struct vcpuid_entry entry;
uint32_t limit;
uint32_t i, j;
-struct cpuinfo_x86 *cpu_info = get_cpu_info();
+struct cpuinfo_x86 *cpu_info = get_pcpu_info();
init_vcpuid_entry(0U, 0U, 0U, &entry);
if (cpu_info->cpuid_level < 0x16U) {
@@ -407,7 +407,7 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
break;
case 0x0dU:
-if (!cpu_has_cap(X86_FEATURE_OSXSAVE)) {
+if (!pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
*eax = 0U;
*ebx = 0U;
*ecx = 0U;

View File

@@ -116,7 +116,7 @@ void vcpu_make_request(struct acrn_vcpu *vcpu, uint16_t eventid)
* scheduling, we need change here to determine it target vcpu is
* VMX non-root or root mode
*/
-if (get_cpu_id() != vcpu->pcpu_id) {
+if (get_pcpu_id() != vcpu->pcpu_id) {
send_single_ipi(vcpu->pcpu_id, VECTOR_NOTIFY_VCPU);
}
}

View File

@@ -528,7 +528,7 @@ static void apicv_advanced_accept_intr(struct acrn_vlapic *vlapic, uint32_t vect
*/
bitmap_set_lock(ACRN_REQUEST_EVENT, &vlapic->vcpu->arch.pending_req);
-if (get_cpu_id() != vlapic->vcpu->pcpu_id) {
+if (get_pcpu_id() != vlapic->vcpu->pcpu_id) {
apicv_post_intr(vlapic->vcpu->pcpu_id);
}
}

View File

@@ -479,7 +479,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
wait_pcpus_offline(mask);
-if (is_lapic_pt(vm) && !start_cpus(mask)) {
+if (is_lapic_pt(vm) && !start_pcpus(mask)) {
pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
ret = -ETIMEDOUT;
}

View File

@@ -355,7 +355,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
exec_vmwrite32(VMX_TPR_THRESHOLD, 0U);
}
-if (cpu_has_cap(X86_FEATURE_OSXSAVE)) {
+if (pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
exec_vmwrite64(VMX_XSS_EXITING_BITMAP_FULL, 0UL);
value32 |= VMX_PROCBASED_CTLS2_XSVE_XRSTR;
}

View File

@@ -172,7 +172,7 @@ int32_t vmexit_handler(struct acrn_vcpu *vcpu)
uint16_t basic_exit_reason;
int32_t ret;
-if (get_cpu_id() != vcpu->pcpu_id) {
+if (get_pcpu_id() != vcpu->pcpu_id) {
pr_fatal("vcpu is not running on its pcpu!");
ret = -EINVAL;
} else {

View File

@@ -62,14 +62,14 @@ static void enter_guest_mode(uint16_t pcpu_id)
cpu_dead();
}
-static void init_primary_cpu_post(void)
+static void init_primary_pcpu_post(void)
{
/* Perform any necessary firmware initialization */
init_firmware();
init_debug_pre();
-init_cpu_post(BOOT_CPU_ID);
+init_pcpu_post(BOOT_CPU_ID);
init_seed();
@@ -81,27 +81,27 @@ static void init_primary_cpu_post(void)
/* NOTE: this function is using temp stack, and after SWITCH_TO(runtime_sp, to)
* it will switch to runtime stack.
*/
-void init_primary_cpu(void)
+void init_primary_pcpu(void)
{
uint64_t rsp;
-init_cpu_pre(BOOT_CPU_ID);
+init_pcpu_pre(BOOT_CPU_ID);
/* Switch to run-time stack */
rsp = (uint64_t)(&get_cpu_var(stack)[CONFIG_STACK_SIZE - 1]);
rsp &= ~(CPU_STACK_ALIGN - 1UL);
-SWITCH_TO(rsp, init_primary_cpu_post);
+SWITCH_TO(rsp, init_primary_pcpu_post);
}
-void init_secondary_cpu(void)
+void init_secondary_pcpu(void)
{
uint16_t pcpu_id;
-init_cpu_pre(INVALID_CPU_ID);
+init_pcpu_pre(INVALID_CPU_ID);
-pcpu_id = get_cpu_id();
+pcpu_id = get_pcpu_id();
-init_cpu_post(pcpu_id);
+init_pcpu_post(pcpu_id);
init_debug_post(pcpu_id);

View File

@@ -346,7 +346,7 @@ void dispatch_interrupt(const struct intr_excp_ctx *ctx)
*/
if (irq < NR_IRQS) {
desc = &irq_desc_array[irq];
-per_cpu(irq_count, get_cpu_id())[irq]++;
+per_cpu(irq_count, get_pcpu_id())[irq]++;
if (vr == desc->vector &&
bitmap_test((uint16_t)(irq & 0x3FU), irq_alloc_bitmap + (irq >> 6U)) != 0U) {
@@ -365,7 +365,7 @@ void dispatch_interrupt(const struct intr_excp_ctx *ctx)
void dispatch_exception(struct intr_excp_ctx *ctx)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
/* Obtain lock to ensure exception dump doesn't get corrupted */
spinlock_obtain(&exception_spinlock);

View File

@@ -181,7 +181,7 @@ send_startup_ipi(enum intr_cpu_startup_shorthand cpu_startup_shorthand,
{
union apic_icr icr;
uint8_t shorthand;
-struct cpuinfo_x86 *cpu_info = get_cpu_info();
+struct cpuinfo_x86 *cpu_info = get_pcpu_info();
icr.value = 0U;
icr.bits.destination_mode = INTR_LAPIC_ICR_PHYSICAL;

View File

@@ -127,7 +127,7 @@ void invept(const struct acrn_vcpu *vcpu)
{
struct invept_desc desc = {0};
-if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
+if (pcpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
desc.eptp = hva2hpa(vcpu->vm->arch_vm.nworld_eptp) |
(3UL << 3U) | 6UL;
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
@@ -136,7 +136,7 @@ void invept(const struct acrn_vcpu *vcpu)
| (3UL << 3U) | 6UL;
local_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
}
-} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
+} else if (pcpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT)) {
local_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
} else {
/* Neither type of INVEPT is supported. Skip. */

View File

@@ -23,7 +23,7 @@ static void kick_notification(__unused uint32_t irq, __unused void *data)
/* Notification vector is used to kick taget cpu out of non-root mode.
* And it also serves for smp call.
*/
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
if (bitmap_test(pcpu_id, &smp_call_mask)) {
struct smp_call_info_data *smp_call =

View File

@@ -151,7 +151,7 @@ void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, u
clac();
/* offline all APs */
-stop_cpus();
+stop_pcpus();
stac();
/* Save default main entry and we will restore it after
@@ -188,7 +188,7 @@ void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, u
clac();
/* online all APs again */
-if (!start_cpus(AP_MASK)) {
+if (!start_pcpus(AP_MASK)) {
panic("Failed to start all APs!");
}
}

View File

@@ -35,9 +35,9 @@ static void detect_ibrs(void)
* should be set all the time instead of relying on retpoline
*/
#ifndef CONFIG_RETPOLINE
-if (cpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
+if (pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) {
ibrs_type = IBRS_RAW;
-if (cpu_has_cap(X86_FEATURE_STIBP)) {
+if (pcpu_has_cap(X86_FEATURE_STIBP)) {
ibrs_type = IBRS_OPT;
}
}
@@ -56,15 +56,15 @@ bool check_cpu_security_cap(void)
detect_ibrs();
-if (cpu_has_cap(X86_FEATURE_ARCH_CAP)) {
+if (pcpu_has_cap(X86_FEATURE_ARCH_CAP)) {
x86_arch_capabilities = msr_read(MSR_IA32_ARCH_CAPABILITIES);
skip_l1dfl_vmentry = ((x86_arch_capabilities
& IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) != 0UL);
-if ((!cpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
+if ((!pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) && (!skip_l1dfl_vmentry)) {
ret = false;
-} else if ((!cpu_has_cap(X86_FEATURE_IBRS_IBPB)) &&
-(!cpu_has_cap(X86_FEATURE_STIBP))) {
+} else if ((!pcpu_has_cap(X86_FEATURE_IBRS_IBPB)) &&
+(!pcpu_has_cap(X86_FEATURE_STIBP))) {
ret = false;
} else {
/* No other state currently, do nothing */
@@ -84,7 +84,7 @@ void cpu_l1d_flush(void)
*
*/
if (!skip_l1dfl_vmentry) {
-if (cpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
+if (pcpu_has_cap(X86_FEATURE_L1D_FLUSH)) {
msr_write(MSR_IA32_FLUSH_CMD, IA32_L1D_FLUSH);
}
}

View File

@@ -100,7 +100,7 @@ int32_t add_timer(struct hv_timer *timer)
timer->period_in_cycle = max(timer->period_in_cycle, us_to_ticks(MIN_TIMER_PERIOD_US));
}
-pcpu_id = get_cpu_id();
+pcpu_id = get_pcpu_id();
cpu_timer = &per_cpu(cpu_timers, pcpu_id);
/* update the physical timer if we're on the timer_list head */
@@ -185,7 +185,7 @@ static void timer_softirq(uint16_t pcpu_id)
void timer_init(void)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
int32_t retval = 0;
init_percpu_timer(pcpu_id);
@@ -260,7 +260,7 @@ static uint64_t pit_calibrate_tsc(uint32_t cal_ms_arg)
static uint64_t native_calibrate_tsc(void)
{
uint64_t tsc_hz = 0UL;
-struct cpuinfo_x86 *cpu_info = get_cpu_info();
+struct cpuinfo_x86 *cpu_info = get_pcpu_info();
if (cpu_info->cpuid_level >= 0x15U) {
uint32_t eax_denominator, ebx_numerator, ecx_hz, reserved;

View File

@@ -65,7 +65,7 @@ static void* uefi_get_rsdp(void)
static void uefi_spurious_handler(int32_t vector)
{
-if (get_cpu_id() == BOOT_CPU_ID) {
+if (get_pcpu_id() == BOOT_CPU_ID) {
struct acrn_vcpu *vcpu = per_cpu(vcpu, BOOT_CPU_ID);
if (vcpu != NULL) {

View File

@@ -81,7 +81,7 @@ void vcpu_thread(struct sched_object *obj)
void default_idle(__unused struct sched_object *obj)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
while (1) {
if (need_reschedule(pcpu_id)) {

View File

@@ -181,7 +181,7 @@ void ptirq_deactivate_entry(struct ptirq_remapping_info *entry)
void ptdev_init(void)
{
-if (get_cpu_id() == BOOT_CPU_ID) {
+if (get_pcpu_id() == BOOT_CPU_ID) {
spinlock_init(&ptdev_lock);
register_softirq(SOFTIRQ_PTDEV, ptirq_softirq);

View File

@@ -113,7 +113,7 @@ void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode)
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
bitmap_set_lock(NEED_RESCHEDULE, &ctx->flags);
-if (get_cpu_id() != pcpu_id) {
+if (get_pcpu_id() != pcpu_id) {
switch (delmode) {
case DEL_MODE_IPI:
send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
@@ -140,7 +140,7 @@ void make_pcpu_offline(uint16_t pcpu_id)
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
bitmap_set_lock(NEED_OFFLINE, &ctx->flags);
-if (get_cpu_id() != pcpu_id) {
+if (get_pcpu_id() != pcpu_id) {
send_single_ipi(pcpu_id, VECTOR_NOTIFY_VCPU);
}
}
@@ -168,7 +168,7 @@ static void prepare_switch(struct sched_object *prev, struct sched_object *next)
void schedule(void)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
struct sched_object *next = NULL;
struct sched_object *prev = ctx->curr_obj;
@@ -198,7 +198,7 @@ void run_sched_thread(struct sched_object *obj)
void switch_to_idle(run_thread_t idle_thread)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
struct sched_object *idle = &per_cpu(idle, pcpu_id);
char idle_name[16];

View File

@@ -29,12 +29,12 @@ void register_softirq(uint16_t nr, softirq_handler handler)
*/
void fire_softirq(uint16_t nr)
{
-bitmap_set_lock(nr, &per_cpu(softirq_pending, get_cpu_id()));
+bitmap_set_lock(nr, &per_cpu(softirq_pending, get_pcpu_id()));
}
void do_softirq(void)
{
-uint16_t cpu_id = get_cpu_id();
+uint16_t cpu_id = get_pcpu_id();
volatile uint64_t *softirq_pending_bitmap =
&per_cpu(softirq_pending, cpu_id);
uint16_t nr = ffs64(*softirq_pending_bitmap);

View File

@@ -236,7 +236,7 @@ static void show_host_call_trace(uint64_t rsp, uint64_t rbp_arg, uint16_t pcpu_i
void asm_assert(int32_t line, const char *file, const char *txt)
{
-uint16_t pcpu_id = get_cpu_id();
+uint16_t pcpu_id = get_pcpu_id();
uint64_t rsp = cpu_rsp_get();
uint64_t rbp = cpu_rbp_get();

View File

@@ -56,7 +56,7 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)
timestamp = ticks_to_us(timestamp);
/* Get CPU ID */
-pcpu_id = get_cpu_id();
+pcpu_id = get_pcpu_id();
buffer = per_cpu(logbuf, pcpu_id);
(void)memset(buffer, 0U, LOG_MESSAGE_MAX_SIZE);

View File

@@ -133,7 +133,7 @@ out:
void npk_log_write(const char *buf, size_t buf_len)
{
-uint32_t cpu_id = get_cpu_id();
+uint32_t cpu_id = get_pcpu_id();
struct npk_chan *channel = (struct npk_chan *)base;
const char *p = buf;
int32_t sz;

View File

@@ -39,10 +39,10 @@ extern struct irq_desc irq_desc_array[NR_IRQS];
static void profiling_initialize_vmsw(void)
{
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
}
/*
@@ -58,11 +58,11 @@ static void profiling_initialize_pmi(void)
struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
if (ss == NULL) {
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return;
}
@@ -80,7 +80,7 @@ static void profiling_initialize_pmi(void)
msr_write(msrop->msr_id, msrop->value);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), msrop->msr_id, msrop->value);
+__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
}
}
}
@@ -88,7 +88,7 @@ static void profiling_initialize_pmi(void)
ss->pmu_state = PMU_SETUP;
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
}
/*
@@ -104,11 +104,11 @@ static void profiling_enable_pmu(void)
struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
if (ss == NULL) {
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return;
}
@@ -124,7 +124,7 @@ static void profiling_enable_pmu(void)
if (ss->guest_debugctl_value != 0U) {
/* Merge the msr vmexit loading list with HV */
if (ss->vmexit_msr_cnt == 0) {
-struct acrn_vcpu *vcpu = get_ever_run_vcpu(get_cpu_id());
+struct acrn_vcpu *vcpu = get_ever_run_vcpu(get_pcpu_id());
size = sizeof(struct msr_store_entry) * MAX_HV_MSR_LIST_NUM;
(void)memcpy_s(ss->vmexit_msr_list, size, vcpu->arch.msr_area.host, size);
@@ -156,7 +156,7 @@ static void profiling_enable_pmu(void)
msr_write(msrop->msr_id, msrop->value);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), msrop->msr_id, msrop->value);
+__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
}
}
}
@@ -164,7 +164,7 @@ static void profiling_enable_pmu(void)
ss->pmu_state = PMU_RUNNING;
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
}
/*
@@ -179,12 +179,12 @@ static void profiling_disable_pmu(void)
struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
if (ss != NULL) {
if (ss->vmexit_msr_cnt != 0) {
/* Restore the msr exit loading list of HV */
-struct acrn_vcpu *vcpu = get_ever_run_vcpu(get_cpu_id());
+struct acrn_vcpu *vcpu = get_ever_run_vcpu(get_pcpu_id());
exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa(vcpu->arch.msr_area.host));
exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MAX_HV_MSR_LIST_NUM);
@@ -203,7 +203,7 @@ static void profiling_disable_pmu(void)
msr_write(msrop->msr_id, msrop->value);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), msrop->msr_id, msrop->value);
+__func__, get_pcpu_id(), msrop->msr_id, msrop->value);
}
}
}
@@ -217,10 +217,10 @@ static void profiling_disable_pmu(void)
ss->pmu_state = PMU_SETUP;
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
} else {
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
}
}
@@ -318,16 +318,16 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
spinlock_t *sw_lock = NULL;
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
if (collector == COLLECT_PROFILE_DATA) {
sbuf = (struct shared_buf *)
-per_cpu(sbuf, get_cpu_id())[ACRN_SEP];
+per_cpu(sbuf, get_pcpu_id())[ACRN_SEP];
if (sbuf == NULL) {
ss->samples_dropped++;
dev_dbg(ACRN_DBG_PROFILING, "%s: sbuf is NULL exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return 0;
}
@@ -344,7 +344,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
/* populate the data header */
pkt_header.tsc = rdtsc();
pkt_header.collector_id = collector;
-pkt_header.cpu_id = get_cpu_id();
+pkt_header.cpu_id = get_pcpu_id();
pkt_header.data_type = 1U << type;
pkt_header.reserved = MAGIC_NUMBER;
@@ -364,7 +364,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
break;
default:
pr_err("%s: unknown data type %u on cpu %d",
-__func__, type, get_cpu_id());
+__func__, type, get_pcpu_id());
ret = -1;
break;
}
@@ -378,7 +378,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
dev_dbg(ACRN_DBG_PROFILING,
"%s: not enough space left in sbuf[%d: %d] exiting cpu%d",
__func__, remaining_space,
-DATA_HEADER_SIZE + payload_size, get_cpu_id());
+DATA_HEADER_SIZE + payload_size, get_pcpu_id());
return 0;
}
@@ -397,7 +397,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
} else if (collector == COLLECT_POWER_DATA) {
sbuf = (struct shared_buf *)
-per_cpu(sbuf, get_cpu_id())[ACRN_SOCWATCH];
+per_cpu(sbuf, get_pcpu_id())[ACRN_SOCWATCH];
if (sbuf == NULL) {
dev_dbg(ACRN_DBG_PROFILING,
@@ -419,7 +419,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
/* populate the data header */
pkt_header.tsc = rdtsc();
pkt_header.collector_id = collector;
-pkt_header.cpu_id = get_cpu_id();
+pkt_header.cpu_id = get_pcpu_id();
pkt_header.data_type = (uint16_t)type;
switch (type) {
@@ -442,7 +442,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
break;
default:
pr_err("%s: unknown data type %u on cpu %d",
-__func__, type, get_cpu_id());
+__func__, type, get_pcpu_id());
ret = -1;
break;
}
@@ -453,7 +453,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
if ((DATA_HEADER_SIZE + payload_size) >= (uint64_t)remaining_space) {
pr_err("%s: not enough space in socwatch buffer on cpu %d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return 0;
}
/* copy header */
@@ -485,12 +485,12 @@ static void profiling_handle_msrops(void)
= &(get_cpu_var(profiling_info.sw_msr_op_info));
dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
if ((my_msr_node == NULL) ||
(my_msr_node->msr_op_state != (int32_t)MSR_OP_REQUESTED)) {
dev_dbg(ACRN_DBG_PROFILING, "%s: invalid my_msr_node on cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return;
}
@@ -498,7 +498,7 @@ static void profiling_handle_msrops(void)
(my_msr_node->num_entries >= MAX_MSR_LIST_NUM)) {
dev_dbg(ACRN_DBG_PROFILING,
"%s: invalid num_entries on cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return;
}
@@ -509,7 +509,7 @@ static void profiling_handle_msrops(void)
= msr_read(my_msr_node->entries[i].msr_id);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRREAD cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), my_msr_node->entries[i].msr_id,
+__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
break;
case MSR_OP_READ_CLEAR:
@@ -517,7 +517,7 @@ static void profiling_handle_msrops(void)
= msr_read(my_msr_node->entries[i].msr_id);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRREADCLEAR cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), my_msr_node->entries[i].msr_id,
+__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
msr_write(my_msr_node->entries[i].msr_id, 0U);
break;
@@ -526,13 +526,13 @@ static void profiling_handle_msrops(void)
my_msr_node->entries[i].value);
dev_dbg(ACRN_DBG_PROFILING,
"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
-__func__, get_cpu_id(), my_msr_node->entries[i].msr_id,
+__func__, get_pcpu_id(), my_msr_node->entries[i].msr_id,
my_msr_node->entries[i].value);
break;
default:
pr_err("%s: unknown MSR op_type %u on cpu %d",
__func__, my_msr_node->entries[i].msr_op_type,
-get_cpu_id());
+get_pcpu_id());
break;
}
}
@@ -543,7 +543,7 @@ static void profiling_handle_msrops(void)
if ((my_msr_node->collector_id == COLLECT_POWER_DATA) &&
(sw_msrop != NULL)) {
-sw_msrop->cpu_id = get_cpu_id();
+sw_msrop->cpu_id = get_pcpu_id();
sw_msrop->valid_entries = my_msr_node->num_entries;
/*
@@ -571,7 +571,7 @@ static void profiling_handle_msrops(void)
}
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
}
/*
@@ -589,7 +589,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
if ((ss == NULL) || (psample == NULL)) {
dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
-__func__, get_cpu_id());
+__func__, get_pcpu_id());
return;
}
/* Stop all the counters first */
@@ -630,7 +630,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
psample->csample.os_id
= get_cpu_var(profiling_info.vm_info).guest_vm_id;
(void)memset(psample->csample.task, 0U, 16);
-psample->csample.cpu_id = get_cpu_id();
+psample->csample.cpu_id = get_pcpu_id();
psample->csample.process_id = 0U;
psample->csample.task_id = 0U;
psample->csample.overflow_status = perf_ovf_status;
@@ -645,7 +645,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
} else {
psample->csample.os_id = 0xFFFFU;
(void)memcpy_s(psample->csample.task, 16, "VMM\0", 4);
-psample->csample.cpu_id = get_cpu_id();
+psample->csample.cpu_id = get_pcpu_id();
psample->csample.process_id = 0U;
psample->csample.task_id = 0U;
psample->csample.overflow_status = perf_ovf_status;
@@ -1332,7 +1332,7 @@ void profiling_ipi_handler(__unused void *data)
break;
default:
pr_err("%s: unknown IPI command %d on cpu %d",
-__func__, get_cpu_var(profiling_info.ipi_cmd), get_cpu_id());
+__func__, get_cpu_var(profiling_info.ipi_cmd), get_pcpu_id());
break;
}
get_cpu_var(profiling_info.ipi_cmd) = IPI_UNKNOWN;
@@ -1434,7 +1434,7 @@ void profiling_setup(void)
uint16_t cpu;
int32_t retval;
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
-cpu = get_cpu_id();
+cpu = get_pcpu_id();
/* support PMI notification, SOS_VM will register all CPU */
if ((cpu == BOOT_CPU_ID) && (profiling_pmi_irq == IRQ_INVALID)) {
pr_info("%s: calling request_irq", __func__);

View File

@@ -784,7 +784,7 @@ static int32_t shell_vcpu_dumpreg(int32_t argc, char **argv)
dump.vcpu = vcpu;
dump.str = shell_log_buf;
dump.str_max = SHELL_LOG_BUF_SIZE;
-if (vcpu->pcpu_id == get_cpu_id()) {
+if (vcpu->pcpu_id == get_pcpu_id()) {
vcpu_dumpreg(&dump);
} else {
bitmap_set_nolock(vcpu->pcpu_id, &mask);
@@ -1296,7 +1296,7 @@ static int32_t shell_rdmsr(int32_t argc, char **argv)
uint64_t val = 0;
char str[MAX_STR_SIZE] = {0};
-pcpu_id = get_cpu_id();
+pcpu_id = get_pcpu_id();
switch (argc) {
case 3:
@@ -1332,7 +1332,7 @@ static int32_t shell_wrmsr(int32_t argc, char **argv)
uint32_t msr_index = 0;
uint64_t val = 0;
-pcpu_id = get_cpu_id();
+pcpu_id = get_pcpu_id();
switch (argc) {
case 4:

View File

@@ -61,7 +61,7 @@ static inline void trace_put(uint16_t cpu_id, uint32_t evid, uint32_t n_data, st
void TRACE_2L(uint32_t evid, uint64_t e, uint64_t f)
{
struct trace_entry entry;
-uint16_t cpu_id = get_cpu_id();
+uint16_t cpu_id = get_pcpu_id();
if (!trace_check(cpu_id)) {
return;
@@ -75,7 +75,7 @@ void TRACE_2L(uint32_t evid, uint64_t e, uint64_t f)
void TRACE_4I(uint32_t evid, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
struct trace_entry entry;
-uint16_t cpu_id = get_cpu_id();
+uint16_t cpu_id = get_pcpu_id();
if (!trace_check(cpu_id)) {
return;
@@ -91,7 +91,7 @@ void TRACE_4I(uint32_t evid, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
void TRACE_6C(uint32_t evid, uint8_t a1, uint8_t a2, uint8_t a3, uint8_t a4, uint8_t b1, uint8_t b2)
{
struct trace_entry entry;
-uint16_t cpu_id = get_cpu_id();
+uint16_t cpu_id = get_pcpu_id();
if (!trace_check(cpu_id)) {
return;
@@ -113,7 +113,7 @@ void TRACE_6C(uint32_t evid, uint8_t a1, uint8_t a2, uint8_t a3, uint8_t a4, uin
static inline void TRACE_16STR(uint32_t evid, const char name[])
{
struct trace_entry entry;
-uint16_t cpu_id = get_cpu_id();
+uint16_t cpu_id = get_pcpu_id();
size_t len, i;
if (!trace_check(cpu_id)) {

View File

@@ -256,12 +256,12 @@ enum pcpu_boot_state {
void cpu_do_idle(void);
void cpu_dead(void);
void trampoline_start16(void);
-void load_cpu_state_data(void);
-void init_cpu_pre(uint16_t pcpu_id_args);
-void init_cpu_post(uint16_t pcpu_id);
-bool start_cpus(uint64_t mask);
+void load_pcpu_state_data(void);
+void init_pcpu_pre(uint16_t pcpu_id_args);
+void init_pcpu_post(uint16_t pcpu_id);
+bool start_pcpus(uint64_t mask);
void wait_pcpus_offline(uint64_t mask);
-void stop_cpus(void);
+void stop_pcpus(void);
void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
#define CPU_SEG_READ(seg, result_ptr) \
@@ -412,7 +412,7 @@ cpu_rdtscp_execute(uint64_t *timestamp_ptr, uint32_t *cpu_id_ptr)
* Macro to get CPU ID
* @pre: the return CPU ID would never equal or large than phys_cpu_num.
*/
-static inline uint16_t get_cpu_id(void)
+static inline uint16_t get_pcpu_id(void)
{
uint32_t tsl, tsh, cpu_id;

View File

@@ -38,12 +38,12 @@ struct cpuinfo_x86 {
bool has_monitor_cap(void);
bool is_apicv_advanced_feature_supported(void);
-bool cpu_has_cap(uint32_t bit);
-bool cpu_has_vmx_ept_cap(uint32_t bit_mask);
-bool cpu_has_vmx_vpid_cap(uint32_t bit_mask);
-void init_cpu_capabilities(void);
-void init_cpu_model_name(void);
+bool pcpu_has_cap(uint32_t bit);
+bool pcpu_has_vmx_ept_cap(uint32_t bit_mask);
+bool pcpu_has_vmx_vpid_cap(uint32_t bit_mask);
+void init_pcpu_capabilities(void);
+void init_pcpu_model_name(void);
int32_t detect_hardware_support(void);
-struct cpuinfo_x86 *get_cpu_info(void);
+struct cpuinfo_x86 *get_pcpu_info(void);
#endif /* CPUINFO_H */

View File

@@ -9,7 +9,7 @@
/* hypervisor stack bottom magic('intl') */
#define SP_BOTTOM_MAGIC 0x696e746cUL
-void init_primary_cpu(void);
-void init_secondary_cpu(void);
+void init_primary_pcpu(void);
+void init_secondary_pcpu(void);
#endif /* INIT_H*/

View File

@@ -52,7 +52,7 @@
#define CACHE_LINE_SIZE 64U
/* IA32E Paging constants */
-#define IA32E_REF_MASK ((get_cpu_info())->physical_address_mask)
+#define IA32E_REF_MASK ((get_pcpu_info())->physical_address_mask)
struct acrn_vcpu;
static inline uint64_t round_page_up(uint64_t addr)

View File

@@ -62,6 +62,6 @@ extern struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM];
(per_cpu_data[(pcpu_id)].name)
/* get percpu data for current pcpu */
-#define get_cpu_var(name) per_cpu(name, get_cpu_id())
+#define get_cpu_var(name) per_cpu(name, get_pcpu_id())
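To illustrate the relationship between the two accessors above (the statement itself is hypothetical; softirq_pending is the real per-CPU field used by softirq.c earlier in this patch):

/* Two ways to reach the current pcpu's pending-softirq bitmap: */
uint64_t *explicit_slot = &per_cpu(softirq_pending, get_pcpu_id());
uint64_t *current_slot = &get_cpu_var(softirq_pending);	/* same slot */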
#endif