diff --git a/hypervisor/Makefile b/hypervisor/Makefile index 32b54582e..2c6e751c9 100644 --- a/hypervisor/Makefile +++ b/hypervisor/Makefile @@ -150,6 +150,7 @@ endif # COMMON_C_SRCS += common/notify.c COMMON_C_SRCS += lib/memory.c +COMMON_C_SRCS += common/percpu.c ifeq ($(ARCH),x86) COMMON_C_SRCS += common/ticks.c diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c index a2284a8e0..efc5b4e4c 100644 --- a/hypervisor/arch/x86/cpu.c +++ b/hypervisor/arch/x86/cpu.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -43,7 +43,6 @@ #define CPU_UP_TIMEOUT 100U /* millisecond */ #define CPU_DOWN_TIMEOUT 100U /* millisecond */ -struct per_cpu_region per_cpu_data[MAX_PCPU_NUM] __aligned(PAGE_SIZE); static uint16_t phys_cpu_num = 0U; static uint64_t pcpu_sync = 0UL; static uint64_t startup_paddr = 0UL; @@ -72,7 +71,7 @@ static bool init_percpu_lapic_id(void) if ((phys_cpu_num != 0U) && (phys_cpu_num <= MAX_PCPU_NUM)) { for (i = 0U; i < phys_cpu_num; i++) { - per_cpu(lapic_id, i) = lapic_id_array[i]; + per_cpu(arch.lapic_id, i) = lapic_id_array[i]; } success = true; } @@ -344,7 +343,7 @@ static uint16_t get_pcpu_id_from_lapic_id(uint32_t lapic_id) uint16_t pcpu_id = INVALID_CPU_ID; for (i = 0U; i < phys_cpu_num; i++) { - if (per_cpu(lapic_id, i) == lapic_id) { + if (per_cpu(arch.lapic_id, i) == lapic_id) { pcpu_id = i; break; } diff --git a/hypervisor/arch/x86/gdt.c b/hypervisor/arch/x86/gdt.c index 628f94bc3..3284aef83 100644 --- a/hypervisor/arch/x86/gdt.c +++ b/hypervisor/arch/x86/gdt.c @@ -6,7 +6,7 @@ #include #include -#include +#include static void set_tss_desc(struct tss_64_descriptor *desc, uint64_t tss, size_t tss_limit, uint32_t type) @@ -32,9 +32,9 @@ static inline void load_gdt(struct host_gdt_descriptor *gdtr) void load_gdtr_and_tr(void) { - struct host_gdt *gdt = &get_cpu_var(gdt); + struct host_gdt *gdt = &get_cpu_var(arch.gdt); struct host_gdt_descriptor gdtr; - struct tss_64 *tss = &get_cpu_var(tss); + struct tss_64 *tss = &get_cpu_var(arch.tss); /* first entry is not used */ gdt->rsvd = 0xAAAAAAAAAAAAAAAAUL; @@ -43,9 +43,9 @@ void load_gdtr_and_tr(void) /* ring 0 data sel descriptor */ gdt->data_segment_descriptor = 0x00cf93000000ffffUL; - tss->ist1 = (uint64_t)get_cpu_var(mc_stack) + CONFIG_STACK_SIZE; - tss->ist2 = (uint64_t)get_cpu_var(df_stack) + CONFIG_STACK_SIZE; - tss->ist3 = (uint64_t)get_cpu_var(sf_stack) + CONFIG_STACK_SIZE; + tss->ist1 = (uint64_t)get_cpu_var(arch.mc_stack) + CONFIG_STACK_SIZE; + tss->ist2 = (uint64_t)get_cpu_var(arch.df_stack) + CONFIG_STACK_SIZE; + tss->ist3 = (uint64_t)get_cpu_var(arch.sf_stack) + CONFIG_STACK_SIZE; tss->ist4 = 0UL; /* tss descriptor */ diff --git a/hypervisor/arch/x86/guest/assign.c b/hypervisor/arch/x86/guest/assign.c index ac4da3022..e7fe139eb 100644 --- a/hypervisor/arch/x86/guest/assign.c +++ b/hypervisor/arch/x86/guest/assign.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -65,12 +65,12 @@ static uint32_t calculate_logical_dest_mask(uint64_t pdmask) * one Cluster. So some pCPUs may not be included. * Here we use the first Cluster of all the requested pCPUs. 
*/ - dest_cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK; + dest_cluster_id = per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK; do { bitmap_clear_nolock(pcpu_id, &pcpu_mask); - cluster_id = per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK; + cluster_id = per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_CLUSTER_ID_MASK; if (cluster_id == dest_cluster_id) { - logical_id_mask |= (per_cpu(lapic_ldr, pcpu_id) & X2APIC_LDR_LOGICAL_ID_MASK); + logical_id_mask |= (per_cpu(arch.lapic_ldr, pcpu_id) & X2APIC_LDR_LOGICAL_ID_MASK); } else { pr_warn("The cluster ID of pCPU %d is %d which differs from that (%d) of " "the previous cores in the guest logical destination.\n" diff --git a/hypervisor/arch/x86/guest/pm.c b/hypervisor/arch/x86/guest/pm.c index 11fbe8c38..ac8b90c47 100644 --- a/hypervisor/arch/x86/guest/pm.c +++ b/hypervisor/arch/x86/guest/pm.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include int32_t validate_pstate(const struct acrn_vm *vm, uint64_t perf_ctl) { diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index 8dcc833b5..a9463b1aa 100755 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -214,7 +214,7 @@ static void init_iwkey(struct acrn_vcpu *vcpu) vcpu->arch.IWKey.encryption_key[2] = get_random_value(); vcpu->arch.IWKey.encryption_key[3] = get_random_value(); /* It's always safe to clear whose_iwkey */ - per_cpu(whose_iwkey, pcpuid_from_vcpu(vcpu)) = NULL; + per_cpu(arch.whose_iwkey, pcpuid_from_vcpu(vcpu)) = NULL; } } @@ -224,14 +224,14 @@ void load_iwkey(struct acrn_vcpu *vcpu) /* Only load IWKey with vCPU CR4 keylocker bit enabled */ if (pcpu_has_cap(X86_FEATURE_KEYLOCKER) && vcpu->arch.cr4_kl_enabled && - (get_cpu_var(whose_iwkey) != vcpu)) { + (get_cpu_var(arch.whose_iwkey) != vcpu)) { /* Save/restore xmm0/xmm1/xmm2 during the process */ read_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]); write_xmm_0_2(&vcpu->arch.IWKey.integrity_key[0], &vcpu->arch.IWKey.encryption_key[0], &vcpu->arch.IWKey.encryption_key[2]); asm_loadiwkey(0); write_xmm_0_2(&xmm_save[0], &xmm_save[2], &xmm_save[4]); - get_cpu_var(whose_iwkey) = vcpu; + get_cpu_var(arch.whose_iwkey) = vcpu; } } @@ -611,7 +611,7 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn /* ACRN does not support vCPU migration, one vCPU always runs on * the same pCPU, so PI's ndst is never changed after startup. 
*/ - vcpu->arch.pid.control.bits.ndst = per_cpu(lapic_id, pcpu_id); + vcpu->arch.pid.control.bits.ndst = per_cpu(arch.lapic_id, pcpu_id); /* Create per vcpu vlapic */ vlapic_create(vcpu, pcpu_id); @@ -819,7 +819,7 @@ void kick_vcpu(struct acrn_vcpu *vcpu) { uint16_t pcpu_id = pcpuid_from_vcpu(vcpu); - if ((get_pcpu_id() != pcpu_id) && (per_cpu(vmcs_run, pcpu_id) == vcpu->arch.vmcs)) { + if ((get_pcpu_id() != pcpu_id) && (per_cpu(arch.vmcs_run, pcpu_id) == vcpu->arch.vmcs)) { kick_pcpu(pcpu_id); } } diff --git a/hypervisor/arch/x86/guest/virq.c b/hypervisor/arch/x86/guest/virq.c index b36ce6316..d9e741505 100644 --- a/hypervisor/arch/x86/guest/virq.c +++ b/hypervisor/arch/x86/guest/virq.c @@ -18,6 +18,7 @@ #include #include #include +#include #define EXCEPTION_ERROR_CODE_VALID 8U diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index d25847b46..fc6138315 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include @@ -1917,7 +1917,7 @@ static void inject_msi_for_lapic_pt(struct acrn_vm *vm, uint64_t addr, uint64_t while (vcpu_id != INVALID_BIT_INDEX) { bitmap_clear_nolock(vcpu_id, &vdmask); vcpu = vcpu_from_vid(vm, vcpu_id); - dest |= per_cpu(lapic_ldr, pcpuid_from_vcpu(vcpu)); + dest |= per_cpu(arch.lapic_ldr, pcpuid_from_vcpu(vcpu)); vcpu_id = ffs64(vdmask); } @@ -2072,7 +2072,7 @@ vlapic_x2apic_pt_icr_access(struct acrn_vcpu *vcpu, uint64_t val) default: /* convert the dest from virtual apic_id to physical apic_id */ if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) { - papic_id = per_cpu(lapic_id, pcpuid_from_vcpu(target_vcpu)); + papic_id = per_cpu(arch.lapic_id, pcpuid_from_vcpu(target_vcpu)); dev_dbg(DBG_LEVEL_LAPICPT, "%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx", __func__, target_vcpu->arch.vlapic.vapic_id, papic_id, icr_low); @@ -2203,7 +2203,7 @@ void vlapic_create(struct acrn_vcpu *vcpu, uint16_t pcpu_id) vlapic_init_timer(vlapic); /* Set vLAPIC ID to be same as pLAPIC ID */ - vlapic->vapic_id = per_cpu(lapic_id, pcpu_id); + vlapic->vapic_id = per_cpu(arch.lapic_id, pcpu_id); dev_dbg(DBG_LEVEL_VLAPIC, "vlapic APIC ID : 0x%04x", vlapic->vapic_id); } diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c index 4addc1af8..936487d37 100644 --- a/hypervisor/arch/x86/guest/vm.c +++ b/hypervisor/arch/x86/guest/vm.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/arch/x86/guest/vm_reset.c b/hypervisor/arch/x86/guest/vm_reset.c index 91e15b07c..f80bc850c 100644 --- a/hypervisor/arch/x86/guest/vm_reset.c +++ b/hypervisor/arch/x86/guest/vm_reset.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c index 689e4a846..c35fbb0ed 100644 --- a/hypervisor/arch/x86/guest/vmcs.c +++ b/hypervisor/arch/x86/guest/vmcs.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -146,7 +146,7 @@ void init_host_state(void) exec_vmwrite(VMX_HOST_GDTR_BASE, gdt_base); pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdt_base); - tss_addr = hva2hpa((void *)&get_cpu_var(tss)); + tss_addr = hva2hpa((void *)&get_cpu_var(arch.tss)); /* Set up host TR base fields */ exec_vmwrite(VMX_HOST_TR_BASE, tss_addr); pr_dbg("VMX_HOST_TR_BASE: 0x%016lx ", tss_addr); @@ -558,7 +558,7 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu) 
void init_vmcs(struct acrn_vcpu *vcpu) { uint64_t vmx_rev_id; - void **vmcs_ptr = &get_cpu_var(vmcs_run); + void **vmcs_ptr = &get_cpu_var(arch.vmcs_run); /* Log message */ pr_dbg("Initializing VMCS"); @@ -588,7 +588,7 @@ void init_vmcs(struct acrn_vcpu *vcpu) */ void load_vmcs(const struct acrn_vcpu *vcpu) { - void **vmcs_ptr = &get_cpu_var(vmcs_run); + void **vmcs_ptr = &get_cpu_var(arch.vmcs_run); if (vcpu->launched && (*vmcs_ptr != (void *)vcpu->arch.vmcs)) { load_va_vmcs(vcpu->arch.vmcs); diff --git a/hypervisor/arch/x86/guest/vmexit.c b/hypervisor/arch/x86/guest/vmexit.c index 6138663a3..5433646d2 100644 --- a/hypervisor/arch/x86/guest/vmexit.c +++ b/hypervisor/arch/x86/guest/vmexit.c @@ -23,6 +23,7 @@ #include #include #include +#include /* * According to "SDM APPENDIX C VMX BASIC EXIT REASONS", @@ -487,7 +488,7 @@ static int32_t loadiwkey_vmexit_handler(struct acrn_vcpu *vcpu) vcpu->arch.IWKey.integrity_key[1] = xmm[1]; asm_loadiwkey(0); - get_cpu_var(whose_iwkey) = vcpu; + get_cpu_var(arch.whose_iwkey) = vcpu; } return 0; diff --git a/hypervisor/arch/x86/guest/vmsr.c b/hypervisor/arch/x86/guest/vmsr.c index 65ba1059f..bbfdc32ce 100644 --- a/hypervisor/arch/x86/guest/vmsr.c +++ b/hypervisor/arch/x86/guest/vmsr.c @@ -23,6 +23,7 @@ #include #include #include +#include #define INTERCEPT_DISABLE (0U) #define INTERCEPT_READ (1U << 0U) @@ -1295,7 +1296,7 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu) vcpu->arch.IWKey = vcpu->vm->arch_vm.iwkey_backup; spinlock_release(&vcpu->vm->arch_vm.iwkey_backup_lock); /* Load the new iwkey for this vcpu */ - get_cpu_var(whose_iwkey) = NULL; + get_cpu_var(arch.whose_iwkey) = NULL; load_iwkey(vcpu); vcpu->arch.iwkey_copy_status = 1UL; } diff --git a/hypervisor/arch/x86/init.c b/hypervisor/arch/x86/init.c index b559c713e..b627af872 100644 --- a/hypervisor/arch/x86/init.c +++ b/hypervisor/arch/x86/init.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/arch/x86/irq.c b/hypervisor/arch/x86/irq.c index 8f064bd9c..9f40845c4 100644 --- a/hypervisor/arch/x86/irq.c +++ b/hypervisor/arch/x86/irq.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/arch/x86/lapic.c b/hypervisor/arch/x86/lapic.c index bde1da70c..25ef9270f 100644 --- a/hypervisor/arch/x86/lapic.c +++ b/hypervisor/arch/x86/lapic.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include @@ -113,7 +113,7 @@ void init_lapic(uint16_t pcpu_id) /* Can not put this to early_init_lapic because logical ID is not * updated yet. */ - per_cpu(lapic_ldr, pcpu_id) = (uint32_t) msr_read(MSR_IA32_EXT_APIC_LDR); + per_cpu(arch.lapic_ldr, pcpu_id) = (uint32_t) msr_read(MSR_IA32_EXT_APIC_LDR); } static void save_lapic(struct lapic_regs *regs) @@ -203,7 +203,7 @@ send_startup_ipi(uint16_t dest_pcpu_id, uint64_t cpu_startup_start_address) struct cpuinfo_x86 *cpu_info = get_pcpu_info(); icr.value = 0U; - icr.value_32.hi_32 = per_cpu(lapic_id, dest_pcpu_id); + icr.value_32.hi_32 = per_cpu(arch.lapic_id, dest_pcpu_id); /* Assert INIT IPI */ icr.bits.destination_mode = INTR_LAPIC_ICR_PHYSICAL; @@ -262,7 +262,7 @@ void send_single_ipi(uint16_t pcpu_id, uint32_t vector) msr_write(MSR_IA32_EXT_APIC_SELF_IPI, vector); } else { /* Set the destination field to the target processor. */ - icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id); + icr.value_32.hi_32 = per_cpu(arch.lapic_id, pcpu_id); /* Write the vector ID to ICR. 
*/ icr.value_32.lo_32 = vector | (INTR_LAPIC_ICR_PHYSICAL << 11U); @@ -287,7 +287,7 @@ void send_single_init(uint16_t pcpu_id) * It is not blocked in VMX non-root operation. Instead, INITs cause VM exits */ - icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id); + icr.value_32.hi_32 = per_cpu(arch.lapic_id, pcpu_id); icr.value_32.lo_32 = (INTR_LAPIC_ICR_PHYSICAL << 11U) | (INTR_LAPIC_ICR_INIT << 8U); msr_write(MSR_IA32_EXT_APIC_ICR, icr.value); diff --git a/hypervisor/arch/x86/notify.c b/hypervisor/arch/x86/notify.c index 49d5c288f..1e5f79a0a 100644 --- a/hypervisor/arch/x86/notify.c +++ b/hypervisor/arch/x86/notify.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include static uint32_t notification_irq = IRQ_INVALID; diff --git a/hypervisor/arch/x86/pm.c b/hypervisor/arch/x86/pm.c index 3fb634d15..c5c49982f 100644 --- a/hypervisor/arch/x86/pm.c +++ b/hypervisor/arch/x86/pm.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -167,12 +167,12 @@ void shutdown_system(void) static void suspend_tsc(__unused void *data) { - per_cpu(tsc_suspend, get_pcpu_id()) = rdtsc(); + per_cpu(arch.tsc_suspend, get_pcpu_id()) = rdtsc(); } static void resume_tsc(__unused void *data) { - msr_write(MSR_IA32_TIME_STAMP_COUNTER, per_cpu(tsc_suspend, get_pcpu_id())); + msr_write(MSR_IA32_TIME_STAMP_COUNTER, per_cpu(arch.tsc_suspend, get_pcpu_id())); } void host_enter_s3(const struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val) diff --git a/hypervisor/arch/x86/security.c b/hypervisor/arch/x86/security.c index 1e84023b9..c1c3119bf 100644 --- a/hypervisor/arch/x86/security.c +++ b/hypervisor/arch/x86/security.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/arch/x86/trampoline.c b/hypervisor/arch/x86/trampoline.c index de1e55d98..a9a9b176a 100644 --- a/hypervisor/arch/x86/trampoline.c +++ b/hypervisor/arch/x86/trampoline.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c index d74f874f2..e0d9b569d 100644 --- a/hypervisor/arch/x86/vmx.c +++ b/hypervisor/arch/x86/vmx.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include @@ -40,7 +40,7 @@ void vmx_on(void) { uint64_t tmp64; uint32_t tmp32; - void *vmxon_region_va = (void *)get_cpu_var(vmxon_region); + void *vmxon_region_va = (void *)get_cpu_var(arch.vmxon_region); uint64_t vmxon_region_pa; /* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */ @@ -136,7 +136,7 @@ void clear_va_vmcs(const uint8_t *vmcs_va) */ void vmx_off(void) { - void **vmcs_ptr = &get_cpu_var(vmcs_run); + void **vmcs_ptr = &get_cpu_var(arch.vmcs_run); if (*vmcs_ptr != NULL) { clear_va_vmcs(*vmcs_ptr); diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c index 380175451..ea7df8e0c 100644 --- a/hypervisor/common/hv_main.c +++ b/hypervisor/common/hv_main.c @@ -14,6 +14,7 @@ #include #include #include +#include void vcpu_thread(struct thread_object *obj) { diff --git a/hypervisor/common/hypercall.c b/hypervisor/common/hypercall.c index 4dc58788e..23edff15b 100644 --- a/hypervisor/common/hypercall.c +++ b/hypervisor/common/hypercall.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/common/irq.c b/hypervisor/common/irq.c index d32485415..11b90847c 100644 --- a/hypervisor/common/irq.c +++ b/hypervisor/common/irq.c @@ -9,7 +9,7 @@ #include #include 
#include -#include +#include static spinlock_t irq_alloc_spinlock = { .head = 0U, .tail = 0U, }; diff --git a/hypervisor/common/notify.c b/hypervisor/common/notify.c index 4be8d2134..f8d96646e 100644 --- a/hypervisor/common/notify.c +++ b/hypervisor/common/notify.c @@ -7,10 +7,11 @@ #include #include #include -#include +#include #include #include #include +#include static volatile uint64_t smp_call_mask = 0UL; diff --git a/hypervisor/common/percpu.c b/hypervisor/common/percpu.c new file mode 100644 index 000000000..ba07cf812 --- /dev/null +++ b/hypervisor/common/percpu.c @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2023-2024 Intel Corporation. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + * + * Authors: + * Haicheng Li + */ + +#include +#include + +struct per_cpu_region per_cpu_data[MAX_PCPU_NUM] __aligned(PAGE_SIZE); diff --git a/hypervisor/common/ptdev.c b/hypervisor/common/ptdev.c index 323067ded..ac123c06c 100644 --- a/hypervisor/common/ptdev.c +++ b/hypervisor/common/ptdev.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include #include diff --git a/hypervisor/common/sbuf.c b/hypervisor/common/sbuf.c index 8b8420e9c..d55e44ca5 100644 --- a/hypervisor/common/sbuf.c +++ b/hypervisor/common/sbuf.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include uint32_t sbuf_next_ptr(uint32_t pos_arg, diff --git a/hypervisor/common/sched_bvt.c b/hypervisor/common/sched_bvt.c index cbbfddaa8..6d7b9e5cb 100644 --- a/hypervisor/common/sched_bvt.c +++ b/hypervisor/common/sched_bvt.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include diff --git a/hypervisor/common/sched_iorr.c b/hypervisor/common/sched_iorr.c index ffe7cb881..6e23474b2 100644 --- a/hypervisor/common/sched_iorr.c +++ b/hypervisor/common/sched_iorr.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c index 495a0b91c..9d5e09d3d 100644 --- a/hypervisor/common/schedule.c +++ b/hypervisor/common/schedule.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/common/softirq.c b/hypervisor/common/softirq.c index 29c1dd302..3b0b84dc8 100644 --- a/hypervisor/common/softirq.c +++ b/hypervisor/common/softirq.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include static softirq_handler softirq_handlers[NR_SOFTIRQS]; diff --git a/hypervisor/common/thermal.c b/hypervisor/common/thermal.c index ef9502333..1dea54202 100644 --- a/hypervisor/common/thermal.c +++ b/hypervisor/common/thermal.c @@ -9,6 +9,7 @@ #include #include #include +#include static void thermal_softirq(uint16_t pcpu_id) { diff --git a/hypervisor/common/timer.c b/hypervisor/common/timer.c index ac11f6799..6b7564f2e 100644 --- a/hypervisor/common/timer.c +++ b/hypervisor/common/timer.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/hypervisor/debug/logmsg.c b/hypervisor/debug/logmsg.c index 44c5d4715..b60277249 100644 --- a/hypervisor/debug/logmsg.c +++ b/hypervisor/debug/logmsg.c @@ -5,10 +5,11 @@ */ #include + +#include #include #include #include -#include #include #include #include diff --git a/hypervisor/debug/npk_log.c b/hypervisor/debug/npk_log.c index d1a34843d..f5d7b8f35 100644 --- a/hypervisor/debug/npk_log.c +++ b/hypervisor/debug/npk_log.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/hypervisor/debug/profiling.c b/hypervisor/debug/profiling.c index 
2cbd796f4..c6a5b7c16 100644 --- a/hypervisor/debug/profiling.c +++ b/hypervisor/debug/profiling.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include @@ -56,7 +56,7 @@ static void profiling_initialize_pmi(void) { uint32_t i, group_id; struct profiling_msr_op *msrop = NULL; - struct sep_state *ss = &get_cpu_var(profiling_info.s_state); + struct sep_state *ss = &get_cpu_var(arch.profiling_info.s_state); dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d", __func__, get_pcpu_id()); @@ -102,7 +102,7 @@ static void profiling_enable_pmu(void) uint32_t group_id; uint32_t size; struct profiling_msr_op *msrop = NULL; - struct sep_state *ss = &get_cpu_var(profiling_info.s_state); + struct sep_state *ss = &get_cpu_var(arch.profiling_info.s_state); dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d", __func__, get_pcpu_id()); @@ -177,7 +177,7 @@ static void profiling_disable_pmu(void) uint32_t i; uint32_t group_id; struct profiling_msr_op *msrop = NULL; - struct sep_state *ss = &get_cpu_var(profiling_info.s_state); + struct sep_state *ss = &get_cpu_var(arch.profiling_info.s_state); dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d", __func__, get_pcpu_id()); @@ -309,9 +309,9 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type) uint64_t payload_size = 0UL; void *payload = NULL; struct shared_buf *sbuf = NULL; - struct sep_state *ss = &(get_cpu_var(profiling_info.s_state)); + struct sep_state *ss = &(get_cpu_var(arch.profiling_info.s_state)); struct sw_msr_op_info *sw_msrop - = &(get_cpu_var(profiling_info.sw_msr_info)); + = &(get_cpu_var(arch.profiling_info.sw_msr_info)); uint64_t rflags; spinlock_t *sw_lock = NULL; @@ -348,16 +348,16 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type) switch (type) { case CORE_PMU_SAMPLING: payload_size = CORE_PMU_SAMPLE_SIZE; - payload = &get_cpu_var(profiling_info.p_sample); + payload = &get_cpu_var(arch.profiling_info.p_sample); break; case LBR_PMU_SAMPLING: payload_size = CORE_PMU_SAMPLE_SIZE + LBR_PMU_SAMPLE_SIZE; - payload = &get_cpu_var(profiling_info.p_sample); + payload = &get_cpu_var(arch.profiling_info.p_sample); break; case VM_SWITCH_TRACING: payload_size = VM_SWITCH_TRACE_SIZE; - payload = &get_cpu_var(profiling_info.vm_trace); + payload = &get_cpu_var(arch.profiling_info.vm_trace); break; default: pr_err("%s: unknown data type %u on cpu %d", @@ -394,7 +394,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type) return 0; } - sw_lock = &(get_cpu_var(profiling_info.sw_lock)); + sw_lock = &(get_cpu_var(arch.profiling_info.sw_lock)); spinlock_irqsave_obtain(sw_lock, &rflags); stac(); if (sbuf->tail >= sbuf->head) { @@ -427,7 +427,7 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type) dev_dbg(DBG_LEVEL_PROFILING, "%s: generating vm-switch sample", __func__); payload_size = VM_SWITCH_TRACE_SIZE; - payload = &get_cpu_var(profiling_info.vm_trace); + payload = &get_cpu_var(arch.profiling_info.vm_trace); break; default: pr_err("%s: unknown data type %u on cpu %d", @@ -469,9 +469,9 @@ static void profiling_handle_msrops(void) { uint32_t i, j; struct profiling_msr_ops_list *my_msr_node - = get_cpu_var(profiling_info.msr_node); + = get_cpu_var(arch.profiling_info.msr_node); struct sw_msr_op_info *sw_msrop - = &(get_cpu_var(profiling_info.sw_msr_info)); + = &(get_cpu_var(arch.profiling_info.sw_msr_info)); dev_dbg(DBG_LEVEL_PROFILING, "%s: entering cpu%d", __func__, get_pcpu_id()); @@ -573,8 +573,8 @@ static void profiling_pmi_handler(uint32_t 
irq, __unused void *data) uint32_t i; uint32_t group_id; struct profiling_msr_op *msrop = NULL; - struct pmu_sample *psample = &(get_cpu_var(profiling_info.p_sample)); - struct sep_state *ss = &(get_cpu_var(profiling_info.s_state)); + struct pmu_sample *psample = &(get_cpu_var(arch.profiling_info.p_sample)); + struct sep_state *ss = &(get_cpu_var(arch.profiling_info.s_state)); if ((ss == NULL) || (psample == NULL)) { dev_dbg(DBG_LEVEL_ERR_PROFILING, "%s: exiting cpu%d", @@ -612,24 +612,24 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data) (void)memset(psample, 0U, sizeof(struct pmu_sample)); /* Attribute PMI to guest context */ - if ((get_cpu_var(profiling_info.vm_info).vmexit_reason + if ((get_cpu_var(arch.profiling_info.vm_info).vmexit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) && - ((uint64_t)get_cpu_var(profiling_info.vm_info).external_vector + ((uint64_t)get_cpu_var(arch.profiling_info.vm_info).external_vector == PMI_VECTOR)) { psample->csample.os_id - = get_cpu_var(profiling_info.vm_info).guest_vm_id; + = get_cpu_var(arch.profiling_info.vm_info).guest_vm_id; (void)memset(psample->csample.task, 0U, 16); psample->csample.cpu_id = get_pcpu_id(); psample->csample.process_id = 0U; psample->csample.task_id = 0U; psample->csample.overflow_status = perf_ovf_status; - psample->csample.rip = get_cpu_var(profiling_info.vm_info).guest_rip; + psample->csample.rip = get_cpu_var(arch.profiling_info.vm_info).guest_rip; psample->csample.rflags - = (uint32_t)get_cpu_var(profiling_info.vm_info).guest_rflags; + = (uint32_t)get_cpu_var(arch.profiling_info.vm_info).guest_rflags; psample->csample.cs - = (uint32_t)get_cpu_var(profiling_info.vm_info).guest_cs; - get_cpu_var(profiling_info.vm_info).vmexit_reason = 0U; - get_cpu_var(profiling_info.vm_info).external_vector = -1; + = (uint32_t)get_cpu_var(arch.profiling_info.vm_info).guest_cs; + get_cpu_var(arch.profiling_info.vm_info).vmexit_reason = 0U; + get_cpu_var(arch.profiling_info.vm_info).external_vector = -1; /* Attribute PMI to hypervisor context */ } else { const struct x86_irq_data *irqd = irq_desc_array[irq].arch_data; @@ -727,24 +727,24 @@ static void profiling_start_pmu(void) } for (i = 0U; i < pcpu_nums; i++) { - if (per_cpu(profiling_info.s_state, i).pmu_state != PMU_SETUP) { + if (per_cpu(arch.profiling_info.s_state, i).pmu_state != PMU_SETUP) { pr_err("%s: invalid pmu_state %u on cpu%d", - __func__, get_cpu_var(profiling_info.s_state).pmu_state, i); + __func__, get_cpu_var(arch.profiling_info.s_state).pmu_state, i); return; } } for (i = 0U; i < pcpu_nums; i++) { - per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_START; - per_cpu(profiling_info.s_state, i).samples_logged = 0U; - per_cpu(profiling_info.s_state, i).samples_dropped = 0U; - per_cpu(profiling_info.s_state, i).valid_pmi_count = 0U; - per_cpu(profiling_info.s_state, i).total_pmi_count = 0U; - per_cpu(profiling_info.s_state, i).total_vmexit_count = 0U; - per_cpu(profiling_info.s_state, i).frozen_well = 0U; - per_cpu(profiling_info.s_state, i).frozen_delayed = 0U; - per_cpu(profiling_info.s_state, i).nofrozen_pmi = 0U; - per_cpu(profiling_info.s_state, i).pmu_state = PMU_RUNNING; + per_cpu(arch.profiling_info.ipi_cmd, i) = IPI_PMU_START; + per_cpu(arch.profiling_info.s_state, i).samples_logged = 0U; + per_cpu(arch.profiling_info.s_state, i).samples_dropped = 0U; + per_cpu(arch.profiling_info.s_state, i).valid_pmi_count = 0U; + per_cpu(arch.profiling_info.s_state, i).total_pmi_count = 0U; + per_cpu(arch.profiling_info.s_state, i).total_vmexit_count = 0U; + 
per_cpu(arch.profiling_info.s_state, i).frozen_well = 0U; + per_cpu(arch.profiling_info.s_state, i).frozen_delayed = 0U; + per_cpu(arch.profiling_info.s_state, i).nofrozen_pmi = 0U; + per_cpu(arch.profiling_info.s_state, i).pmu_state = PMU_RUNNING; } smp_call_function(get_active_pcpu_bitmap(), profiling_ipi_handler, NULL); @@ -766,27 +766,27 @@ static void profiling_stop_pmu(void) if (in_pmu_profiling) { for (i = 0U; i < pcpu_nums; i++) { - per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_STOP; - if (per_cpu(profiling_info.s_state, i).pmu_state == PMU_RUNNING) { - per_cpu(profiling_info.s_state, i).pmu_state = PMU_SETUP; + per_cpu(arch.profiling_info.ipi_cmd, i) = IPI_PMU_STOP; + if (per_cpu(arch.profiling_info.s_state, i).pmu_state == PMU_RUNNING) { + per_cpu(arch.profiling_info.s_state, i).pmu_state = PMU_SETUP; } dev_dbg(DBG_LEVEL_PROFILING, "%s: pmi_cnt[%d] = total:%u valid=%u, vmexit_cnt=%u", - __func__, i, per_cpu(profiling_info.s_state, i).total_pmi_count, - per_cpu(profiling_info.s_state, i).valid_pmi_count, - per_cpu(profiling_info.s_state, i).total_vmexit_count); + __func__, i, per_cpu(arch.profiling_info.s_state, i).total_pmi_count, + per_cpu(arch.profiling_info.s_state, i).valid_pmi_count, + per_cpu(arch.profiling_info.s_state, i).total_vmexit_count); dev_dbg(DBG_LEVEL_PROFILING, "%s: cpu%d frozen well:%u frozen delayed=%u, nofrozen_pmi=%u", - __func__, i, per_cpu(profiling_info.s_state, i).frozen_well, - per_cpu(profiling_info.s_state, i).frozen_delayed, - per_cpu(profiling_info.s_state, i).nofrozen_pmi); + __func__, i, per_cpu(arch.profiling_info.s_state, i).frozen_well, + per_cpu(arch.profiling_info.s_state, i).frozen_delayed, + per_cpu(arch.profiling_info.s_state, i).nofrozen_pmi); dev_dbg(DBG_LEVEL_PROFILING, "%s: cpu%d samples captured:%u samples dropped=%u", - __func__, i, per_cpu(profiling_info.s_state, i).samples_logged, - per_cpu(profiling_info.s_state, i).samples_dropped); + __func__, i, per_cpu(arch.profiling_info.s_state, i).samples_logged, + per_cpu(arch.profiling_info.s_state, i).samples_dropped); } @@ -816,8 +816,8 @@ int32_t profiling_msr_ops_all_cpus(struct acrn_vm *vm, uint64_t addr) } for (i = 0U; i < pcpu_nums; i++) { - per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP; - per_cpu(profiling_info.msr_node, i) = &(msr_list[i]); + per_cpu(arch.profiling_info.ipi_cmd, i) = IPI_MSR_OP; + per_cpu(arch.profiling_info.msr_node, i) = &(msr_list[i]); } smp_call_function(get_active_pcpu_bitmap(), profiling_ipi_handler, NULL); @@ -855,7 +855,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr) vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = i; vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = i; vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id - = per_cpu(lapic_id, i); + = per_cpu(arch.lapic_id, i); } vm_info_list.vm_list[vm_idx].num_vcpus = i; vm_info_list.num_vms = 1; @@ -1038,7 +1038,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr) } } for (i = 0U; i < pcpu_nums ; i++) { - per_cpu(profiling_info.soc_state, i) + per_cpu(arch.profiling_info.soc_state, i) = SW_RUNNING; } } else { /* stop socwatch collection */ @@ -1046,7 +1046,7 @@ int32_t profiling_set_control(struct acrn_vm *vm, uint64_t addr) "%s: socwatch stop collection invoked or collection switch not set!", __func__); for (i = 0U; i < pcpu_nums ; i++) { - per_cpu(profiling_info.soc_state, i) + per_cpu(arch.profiling_info.soc_state, i) = SW_STOPPED; } } @@ -1082,12 +1082,12 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr) } for (i = 0U; i < pcpu_nums; i++) { - 
if (!((per_cpu(profiling_info.s_state, i).pmu_state == + if (!((per_cpu(arch.profiling_info.s_state, i).pmu_state == PMU_INITIALIZED) || - (per_cpu(profiling_info.s_state, i).pmu_state == + (per_cpu(arch.profiling_info.s_state, i).pmu_state == PMU_SETUP))) { pr_err("%s: invalid pmu_state %u on cpu%d", - __func__, per_cpu(profiling_info.s_state, i).pmu_state, i); + __func__, per_cpu(arch.profiling_info.s_state, i).pmu_state, i); return -EINVAL; } } @@ -1100,31 +1100,31 @@ int32_t profiling_configure_pmi(struct acrn_vm *vm, uint64_t addr) } for (i = 0U; i < pcpu_nums; i++) { - per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG; - per_cpu(profiling_info.s_state, i).num_pmi_groups + per_cpu(arch.profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG; + per_cpu(arch.profiling_info.s_state, i).num_pmi_groups = pmi_config.num_groups; - (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_initial_msr_list, + (void)memcpy_s((void *)per_cpu(arch.profiling_info.s_state, i).pmi_initial_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM, (void *)pmi_config.initial_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM); - (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_start_msr_list, + (void)memcpy_s((void *)per_cpu(arch.profiling_info.s_state, i).pmi_start_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM, (void *)pmi_config.start_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM); - (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_stop_msr_list, + (void)memcpy_s((void *)per_cpu(arch.profiling_info.s_state, i).pmi_stop_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM, (void *)pmi_config.stop_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM); - (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_entry_msr_list, + (void)memcpy_s((void *)per_cpu(arch.profiling_info.s_state, i).pmi_entry_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM, (void *)pmi_config.entry_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM); - (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_exit_msr_list, + (void)memcpy_s((void *)per_cpu(arch.profiling_info.s_state, i).pmi_exit_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM, (void *)pmi_config.exit_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM); @@ -1159,22 +1159,22 @@ int32_t profiling_configure_vmsw(struct acrn_vm *vm, uint64_t addr) switch (vmsw_config.collector_id) { case COLLECT_PROFILE_DATA: for (i = 0U; i < pcpu_nums; i++) { - per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG; + per_cpu(arch.profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG; (void)memcpy_s( - (void *)per_cpu(profiling_info.s_state, i).vmsw_initial_msr_list, + (void *)per_cpu(arch.profiling_info.s_state, i).vmsw_initial_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM, (void *)vmsw_config.initial_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM); (void)memcpy_s( - (void *)per_cpu(profiling_info.s_state, i).vmsw_entry_msr_list, + (void *)per_cpu(arch.profiling_info.s_state, i).vmsw_entry_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM, (void *)vmsw_config.entry_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM); (void)memcpy_s( - (void *)per_cpu(profiling_info.s_state, i).vmsw_exit_msr_list, + (void *)per_cpu(arch.profiling_info.s_state, i).vmsw_exit_msr_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM, (void 
*)vmsw_config.exit_list, sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM); @@ -1244,9 +1244,9 @@ int32_t profiling_get_status_info(struct acrn_vm *vm, uint64_t gpa) for (i = 0U; i < pcpu_nums; i++) { pstats[i].samples_logged = - per_cpu(profiling_info.s_state, i).samples_logged; + per_cpu(arch.profiling_info.s_state, i).samples_logged; pstats[i].samples_dropped = - per_cpu(profiling_info.s_state, i).samples_dropped; + per_cpu(arch.profiling_info.s_state, i).samples_dropped; } if (copy_to_gpa(vm, &pstats, gpa, @@ -1264,7 +1264,7 @@ int32_t profiling_get_status_info(struct acrn_vm *vm, uint64_t gpa) */ void profiling_ipi_handler(__unused void *data) { - switch (get_cpu_var(profiling_info.ipi_cmd)) { + switch (get_cpu_var(arch.profiling_info.ipi_cmd)) { case IPI_PMU_START: profiling_enable_pmu(); break; @@ -1282,10 +1282,10 @@ void profiling_ipi_handler(__unused void *data) break; default: pr_err("%s: unknown IPI command %d on cpu %d", - __func__, get_cpu_var(profiling_info.ipi_cmd), get_pcpu_id()); + __func__, get_cpu_var(arch.profiling_info.ipi_cmd), get_pcpu_id()); break; } - get_cpu_var(profiling_info.ipi_cmd) = IPI_UNKNOWN; + get_cpu_var(arch.profiling_info.ipi_cmd) = IPI_UNKNOWN; } /* @@ -1293,14 +1293,14 @@ void profiling_ipi_handler(__unused void *data) */ void profiling_vmenter_handler(__unused struct acrn_vcpu *vcpu) { - if (((get_cpu_var(profiling_info.s_state).pmu_state == PMU_RUNNING) && + if (((get_cpu_var(arch.profiling_info.s_state).pmu_state == PMU_RUNNING) && ((sep_collection_switch & (1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL)) || - ((get_cpu_var(profiling_info.soc_state) == SW_RUNNING) && + ((get_cpu_var(arch.profiling_info.soc_state) == SW_RUNNING) && ((socwatch_collection_switch & (1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL))) { - get_cpu_var(profiling_info.vm_info).vmenter_tsc = cpu_ticks(); + get_cpu_var(arch.profiling_info.vm_info).vmenter_tsc = cpu_ticks(); } } @@ -1313,28 +1313,28 @@ void profiling_pre_vmexit_handler(struct acrn_vcpu *vcpu) exit_reason = vcpu->arch.exit_reason & 0xFFFFUL; - if ((get_cpu_var(profiling_info.s_state).pmu_state == PMU_RUNNING) || - (get_cpu_var(profiling_info.soc_state) == SW_RUNNING)) { + if ((get_cpu_var(arch.profiling_info.s_state).pmu_state == PMU_RUNNING) || + (get_cpu_var(arch.profiling_info.soc_state) == SW_RUNNING)) { - get_cpu_var(profiling_info.vm_info).vmexit_tsc = cpu_ticks(); - get_cpu_var(profiling_info.vm_info).vmexit_reason + get_cpu_var(arch.profiling_info.vm_info).vmexit_tsc = cpu_ticks(); + get_cpu_var(arch.profiling_info.vm_info).vmexit_reason = exit_reason; if (exit_reason == VMX_EXIT_REASON_EXTERNAL_INTERRUPT) { - get_cpu_var(profiling_info.vm_info).external_vector + get_cpu_var(arch.profiling_info.vm_info).external_vector = (int32_t)(exec_vmread(VMX_EXIT_INT_INFO) & 0xFFUL); } else { - get_cpu_var(profiling_info.vm_info).external_vector = -1; + get_cpu_var(arch.profiling_info.vm_info).external_vector = -1; } - get_cpu_var(profiling_info.vm_info).guest_rip + get_cpu_var(arch.profiling_info.vm_info).guest_rip = vcpu_get_rip(vcpu); - get_cpu_var(profiling_info.vm_info).guest_rflags + get_cpu_var(arch.profiling_info.vm_info).guest_rflags = vcpu_get_rflags(vcpu); - get_cpu_var(profiling_info.vm_info).guest_cs + get_cpu_var(arch.profiling_info.vm_info).guest_cs = exec_vmread64(VMX_GUEST_CS_SEL); - get_cpu_var(profiling_info.vm_info).guest_vm_id = (int16_t)vcpu->vm->vm_id; + get_cpu_var(arch.profiling_info.vm_info).guest_vm_id = (int16_t)vcpu->vm->vm_id; } } @@ -1343,24 +1343,24 @@ void 
profiling_pre_vmexit_handler(struct acrn_vcpu *vcpu) */ void profiling_post_vmexit_handler(struct acrn_vcpu *vcpu) { - per_cpu(profiling_info.s_state, pcpuid_from_vcpu(vcpu)).total_vmexit_count++; + per_cpu(arch.profiling_info.s_state, pcpuid_from_vcpu(vcpu)).total_vmexit_count++; - if ((get_cpu_var(profiling_info.s_state).pmu_state == PMU_RUNNING) || - (get_cpu_var(profiling_info.soc_state) == SW_RUNNING)) { + if ((get_cpu_var(arch.profiling_info.s_state).pmu_state == PMU_RUNNING) || + (get_cpu_var(arch.profiling_info.soc_state) == SW_RUNNING)) { /* Generate vmswitch sample */ if (((sep_collection_switch & (1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL) || ((socwatch_collection_switch & (1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL)) { - get_cpu_var(profiling_info.vm_trace).os_id + get_cpu_var(arch.profiling_info.vm_trace).os_id = vcpu->vm->vm_id; - get_cpu_var(profiling_info.vm_trace).vm_enter_tsc - = get_cpu_var(profiling_info.vm_info).vmenter_tsc; - get_cpu_var(profiling_info.vm_trace).vm_exit_tsc - = get_cpu_var(profiling_info.vm_info).vmexit_tsc; - get_cpu_var(profiling_info.vm_trace).vm_exit_reason - = get_cpu_var(profiling_info.vm_info).vmexit_reason; + get_cpu_var(arch.profiling_info.vm_trace).vm_enter_tsc + = get_cpu_var(arch.profiling_info.vm_info).vmenter_tsc; + get_cpu_var(arch.profiling_info.vm_trace).vm_exit_tsc + = get_cpu_var(arch.profiling_info.vm_info).vmexit_tsc; + get_cpu_var(arch.profiling_info.vm_trace).vm_exit_reason + = get_cpu_var(arch.profiling_info.vm_info).vmexit_reason; if ((sep_collection_switch & (1UL << (uint64_t)VM_SWITCH_TRACING)) > 0UL) { @@ -1398,16 +1398,16 @@ void profiling_setup(void) profiling_pmi_irq = (uint32_t)retval; } - per_cpu(profiling_info.s_state, cpu).valid_pmi_count = 0U; - per_cpu(profiling_info.s_state, cpu).total_pmi_count = 0U; - per_cpu(profiling_info.s_state, cpu).total_vmexit_count = 0U; - per_cpu(profiling_info.s_state, cpu).pmu_state = PMU_INITIALIZED; - per_cpu(profiling_info.s_state, cpu).vmexit_msr_cnt = 0U; - per_cpu(profiling_info.s_state, cpu).samples_logged = 0U; - per_cpu(profiling_info.s_state, cpu).samples_dropped = 0U; - per_cpu(profiling_info.s_state, cpu).frozen_well = 0U; - per_cpu(profiling_info.s_state, cpu).frozen_delayed = 0U; - per_cpu(profiling_info.s_state, cpu).nofrozen_pmi = 0U; + per_cpu(arch.profiling_info.s_state, cpu).valid_pmi_count = 0U; + per_cpu(arch.profiling_info.s_state, cpu).total_pmi_count = 0U; + per_cpu(arch.profiling_info.s_state, cpu).total_vmexit_count = 0U; + per_cpu(arch.profiling_info.s_state, cpu).pmu_state = PMU_INITIALIZED; + per_cpu(arch.profiling_info.s_state, cpu).vmexit_msr_cnt = 0U; + per_cpu(arch.profiling_info.s_state, cpu).samples_logged = 0U; + per_cpu(arch.profiling_info.s_state, cpu).samples_dropped = 0U; + per_cpu(arch.profiling_info.s_state, cpu).frozen_well = 0U; + per_cpu(arch.profiling_info.s_state, cpu).frozen_delayed = 0U; + per_cpu(arch.profiling_info.s_state, cpu).nofrozen_pmi = 0U; msr_write(MSR_IA32_EXT_APIC_LVT_PMI, PMI_VECTOR | LVT_PERFCTR_BIT_MASK); diff --git a/hypervisor/debug/sbuf.c b/hypervisor/debug/sbuf.c index 178d6928d..d8c48d910 100644 --- a/hypervisor/debug/sbuf.c +++ b/hypervisor/debug/sbuf.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include int32_t sbuf_share_setup(uint16_t pcpu_id, uint32_t sbuf_id, uint64_t *hva) { diff --git a/hypervisor/debug/shell.c b/hypervisor/debug/shell.c index fd0580e39..10a204dd6 100644 --- a/hypervisor/debug/shell.c +++ b/hypervisor/debug/shell.c @@ -10,7 +10,7 @@ #include "shell_priv.h" #include
#include -#include +#include #include #include #include diff --git a/hypervisor/debug/trace.c b/hypervisor/debug/trace.c index 9265af5c9..a53a1c42a 100644 --- a/hypervisor/debug/trace.c +++ b/hypervisor/debug/trace.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include diff --git a/hypervisor/include/arch/riscv/asm/per_cpu.h b/hypervisor/include/arch/riscv/asm/per_cpu.h index f8b056dfd..683d7a25f 100644 --- a/hypervisor/include/arch/riscv/asm/per_cpu.h +++ b/hypervisor/include/arch/riscv/asm/per_cpu.h @@ -10,20 +10,12 @@ #ifndef RISCV_PERCPU_H #define RISCV_PERCPU_H -#include #include -#include #include -struct per_cpu_region { - struct smp_call_info_data smp_call_info; +struct per_cpu_arch { + } __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */ -extern struct per_cpu_region per_cpu_data[MAX_PCPU_NUM]; -/* - * get percpu data for pcpu_id. - */ -#define per_cpu(name, pcpu_id) \ - (per_cpu_data[(pcpu_id)].name) #endif /* RISCV_PERCPU_H */ diff --git a/hypervisor/include/arch/x86/asm/per_cpu.h b/hypervisor/include/arch/x86/asm/per_cpu.h index 4d26cd65f..2cfe93b7f 100644 --- a/hypervisor/include/arch/x86/asm/per_cpu.h +++ b/hypervisor/include/arch/x86/asm/per_cpu.h @@ -4,84 +4,34 @@ * SPDX-License-Identifier: BSD-3-Clause */ -#ifndef PER_CPU_H -#define PER_CPU_H +#ifndef PER_CPU_X86_H +#define PER_CPU_X86_H #include -#include -#include -#include -#include -#include -#include #include +#include #include #include #include -#include -struct per_cpu_region { +struct per_cpu_arch { /* vmxon_region MUST be 4KB-aligned */ uint8_t vmxon_region[PAGE_SIZE]; void *vmcs_run; -#ifdef HV_DEBUG - struct shared_buf *sbuf[ACRN_SBUF_PER_PCPU_ID_MAX]; - char logbuf[LOG_MESSAGE_MAX_SIZE]; - uint32_t npk_log_ref; -#endif - uint64_t irq_count[NR_IRQS]; - uint64_t softirq_pending; - uint64_t spurious; - struct acrn_vcpu *ever_run_vcpu; -#ifdef STACK_PROTECTOR - struct stack_canary stk_canary; -#endif - struct per_cpu_timers cpu_timers; - struct sched_control sched_ctl; - struct sched_noop_control sched_noop_ctl; - struct sched_iorr_control sched_iorr_ctl; - struct sched_bvt_control sched_bvt_ctl; - struct sched_prio_control sched_prio_ctl; - struct thread_object idle; struct host_gdt gdt; struct tss_64 tss; - enum pcpu_boot_state boot_state; - uint64_t pcpu_flag; uint8_t mc_stack[CONFIG_STACK_SIZE] __aligned(16); uint8_t df_stack[CONFIG_STACK_SIZE] __aligned(16); uint8_t sf_stack[CONFIG_STACK_SIZE] __aligned(16); - uint8_t stack[CONFIG_STACK_SIZE] __aligned(16); uint32_t lapic_id; uint32_t lapic_ldr; - uint32_t softirq_servicing; - uint32_t mode_to_kick_pcpu; - uint32_t mode_to_idle; - struct smp_call_info_data smp_call_info; - struct list_head softirq_dev_entry_list; #ifdef PROFILING_ON struct profiling_info_wrapper profiling_info; #endif - uint64_t shutdown_vm_bitmap; uint64_t tsc_suspend; struct acrn_vcpu *whose_iwkey; - /* - * We maintain a per-pCPU array of vCPUs. vCPUs of a VM won't - * share same pCPU. So the maximum possible # of vCPUs that can - * run on a pCPU is CONFIG_MAX_VM_NUM. - * vcpu_array address must be aligned to 64-bit for atomic access - * to avoid contention between offline_vcpu and posted interrupt handler - */ - struct acrn_vcpu *vcpu_array[CONFIG_MAX_VM_NUM] __aligned(8); + } __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */ -extern struct per_cpu_region per_cpu_data[MAX_PCPU_NUM]; -/* - * get percpu data for pcpu_id. 
- */ -#define per_cpu(name, pcpu_id) \ - (per_cpu_data[(pcpu_id)].name) - -/* get percpu data for current pcpu */ -#define get_cpu_var(name) per_cpu(name, get_pcpu_id()) #endif diff --git a/hypervisor/include/common/per_cpu.h b/hypervisor/include/common/per_cpu.h new file mode 100644 index 000000000..ee4a1e0ea --- /dev/null +++ b/hypervisor/include/common/per_cpu.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2018-2022 Intel Corporation. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PER_CPU_H +#define PER_CPU_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +struct per_cpu_region { + /* + * The x86 arch percpu struct member vmxon_region + * needs to be page-size aligned, so we keep it + * at the top to save memory. + */ + struct per_cpu_arch arch; + uint64_t irq_count[NR_IRQS]; + uint64_t softirq_pending; + uint64_t spurious; + struct acrn_vcpu *ever_run_vcpu; +#ifdef STACK_PROTECTOR + struct stack_canary stk_canary; +#endif + struct per_cpu_timers cpu_timers; + /* TODO: we only need the sched_ctl that is configured, + not necessarily all of them */ + struct sched_control sched_ctl; + struct sched_noop_control sched_noop_ctl; + struct sched_iorr_control sched_iorr_ctl; + struct sched_bvt_control sched_bvt_ctl; + struct sched_prio_control sched_prio_ctl; + struct thread_object idle; + uint64_t pcpu_flag; + uint32_t softirq_servicing; + uint32_t mode_to_kick_pcpu; + uint32_t mode_to_idle; + struct smp_call_info_data smp_call_info; + struct list_head softirq_dev_entry_list; + enum pcpu_boot_state boot_state; + uint8_t stack[CONFIG_STACK_SIZE] __aligned(16); + uint64_t shutdown_vm_bitmap; + /* + * We maintain a per-pCPU array of vCPUs. vCPUs of a VM won't + * share same pCPU. So the maximum possible # of vCPUs that can + * run on a pCPU is CONFIG_MAX_VM_NUM. + * vcpu_array address must be aligned to 64-bit for atomic access + * to avoid contention between offline_vcpu and posted interrupt handler + */ + struct acrn_vcpu *vcpu_array[CONFIG_MAX_VM_NUM] __aligned(8); +#ifdef HV_DEBUG + struct shared_buf *sbuf[ACRN_SBUF_PER_PCPU_ID_MAX]; + char logbuf[LOG_MESSAGE_MAX_SIZE]; + uint32_t npk_log_ref; +#endif + +} __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */ + +extern struct per_cpu_region per_cpu_data[MAX_PCPU_NUM]; +/* + * get percpu data for pcpu_id. + */ +#define per_cpu(member_path, pcpu_id) \ + (per_cpu_data[(pcpu_id)].member_path) + +/* get percpu data for current pcpu */ +#define get_cpu_var(member_path) per_cpu(member_path, get_pcpu_id()) + +#endif /* PER_CPU_H */
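
Note (reviewer sketch, not part of the patch): per_cpu() and get_cpu_var() now take a member path instead of a bare member name, which is why call sites change from per_cpu(lapic_id, i) to per_cpu(arch.lapic_id, i). Because the macro is plain textual substitution pasted after the indexed array element, a dotted path expands exactly like a single member did before. A minimal standalone C sketch, with a hypothetical two-member subset standing in for the real structs:

#include <stdint.h>
#include <stdio.h>

#define MAX_PCPU_NUM 4U

struct per_cpu_arch {
	uint32_t lapic_id;	/* arch-specific state now lives under .arch */
	uint32_t lapic_ldr;
};

struct per_cpu_region {
	struct per_cpu_arch arch;	/* arch block kept as the first member */
	uint64_t softirq_pending;	/* common state stays at the top level */
};

static struct per_cpu_region per_cpu_data[MAX_PCPU_NUM];

/* Textual substitution makes both plain and dotted member paths work. */
#define per_cpu(member_path, pcpu_id) (per_cpu_data[(pcpu_id)].member_path)

int main(void)
{
	per_cpu(arch.lapic_id, 1U) = 0x10U;	/* -> per_cpu_data[1U].arch.lapic_id */
	per_cpu(softirq_pending, 1U) = 1UL;	/* -> per_cpu_data[1U].softirq_pending */
	printf("%u %llu\n", (unsigned)per_cpu(arch.lapic_id, 1U),
	       (unsigned long long)per_cpu(softirq_pending, 1U));
	return 0;
}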
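
Note (hypothetical compile-time guards, not in the patch): the comment at the top of struct per_cpu_region relies on two layout invariants: the arch block sits at offset 0, and vmxon_region, the first per_cpu_arch member of a struct declared __aligned(PAGE_SIZE), therefore stays 4KB-aligned as VMXON requires. With simplified stand-in definitions, these invariants could be pinned down with _Static_assert so a later reordering fails at build time rather than at VMXON:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

struct per_cpu_arch {
	uint8_t vmxon_region[PAGE_SIZE];	/* must stay 4KB-aligned for VMXON */
	void *vmcs_run;
} __attribute__((aligned(PAGE_SIZE)));

struct per_cpu_region {
	struct per_cpu_arch arch;		/* first member, offset 0 */
	uint64_t softirq_pending;
} __attribute__((aligned(PAGE_SIZE)));

/* Fire at compile time if someone reorders the members. */
_Static_assert(offsetof(struct per_cpu_region, arch) == 0,
	"arch block must stay first so vmxon_region keeps page alignment");
_Static_assert((offsetof(struct per_cpu_region, arch.vmxon_region) % PAGE_SIZE) == 0,
	"vmxon_region must be 4KB-aligned");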