From f574d5926b7ff37d39a4ee7a8d4c65292b19bf5f Mon Sep 17 00:00:00 2001
From: Min Lim
Date: Tue, 8 Jan 2019 10:41:54 -0800
Subject: [PATCH] profiling: code cleanup

This is a profiling code cleanup to avoid type conversions and to align
tabs/spaces in the data structure definitions.

Tracked-On: #2257
Signed-off-by: Min Lim
Acked-by: Eddie Dong
---
 hypervisor/debug/profiling.c                  |  30 ++---
 hypervisor/include/debug/profiling_internal.h | 127 +++++++++---------
 2 files changed, 79 insertions(+), 78 deletions(-)

diff --git a/hypervisor/debug/profiling.c b/hypervisor/debug/profiling.c
index b9fe9d0a6..6c9405d8c 100644
--- a/hypervisor/debug/profiling.c
+++ b/hypervisor/debug/profiling.c
@@ -13,15 +13,15 @@
 #define MAJOR_VERSION 1
 #define MINOR_VERSION 0
 
-#define LBR_NUM_REGISTERS 32U
-#define PERF_OVF_BIT_MASK 0xC0000070000000FULL
+#define LBR_NUM_REGISTERS	32U
+#define PERF_OVF_BIT_MASK	0xC0000070000000FULL
 #define LVT_PERFCTR_BIT_UNMASK 0xFFFEFFFFU
 #define LVT_PERFCTR_BIT_MASK 0x10000U
 #define VALID_DEBUGCTL_BIT_MASK 0x1801U
 
-static uint64_t sep_collection_switch;
-static uint64_t socwatch_collection_switch;
-static bool in_pmu_profiling;
+static uint64_t	sep_collection_switch;
+static uint64_t	socwatch_collection_switch;
+static bool	in_pmu_profiling;
 
 static uint32_t profiling_pmi_irq = IRQ_INVALID;
 
@@ -616,7 +616,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
 		((uint64_t)get_cpu_var(profiling_info.vm_info).external_vector
 		== VECTOR_PMI)) {
 		psample->csample.os_id
-			=(uint32_t) get_cpu_var(profiling_info.vm_info).guest_vm_id;
+			= get_cpu_var(profiling_info.vm_info).guest_vm_id;
 		(void)memset(psample->csample.task, 0U, 16);
 		psample->csample.cpu_id = get_cpu_id();
 		psample->csample.process_id = 0U;
@@ -631,7 +631,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
 		get_cpu_var(profiling_info.vm_info).external_vector = -1;
 	/* Attribute PMI to hypervisor context */
 	} else {
-		psample->csample.os_id = 0xFFFFFFFFU;
+		psample->csample.os_id = 0xFFFFU;
 		(void)memcpy_s(psample->csample.task, 16, "VMM\0", 4);
 		psample->csample.cpu_id = get_cpu_id();
 		psample->csample.process_id = 0U;
@@ -867,12 +867,12 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 	vm_info_list.vm_list[vm_idx].vm_id_num = -1;
 	(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].vm_name, 4U, "VMM\0", 4U);
 	for (i = 0U; i < pcpu_nums; i++) {
-		vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = (int32_t)i;
-		vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = (int32_t)i;
+		vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = i;
+		vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = i;
 		vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id
-			= (int32_t)per_cpu(lapic_id, i);
+			= per_cpu(lapic_id, i);
 	}
-	vm_info_list.vm_list[vm_idx].num_vcpus = (int32_t)i;
+	vm_info_list.vm_list[vm_idx].num_vcpus = i;
 	vm_info_list.num_vms = 1;
 
 	for (j = 0U; j < CONFIG_MAX_VM_NUM; j++) {
@@ -883,7 +883,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 		vm_info_list.num_vms++;
 		vm_idx++;
 
-		vm_info_list.vm_list[vm_idx].vm_id_num = (int32_t)tmp_vm->vm_id;
+		vm_info_list.vm_list[vm_idx].vm_id_num = tmp_vm->vm_id;
 		(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].guid,
 			16U, tmp_vm->GUID, 16U);
 		snprintf(vm_info_list.vm_list[vm_idx].vm_name, 16U, "vm_%d",
@@ -892,9 +892,9 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 		i = 0U;
 		foreach_vcpu(i, tmp_vm, vcpu) {
 			vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id
-				= (int32_t)vcpu->vcpu_id;
+				= vcpu->vcpu_id;
 			vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id
-				= (int32_t)vcpu->pcpu_id;
+				= vcpu->pcpu_id;
 			vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id = 0;
 			vm_info_list.vm_list[vm_idx].num_vcpus++;
 		}
@@ -1363,7 +1363,7 @@ void profiling_post_vmexit_handler(struct acrn_vcpu *vcpu)
 		((socwatch_collection_switch &
 			(1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL)) {
 		get_cpu_var(profiling_info.vm_switch_trace).os_id
-			= (int32_t)vcpu->vm->vm_id;
+			= vcpu->vm->vm_id;
 		get_cpu_var(profiling_info.vm_switch_trace).vm_enter_tsc
 			= get_cpu_var(profiling_info.vm_info).vmenter_tsc;
 		get_cpu_var(profiling_info.vm_switch_trace).vm_exit_tsc
diff --git a/hypervisor/include/debug/profiling_internal.h b/hypervisor/include/debug/profiling_internal.h
index c14f79f6e..8857031e3 100644
--- a/hypervisor/include/debug/profiling_internal.h
+++ b/hypervisor/include/debug/profiling_internal.h
@@ -9,19 +9,17 @@
 
 #ifdef PROFILING_ON
 
-#define MAX_NR_VCPUS 8
-#define MAX_NR_VMS 6
-
-#define MAX_MSR_LIST_NUM 15U
-#define MAX_GROUP_NUM 1U
+#define MAX_MSR_LIST_NUM	15U
+#define MAX_GROUP_NUM		1U
 
 #define COLLECT_PROFILE_DATA 0
-#define COLLECT_POWER_DATA 1
+#define COLLECT_POWER_DATA	1
 
-#define SEP_BUF_ENTRY_SIZE 32U
-#define SOCWATCH_MSR_OP 100U
+#define SEP_BUF_ENTRY_SIZE	32U
+#define SOCWATCH_MSR_OP		100U
+
+#define MAGIC_NUMBER		0x99999988U
 
-#define MAGIC_NUMBER 0x99999988U
 enum MSR_CMD_STATUS {
 	MSR_OP_READY = 0,
 	MSR_OP_REQUESTED,
@@ -95,28 +93,28 @@ struct profiling_pcpuid {
 };
 
 struct profiling_control {
-	int32_t collector_id;
-	int32_t reserved;
-	uint64_t switches;
+	int32_t		collector_id;
+	int32_t		reserved;
+	uint64_t	switches;
 };
 
 struct profiling_vcpu_pcpu_map {
-	int32_t vcpu_id;
-	int32_t pcpu_id;
-	int32_t apic_id;
+	int16_t		vcpu_id;
+	int16_t		pcpu_id;
+	uint32_t	apic_id;
 };
 
 struct profiling_vm_info {
-	int32_t vm_id_num;
-	uint8_t guid[16];
-	char vm_name[16];
-	int32_t num_vcpus;
-	struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS];
+	uint16_t	vm_id_num;
+	uint8_t		guid[16];
+	char		vm_name[16];
+	uint16_t	num_vcpus;
+	struct profiling_vcpu_pcpu_map cpu_map[CONFIG_MAX_VCPUS_PER_VM];
 };
 
 struct profiling_vm_info_list {
-	int32_t num_vms;
-	struct profiling_vm_info vm_list[MAX_NR_VMS];
+	uint16_t	num_vms;
+	struct profiling_vm_info vm_list[CONFIG_MAX_VM_NUM];
 };
 
 struct sw_msr_op_info {
@@ -128,19 +126,19 @@ struct sw_msr_op_info {
 struct profiling_msr_op {
 	/* value to write or location to write into */
-	uint64_t value;
+	uint64_t	value;
 	/* MSR address to read/write; last entry will have value of -1 */
-	uint32_t msr_id;
+	uint32_t	msr_id;
 	/* parameter; usage depends on operation */
-	uint16_t param;
-	uint8_t msr_op_type;
-	uint8_t reg_type;
+	uint16_t	param;
+	uint8_t		msr_op_type;
+	uint8_t		reg_type;
 };
 
 struct profiling_msr_ops_list {
-	int32_t collector_id;
-	uint32_t num_entries;
-	int32_t msr_op_state;
+	int32_t		collector_id;
+	uint32_t	num_entries;
+	int32_t		msr_op_state;
 	struct profiling_msr_op entries[MAX_MSR_LIST_NUM];
 };
 
 struct profiling_pmi_config {
@@ -167,14 +165,14 @@ struct vmexit_msr {
 };
 
 struct guest_vm_info {
-	uint64_t vmenter_tsc;
-	uint64_t vmexit_tsc;
-	uint64_t vmexit_reason;
-	uint64_t guest_rip;
-	uint64_t guest_rflags;
-	uint64_t guest_cs;
-	int32_t guest_vm_id;
-	int32_t external_vector;
+	uint64_t	vmenter_tsc;
+	uint64_t	vmexit_tsc;
+	uint64_t	vmexit_reason;
+	uint64_t	guest_rip;
+	uint64_t	guest_rflags;
+	uint64_t	guest_cs;
+	uint16_t	guest_vm_id;
+	int32_t		external_vector;
 };
 struct sep_state {
 	sep_pmu_state pmu_state;
@@ -230,23 +228,25 @@ struct data_header {
 #define DATA_HEADER_SIZE ((uint64_t)sizeof(struct data_header))
 struct core_pmu_sample {
 	/* context where PMI is triggered */
-	uint32_t os_id;
+	uint16_t	os_id;
+	/* reserved */
+	uint16_t	reserved;
 	/* the task id */
-	uint32_t task_id;
+	uint32_t	task_id;
 	/* instruction pointer */
-	uint64_t rip;
+	uint64_t	rip;
 	/* the task name */
-	char task[16];
+	char		task[16];
 	/* physical cpu ID */
-	uint32_t cpu_id;
+	uint32_t	cpu_id;
 	/* the process id */
-	uint32_t process_id;
+	uint32_t	process_id;
 	/* perf global status msr value (for overflow status) */
-	uint64_t overflow_status;
+	uint64_t	overflow_status;
 	/* rflags */
-	uint32_t rflags;
+	uint32_t	rflags;
 	/* code segment */
-	uint32_t cs;
+	uint32_t	cs;
 } __aligned(SEP_BUF_ENTRY_SIZE);
 
 #define CORE_PMU_SAMPLE_SIZE ((uint64_t)sizeof(struct core_pmu_sample))
@@ -254,28 +254,29 @@ struct core_pmu_sample {
 struct lbr_pmu_sample {
 	/* LBR TOS */
-	uint64_t lbr_tos;
+	uint64_t	lbr_tos;
 	/* LBR FROM IP */
-	uint64_t lbr_from_ip[NUM_LBR_ENTRY];
+	uint64_t	lbr_from_ip[NUM_LBR_ENTRY];
 	/* LBR TO IP */
-	uint64_t lbr_to_ip[NUM_LBR_ENTRY];
+	uint64_t	lbr_to_ip[NUM_LBR_ENTRY];
 	/* LBR info */
-	uint64_t lbr_info[NUM_LBR_ENTRY];
+	uint64_t	lbr_info[NUM_LBR_ENTRY];
 } __aligned(SEP_BUF_ENTRY_SIZE);
 
 #define LBR_PMU_SAMPLE_SIZE ((uint64_t)sizeof(struct lbr_pmu_sample))
 
 struct pmu_sample {
 	/* core pmu sample */
-	struct core_pmu_sample csample;
+	struct core_pmu_sample	csample;
 	/* lbr pmu sample */
-	struct lbr_pmu_sample lsample;
+	struct lbr_pmu_sample	lsample;
 } __aligned(SEP_BUF_ENTRY_SIZE);
 
 struct vm_switch_trace {
 	uint64_t vm_enter_tsc;
 	uint64_t vm_exit_tsc;
 	uint64_t vm_exit_reason;
-	int32_t os_id;
+	uint16_t os_id;
+	uint16_t reserved;
 }__aligned(SEP_BUF_ENTRY_SIZE);
 
 #define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace))
@@ -283,15 +284,15 @@ struct vm_switch_trace {
  * Wrapper containing SEP sampling/profiling related data structures
  */
 struct profiling_info_wrapper {
-	struct profiling_msr_ops_list *msr_node;
-	struct sep_state sep_state;
-	struct guest_vm_info vm_info;
-	ipi_commands ipi_cmd;
-	struct pmu_sample pmu_sample;
-	struct vm_switch_trace vm_switch_trace;
-	socwatch_state soc_state;
-	struct sw_msr_op_info sw_msr_op_info;
-	spinlock_t sw_lock;
+	struct profiling_msr_ops_list	*msr_node;
+	struct sep_state		sep_state;
+	struct guest_vm_info		vm_info;
+	ipi_commands			ipi_cmd;
+	struct pmu_sample		pmu_sample;
+	struct vm_switch_trace		vm_switch_trace;
+	socwatch_state			soc_state;
+	struct sw_msr_op_info		sw_msr_op_info;
+	spinlock_t			sw_lock;
 } __aligned(8);
 
 int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr);