profiling: code cleanup

Clean up the profiling code to avoid unnecessary type conversions
and to align tabs/spaces in the data structure definitions.

Tracked-On: #2257
Signed-off-by: Min Lim <min.yeol.lim@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Authored by Min Lim on 2019-01-08 10:41:54 -08:00; committed by wenlingz
parent e2971ebc98
commit f574d5926b
2 changed files with 79 additions and 78 deletions
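
For illustration, a minimal standalone C sketch of the kind of conversion this patch removes (the struct and function names here are hypothetical; only the field widths and the os_id/guest_vm_id pairing come from the diff below). Once the destination field has the same width and signedness as the value being stored, the explicit cast is no longer needed:

#include <stdint.h>

/* Hypothetical before/after illustration, not code from the patch. */
struct sample_old {
	uint32_t os_id;              /* old type: uint32_t */
};

struct sample_new {
	uint16_t os_id;              /* new type: uint16_t, matching the source value */
	uint16_t reserved;           /* keeps the original 32-bit slot occupied */
};

static void record_old(struct sample_old *s, int32_t guest_vm_id)
{
	s->os_id = (uint32_t)guest_vm_id;   /* cast required: int32_t -> uint32_t */
}

static void record_new(struct sample_new *s, uint16_t guest_vm_id)
{
	s->os_id = guest_vm_id;             /* same type on both sides, no cast */
}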


@@ -13,15 +13,15 @@
 #define MAJOR_VERSION 1
 #define MINOR_VERSION 0
 
 #define LBR_NUM_REGISTERS 32U
 #define PERF_OVF_BIT_MASK 0xC0000070000000FULL
 #define LVT_PERFCTR_BIT_UNMASK 0xFFFEFFFFU
 #define LVT_PERFCTR_BIT_MASK 0x10000U
 #define VALID_DEBUGCTL_BIT_MASK 0x1801U
 
 static uint64_t sep_collection_switch;
 static uint64_t socwatch_collection_switch;
 
 static bool in_pmu_profiling;
 
 static uint32_t profiling_pmi_irq = IRQ_INVALID;
@@ -616,7 +616,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
 			((uint64_t)get_cpu_var(profiling_info.vm_info).external_vector
 				== VECTOR_PMI)) {
 			psample->csample.os_id
-				=(uint32_t) get_cpu_var(profiling_info.vm_info).guest_vm_id;
+				= get_cpu_var(profiling_info.vm_info).guest_vm_id;
 			(void)memset(psample->csample.task, 0U, 16);
 			psample->csample.cpu_id = get_cpu_id();
 			psample->csample.process_id = 0U;
@@ -631,7 +631,7 @@ static void profiling_pmi_handler(uint32_t irq, __unused void *data)
 			get_cpu_var(profiling_info.vm_info).external_vector = -1;
 		/* Attribute PMI to hypervisor context */
 		} else {
-			psample->csample.os_id = 0xFFFFFFFFU;
+			psample->csample.os_id = 0xFFFFU;
 			(void)memcpy_s(psample->csample.task, 16, "VMM\0", 4);
 			psample->csample.cpu_id = get_cpu_id();
 			psample->csample.process_id = 0U;
@@ -867,12 +867,12 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 	vm_info_list.vm_list[vm_idx].vm_id_num = -1;
 	(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].vm_name, 4U, "VMM\0", 4U);
 	for (i = 0U; i < pcpu_nums; i++) {
-		vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = (int32_t)i;
-		vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = (int32_t)i;
+		vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = i;
+		vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = i;
 		vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id
-			= (int32_t)per_cpu(lapic_id, i);
+			= per_cpu(lapic_id, i);
 	}
-	vm_info_list.vm_list[vm_idx].num_vcpus = (int32_t)i;
+	vm_info_list.vm_list[vm_idx].num_vcpus = i;
 	vm_info_list.num_vms = 1;
 
 	for (j = 0U; j < CONFIG_MAX_VM_NUM; j++) {
@@ -883,7 +883,7 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 			vm_info_list.num_vms++;
 			vm_idx++;
 
-			vm_info_list.vm_list[vm_idx].vm_id_num = (int32_t)tmp_vm->vm_id;
+			vm_info_list.vm_list[vm_idx].vm_id_num = tmp_vm->vm_id;
 			(void)memcpy_s((void *)vm_info_list.vm_list[vm_idx].guid,
 				16U, tmp_vm->GUID, 16U);
 			snprintf(vm_info_list.vm_list[vm_idx].vm_name, 16U, "vm_%d",
@@ -892,9 +892,9 @@ int32_t profiling_vm_list_info(struct acrn_vm *vm, uint64_t addr)
 			i = 0U;
 			foreach_vcpu(i, tmp_vm, vcpu) {
 				vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id
-					= (int32_t)vcpu->vcpu_id;
+					= vcpu->vcpu_id;
 				vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id
-					= (int32_t)vcpu->pcpu_id;
+					= vcpu->pcpu_id;
 				vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id = 0;
 				vm_info_list.vm_list[vm_idx].num_vcpus++;
 			}
@@ -1363,7 +1363,7 @@ void profiling_post_vmexit_handler(struct acrn_vcpu *vcpu)
 		((socwatch_collection_switch &
 			(1UL << (uint64_t)SOCWATCH_VM_SWITCH_TRACING)) > 0UL)) {
 		get_cpu_var(profiling_info.vm_switch_trace).os_id
-			= (int32_t)vcpu->vm->vm_id;
+			= vcpu->vm->vm_id;
 		get_cpu_var(profiling_info.vm_switch_trace).vm_enter_tsc
 			= get_cpu_var(profiling_info.vm_info).vmenter_tsc;
 		get_cpu_var(profiling_info.vm_switch_trace).vm_exit_tsc


@@ -9,19 +9,17 @@
 #ifdef PROFILING_ON
 
-#define MAX_NR_VCPUS 8
-#define MAX_NR_VMS 6
 #define MAX_MSR_LIST_NUM 15U
 #define MAX_GROUP_NUM 1U
 
 #define COLLECT_PROFILE_DATA 0
 #define COLLECT_POWER_DATA 1
 
 #define SEP_BUF_ENTRY_SIZE 32U
 #define SOCWATCH_MSR_OP 100U
 
 #define MAGIC_NUMBER 0x99999988U
 
 enum MSR_CMD_STATUS {
 	MSR_OP_READY = 0,
 	MSR_OP_REQUESTED,
@@ -95,28 +93,28 @@ struct profiling_pcpuid {
 };
 
 struct profiling_control {
 	int32_t collector_id;
 	int32_t reserved;
 	uint64_t switches;
 };
 
 struct profiling_vcpu_pcpu_map {
-	int32_t vcpu_id;
-	int32_t pcpu_id;
-	int32_t apic_id;
+	int16_t vcpu_id;
+	int16_t pcpu_id;
+	uint32_t apic_id;
 };
 
 struct profiling_vm_info {
-	int32_t vm_id_num;
+	uint16_t vm_id_num;
 	uint8_t guid[16];
 	char vm_name[16];
-	int32_t num_vcpus;
-	struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS];
+	uint16_t num_vcpus;
+	struct profiling_vcpu_pcpu_map cpu_map[CONFIG_MAX_VCPUS_PER_VM];
 };
 
 struct profiling_vm_info_list {
-	int32_t num_vms;
-	struct profiling_vm_info vm_list[MAX_NR_VMS];
+	uint16_t num_vms;
+	struct profiling_vm_info vm_list[CONFIG_MAX_VM_NUM];
 };
 
 struct sw_msr_op_info {
@@ -128,19 +126,19 @@ struct sw_msr_op_info {
 
 struct profiling_msr_op {
 	/* value to write or location to write into */
 	uint64_t value;
 	/* MSR address to read/write; last entry will have value of -1 */
 	uint32_t msr_id;
 	/* parameter; usage depends on operation */
 	uint16_t param;
 	uint8_t msr_op_type;
 	uint8_t reg_type;
 };
 
 struct profiling_msr_ops_list {
 	int32_t collector_id;
 	uint32_t num_entries;
 	int32_t msr_op_state;
 	struct profiling_msr_op entries[MAX_MSR_LIST_NUM];
 };
 
 struct profiling_pmi_config {
@@ -167,14 +165,14 @@ struct vmexit_msr {
 };
 
 struct guest_vm_info {
 	uint64_t vmenter_tsc;
 	uint64_t vmexit_tsc;
 	uint64_t vmexit_reason;
 	uint64_t guest_rip;
 	uint64_t guest_rflags;
 	uint64_t guest_cs;
-	int32_t guest_vm_id;
+	uint16_t guest_vm_id;
 	int32_t external_vector;
 };
 
 struct sep_state {
 	sep_pmu_state pmu_state;
@@ -230,23 +228,25 @@ struct data_header {
 #define DATA_HEADER_SIZE ((uint64_t)sizeof(struct data_header))
 
 struct core_pmu_sample {
 	/* context where PMI is triggered */
-	uint32_t os_id;
+	uint16_t os_id;
+	/* reserved */
+	uint16_t reserved;
 	/* the task id */
 	uint32_t task_id;
 	/* instruction pointer */
 	uint64_t rip;
 	/* the task name */
 	char task[16];
 	/* physical cpu ID */
 	uint32_t cpu_id;
 	/* the process id */
 	uint32_t process_id;
 	/* perf global status msr value (for overflow status) */
 	uint64_t overflow_status;
 	/* rflags */
 	uint32_t rflags;
 	/* code segment */
 	uint32_t cs;
 } __aligned(SEP_BUF_ENTRY_SIZE);
 #define CORE_PMU_SAMPLE_SIZE ((uint64_t)sizeof(struct core_pmu_sample))
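
A note on the new reserved member (my reading, not stated in the patch): pairing uint16_t os_id with uint16_t reserved keeps task_id and the later members at the same byte offsets as when os_id was a single uint32_t, so the 32-byte-aligned sample layout presumably stays unchanged for the SEP-side consumer. A minimal compile-time sketch of that property, restated with standard C11/GCC constructs instead of the hypervisor's own __aligned macro:

#include <stdint.h>
#include <stddef.h>

struct core_pmu_sample {
	uint16_t os_id;
	uint16_t reserved;
	uint32_t task_id;
	uint64_t rip;
	char task[16];
	uint32_t cpu_id;
	uint32_t process_id;
	uint64_t overflow_status;
	uint32_t rflags;
	uint32_t cs;
} __attribute__((aligned(32)));   /* stand-in for __aligned(SEP_BUF_ENTRY_SIZE) */

/* task_id must remain at byte offset 4, exactly where it sat when os_id
 * was a single uint32_t, so the sample layout is unchanged. */
_Static_assert(offsetof(struct core_pmu_sample, task_id) == 4,
	"os_id + reserved must occupy the original 32-bit slot");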
@@ -254,28 +254,29 @@ struct core_pmu_sample {
 struct lbr_pmu_sample {
 	/* LBR TOS */
 	uint64_t lbr_tos;
 	/* LBR FROM IP */
 	uint64_t lbr_from_ip[NUM_LBR_ENTRY];
 	/* LBR TO IP */
 	uint64_t lbr_to_ip[NUM_LBR_ENTRY];
 	/* LBR info */
 	uint64_t lbr_info[NUM_LBR_ENTRY];
 } __aligned(SEP_BUF_ENTRY_SIZE);
 
 #define LBR_PMU_SAMPLE_SIZE ((uint64_t)sizeof(struct lbr_pmu_sample))
 
 struct pmu_sample {
 	/* core pmu sample */
 	struct core_pmu_sample csample;
 	/* lbr pmu sample */
 	struct lbr_pmu_sample lsample;
 } __aligned(SEP_BUF_ENTRY_SIZE);
 
 struct vm_switch_trace {
 	uint64_t vm_enter_tsc;
 	uint64_t vm_exit_tsc;
 	uint64_t vm_exit_reason;
-	int32_t os_id;
+	uint16_t os_id;
+	uint16_t reserved;
 }__aligned(SEP_BUF_ENTRY_SIZE);
 
 #define VM_SWITCH_TRACE_SIZE ((uint64_t)sizeof(struct vm_switch_trace))
@@ -283,15 +284,15 @@ struct vm_switch_trace {
  * Wrapper containing SEP sampling/profiling related data structures
  */
 struct profiling_info_wrapper {
 	struct profiling_msr_ops_list *msr_node;
 	struct sep_state sep_state;
 	struct guest_vm_info vm_info;
 	ipi_commands ipi_cmd;
 	struct pmu_sample pmu_sample;
 	struct vm_switch_trace vm_switch_trace;
 	socwatch_state soc_state;
 	struct sw_msr_op_info sw_msr_op_info;
 	spinlock_t sw_lock;
 } __aligned(8);
 
 int32_t profiling_get_version_info(struct acrn_vm *vm, uint64_t addr);