HV: Add support to configure PMI and VM-switch info

This patch provides an interface to configure (set up before actual
collection) PMI and VM-switch tracing information.

profiling_config_pmi:
    Receives required information for configuring PMI from the guest,
    populates the information into the per_cpu region and SMP-calls profiling_initialize_pmi

profiling_initialize_pmi:
    Configure the PMU's for sep/socwatch profiling.
    Initial write of PMU registers.
    Walk through the entries and write the value of the register accordingly.

profiling_config_vmsw:
    Receives required information for configuring
    VMswitch from guest, Configure for VM-switch data on all cpus

profiling_initialize_vmsw: initializes VM-switch tracing

Tracked-On: projectacrn#1409
Acked-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Manisha <manisha.chinthapally@intel.com>
This commit is contained in:
Chinthapally, Manisha 2018-10-22 15:22:06 -07:00 committed by wenlingz
parent df549096f2
commit 898b9c8d4a
2 changed files with 250 additions and 13 deletions

View File

@ -8,6 +8,7 @@
#include <hypervisor.h>
#define ACRN_DBG_PROFILING 5U
#define ACRN_ERR_PROFILING 3U
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
@ -20,15 +21,57 @@ static uint32_t profiling_pmi_irq = IRQ_INVALID;
/*
 * Initialize VM-switch tracing on the current pcpu.
 * NOTE: the actual configuration is still to be implemented; for now
 * this routine only emits entry/exit debug traces.
 */
static void profiling_initialize_vmsw(void)
{
	dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
		__func__, get_cpu_id());

	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
		__func__, get_cpu_id());
}
/*
 * Configure the PMU's for sep/socwatch profiling.
 * Initial write of PMU registers.
 * Walk through the entries and write the value of the register accordingly.
 * Note: current_group is always set to 0, only 1 group is supported.
 */
static void profiling_initialize_pmi(void)
{
	uint32_t i, group_id;
	struct profiling_msr_op *msrop = NULL;
	struct sep_state *ss = &get_cpu_var(profiling_info.sep_state);

	dev_dbg(ACRN_DBG_PROFILING, "%s: entering cpu%d",
		__func__, get_cpu_id());

	/* Defensive check; ss points into this cpu's per-cpu region. */
	if (ss == NULL) {
		dev_dbg(ACRN_ERR_PROFILING, "%s: exiting cpu%d",
			__func__, get_cpu_id());
		return;
	}

	/* Only group 0 is supported; record it as the active PMI group. */
	group_id = ss->current_pmi_group_id = 0U;
	for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
		msrop = &(ss->pmi_initial_msr_list[group_id][i]);
		if (msrop != NULL) {
			/* msr_id of (uint32_t)-1 marks the end of the list. */
			if (msrop->msr_id == (uint32_t)-1) {
				break;
			}
			/* Capture the value destined for IA32_DEBUGCTL so it
			 * can be applied on behalf of the guest later. */
			if (msrop->msr_id == MSR_IA32_DEBUGCTL) {
				ss->guest_debugctl_value = msrop->value;
			}
			/* Only WRITE operations are performed during the
			 * initial programming pass. */
			if (msrop->msr_op_type == (uint8_t)MSR_OP_WRITE) {
				msr_write(msrop->msr_id, msrop->value);
				dev_dbg(ACRN_DBG_PROFILING,
				"%s: MSRWRITE cpu%d, msr_id=0x%x, msr_val=0x%llx",
				__func__, get_cpu_id(), msrop->msr_id, msrop->value);
			}
		}
	}

	/* PMU registers programmed: mark setup complete on this cpu. */
	ss->pmu_state = PMU_SETUP;

	dev_dbg(ACRN_DBG_PROFILING, "%s: exiting cpu%d",
		__func__, get_cpu_id());
}
/*
@ -225,23 +268,142 @@ int32_t profiling_set_control(__unused struct vm *vm, __unused uint64_t addr)
/*
* Configure PMI on all cpus
*/
int32_t profiling_configure_pmi(__unused struct vm *vm, __unused uint64_t addr)
int32_t profiling_configure_pmi(struct vm *vm, uint64_t addr)
{
/* to be implemented
* call to smp_call_function profiling_ipi_handler
*/
uint16_t i;
struct profiling_pmi_config pmi_config;
(void)memset((void *)&pmi_config, 0U, sizeof(pmi_config));
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
if (copy_from_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
return -EINVAL;
}
for (i = 0U; i < phys_cpu_num; i++) {
if (!((per_cpu(profiling_info.sep_state, i).pmu_state ==
PMU_INITIALIZED) ||
(per_cpu(profiling_info.sep_state, i).pmu_state ==
PMU_SETUP))) {
pr_err("%s: invalid pmu_state %u on cpu%d",
__func__, per_cpu(profiling_info.sep_state, i).pmu_state, i);
return -EINVAL;
}
}
if (pmi_config.num_groups == 0U ||
pmi_config.num_groups > MAX_GROUP_NUM) {
pr_err("%s: invalid num_groups %u",
__func__, pmi_config.num_groups);
return -EINVAL;
}
for (i = 0U; i < phys_cpu_num; i++) {
per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG;
per_cpu(profiling_info.sep_state, i).num_pmi_groups
= pmi_config.num_groups;
(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_initial_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
(void *)pmi_config.initial_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_start_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
(void *)pmi_config.start_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_stop_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
(void *)pmi_config.stop_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_entry_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
(void *)pmi_config.entry_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
(void)memcpy_s((void *)per_cpu(profiling_info.sep_state, i).pmi_exit_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM,
(void *)pmi_config.exit_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM*MAX_GROUP_NUM);
}
smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
if (copy_to_gpa(vm, &pmi_config, addr, sizeof(pmi_config)) != 0) {
pr_err("%s: Unable to copy addr to vm\n", __func__);
return -EINVAL;
}
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
return 0;
}
/*
* Configure for VM-switch data on all cpus
*/
int32_t profiling_configure_vmsw(__unused struct vm *vm, __unused uint64_t addr)
int32_t profiling_configure_vmsw(struct vm *vm, uint64_t addr)
{
/* to be implemented
* call to smp_call_function profiling_ipi_handler
*/
return 0;
uint16_t i;
int32_t ret = 0;
struct profiling_vmsw_config vmsw_config;
(void)memset((void *)&vmsw_config, 0U, sizeof(vmsw_config));
dev_dbg(ACRN_DBG_PROFILING, "%s: entering", __func__);
if (copy_from_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
pr_err("%s: Unable to copy addr from vm\n", __func__);
return -EINVAL;
}
switch (vmsw_config.collector_id) {
case COLLECT_PROFILE_DATA:
for (i = 0U; i < phys_cpu_num; i++) {
per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG;
(void)memcpy_s(
(void *)per_cpu(profiling_info.sep_state, i).vmsw_initial_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
(void *)vmsw_config.initial_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
(void)memcpy_s(
(void *)per_cpu(profiling_info.sep_state, i).vmsw_entry_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
(void *)vmsw_config.entry_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
(void)memcpy_s(
(void *)per_cpu(profiling_info.sep_state, i).vmsw_exit_msr_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM,
(void *)vmsw_config.exit_list,
sizeof(struct profiling_msr_op)*MAX_MSR_LIST_NUM);
}
smp_call_function(pcpu_active_bitmap, profiling_ipi_handler, NULL);
break;
case COLLECT_POWER_DATA:
break;
default:
pr_err("%s: unknown collector %d",
__func__, vmsw_config.collector_id);
ret = -EINVAL;
break;
}
if (copy_to_gpa(vm, &vmsw_config, addr, sizeof(vmsw_config)) != 0) {
pr_err("%s: Unable to copy addr to vm\n", __func__);
return -EINVAL;
}
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
return ret;
}
/*
@ -349,4 +511,4 @@ void profiling_setup(void)
dev_dbg(ACRN_DBG_PROFILING, "%s: exiting", __func__);
}
#endif
#endif

View File

@ -11,10 +11,19 @@
/* Upper bounds used to size the profiling data structures. */
#define MAX_NR_VCPUS 8
#define MAX_NR_VMS 6
/* Max MSR operations per list; a msr_id of -1 terminates a shorter list. */
#define MAX_MSR_LIST_NUM 15U
/* Only one PMI group is currently supported. */
#define MAX_GROUP_NUM 1U

/* Collector selected in profiling_vmsw_config.collector_id. */
#define COLLECT_PROFILE_DATA 0
#define COLLECT_POWER_DATA 1

/* MSR operation requested in profiling_msr_op.msr_op_type. */
enum MSR_CMD_TYPE {
	MSR_OP_NONE = 0,
	MSR_OP_READ,
	MSR_OP_WRITE,
	MSR_OP_READ_CLEAR
};
typedef enum IPI_COMMANDS {
IPI_MSR_OP = 0,
IPI_PMU_CONFIG,
@ -24,6 +33,14 @@ typedef enum IPI_COMMANDS {
IPI_UNKNOWN,
} ipi_commands;
/* Per-cpu PMU life-cycle state for SEP profiling. */
typedef enum SEP_PMU_STATE {
	PMU_INITIALIZED = 0,	/* ready to be configured */
	PMU_SETUP,		/* PMU registers programmed */
	PMU_RUNNING,		/* collection in progress */
	PMU_UNINITIALIZED,	/* torn down */
	PMU_UNKNOWN
} sep_pmu_state;
typedef enum PROFILING_SEP_FEATURE {
CORE_PMU_SAMPLING = 0,
CORE_PMU_COUNTING,
@ -75,12 +92,70 @@ struct profiling_vm_info_list {
struct profiling_vm_info vm_list[MAX_NR_VMS];
};
/* One MSR read/write request exchanged between guest tool and hypervisor. */
struct profiling_msr_op {
	/* value to write or location to write into */
	uint64_t value;
	/* MSR address to read/write; last entry will have value of -1 */
	uint32_t msr_id;
	/* parameter; usage depends on operation */
	uint16_t param;
	/* one of enum MSR_CMD_TYPE */
	uint8_t msr_op_type;
	uint8_t reg_type;
};
/* Guest-supplied PMI configuration: MSR operation lists per group that
 * the hypervisor applies at initialization, start/stop and on VM
 * entry/exit. Copied in by profiling_configure_pmi(). */
struct profiling_pmi_config {
	/* number of valid groups; must be in (0, MAX_GROUP_NUM] */
	uint32_t num_groups;
	uint32_t trigger_count;
	struct profiling_msr_op initial_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op start_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op stop_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op entry_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op exit_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
};
/* Guest-supplied VM-switch tracing configuration. Copied in by
 * profiling_configure_vmsw(). */
struct profiling_vmsw_config {
	/* COLLECT_PROFILE_DATA or COLLECT_POWER_DATA */
	int32_t collector_id;
	struct profiling_msr_op initial_list[MAX_MSR_LIST_NUM];
	struct profiling_msr_op entry_list[MAX_MSR_LIST_NUM];
	struct profiling_msr_op exit_list[MAX_MSR_LIST_NUM];
};
/* Per-cpu SEP profiling state: PMU life-cycle state plus staged copies
 * of the guest-configured PMI and VM-switch MSR operation lists. */
struct sep_state {
	sep_pmu_state pmu_state;

	/* PMI */
	uint32_t current_pmi_group_id;
	uint32_t num_pmi_groups;

	struct profiling_msr_op
		pmi_initial_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		pmi_start_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		pmi_stop_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		pmi_entry_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		pmi_exit_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];

	/* VM switch */
	uint32_t current_vmsw_group_id;
	/* NOTE(review): "msw" looks like a typo for "vmsw" — renaming would
	 * change the interface, so it is kept as-is here. */
	uint32_t num_msw_groups;
	struct profiling_msr_op
		vmsw_initial_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		vmsw_entry_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	struct profiling_msr_op
		vmsw_exit_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];

	/* guest's IA32_DEBUGCTL value captured during PMI initialization */
	uint64_t guest_debugctl_value;
} __aligned(8);
/*
 * Wrapper containing SEP sampling/profiling related data structures:
 * the per-cpu SEP state plus the pending IPI command for that cpu.
 */
struct profiling_info_wrapper {
	struct sep_state sep_state;
	ipi_commands ipi_cmd;
} __aligned(8);
int32_t profiling_get_version_info(struct vm *vm, uint64_t addr);
int32_t profiling_get_pcpu_id(struct vm *vm, uint64_t addr);