Fix golint failures of framework/metrics_util.go
This fixes golint failures of framework/metrics_util.go.

Cleanup:
- SaturationTime was only used in test/e2e/scalability/density.go, so it is moved into that e2e test (as the unexported saturationTime).
- The filtered interestingClusterAutoscalerMetrics map built in filterMetrics() was never used, so the related code is removed.
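For context on the fixes that follow: golint flags exported package-level identifiers that lack a doc comment beginning with the identifier's name (the message is roughly "exported var Foo should have comment or be unexported"), and the two accepted remedies are adding such a comment or unexporting names that are only needed inside the package. Both patterns appear throughout the diff below. A minimal sketch, assuming a throwaway package and illustrative identifier names that are not taken from the framework code:

// Package lintdemo is a hypothetical stand-in used only to illustrate
// the two golint fixes applied by this commit.
package lintdemo

// InterestingMetrics lists metric names kept after filtering.
// Fix 1: keep the name exported and add a doc comment starting with it.
var InterestingMetrics = []string{"apiserver_request_total"}

// Fix 2: unexport identifiers that are only used within the package;
// unexported names do not require doc comments.
var interestingMetrics = []string{"apiserver_request_total"}

The renames below (InterestingAPIServerMetrics -> interestingAPIServerMetrics, SchedulingLatencyMetricName -> schedulingLatencyMetricName, and so on) take the second route, while the exported framework types and methods get new doc comments instead.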
--- a/test/e2e/framework/metrics_util.go
+++ b/test/e2e/framework/metrics_util.go
@@ -65,31 +65,28 @@ const (
 	caFunctionMetricLabel = "function"
 )
 
+// MetricsForE2E is metrics collection of components.
 type MetricsForE2E metrics.Collection
 
 func (m *MetricsForE2E) filterMetrics() {
-	interestingAPIServerMetrics := make(metrics.APIServerMetrics)
-	for _, metric := range InterestingAPIServerMetrics {
-		interestingAPIServerMetrics[metric] = (*m).APIServerMetrics[metric]
+	apiServerMetrics := make(metrics.APIServerMetrics)
+	for _, metric := range interestingAPIServerMetrics {
+		apiServerMetrics[metric] = (*m).APIServerMetrics[metric]
 	}
-	interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
-	for _, metric := range InterestingControllerManagerMetrics {
-		interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
+	controllerManagerMetrics := make(metrics.ControllerManagerMetrics)
+	for _, metric := range interestingControllerManagerMetrics {
+		controllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
 	}
-	interestingClusterAutoscalerMetrics := make(metrics.ClusterAutoscalerMetrics)
-	for _, metric := range InterestingClusterAutoscalerMetrics {
-		interestingClusterAutoscalerMetrics[metric] = (*m).ClusterAutoscalerMetrics[metric]
-	}
-	interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
+	kubeletMetrics := make(map[string]metrics.KubeletMetrics)
 	for kubelet, grabbed := range (*m).KubeletMetrics {
-		interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
-		for _, metric := range InterestingKubeletMetrics {
-			interestingKubeletMetrics[kubelet][metric] = grabbed[metric]
+		kubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
+		for _, metric := range interestingKubeletMetrics {
+			kubeletMetrics[kubelet][metric] = grabbed[metric]
 		}
 	}
-	(*m).APIServerMetrics = interestingAPIServerMetrics
-	(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
-	(*m).KubeletMetrics = interestingKubeletMetrics
+	(*m).APIServerMetrics = apiServerMetrics
+	(*m).ControllerManagerMetrics = controllerManagerMetrics
+	(*m).KubeletMetrics = kubeletMetrics
 }
 
 func printSample(sample *model.Sample) string {
@@ -112,21 +109,22 @@ func printSample(sample *model.Sample) string {
 	return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value)
 }
 
+// PrintHumanReadable returns e2e metrics with JSON format.
 func (m *MetricsForE2E) PrintHumanReadable() string {
 	buf := bytes.Buffer{}
-	for _, interestingMetric := range InterestingAPIServerMetrics {
+	for _, interestingMetric := range interestingAPIServerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).APIServerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
 		}
 	}
-	for _, interestingMetric := range InterestingControllerManagerMetrics {
+	for _, interestingMetric := range interestingControllerManagerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
 		}
 	}
-	for _, interestingMetric := range InterestingClusterAutoscalerMetrics {
+	for _, interestingMetric := range interestingClusterAutoscalerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).ClusterAutoscalerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
@@ -134,7 +132,7 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
 	}
 	for kubelet, grabbed := range (*m).KubeletMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
-		for _, interestingMetric := range InterestingKubeletMetrics {
+		for _, interestingMetric := range interestingKubeletMetrics {
 			buf.WriteString(fmt.Sprintf("\tFor %v:\n", interestingMetric))
 			for _, sample := range grabbed[interestingMetric] {
 				buf.WriteString(fmt.Sprintf("\t\t%v\n", printSample(sample)))
@@ -144,18 +142,20 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
 	return buf.String()
 }
 
+// PrintJSON returns e2e metrics with JSON format.
 func (m *MetricsForE2E) PrintJSON() string {
 	m.filterMetrics()
 	return PrettyPrintJSON(m)
 }
 
+// SummaryKind returns the summary of e2e metrics.
 func (m *MetricsForE2E) SummaryKind() string {
 	return "MetricsForE2E"
 }
 
-var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
+var schedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
 
-var InterestingAPIServerMetrics = []string{
+var interestingAPIServerMetrics = []string{
 	"apiserver_request_total",
 	// TODO(krzysied): apiserver_request_latencies_summary is a deprecated metric.
 	// It should be replaced with new metric.
@@ -163,7 +163,7 @@ var InterestingAPIServerMetrics = []string{
 	"apiserver_init_events_total",
 }
 
-var InterestingControllerManagerMetrics = []string{
+var interestingControllerManagerMetrics = []string{
 	"garbage_collector_attempt_to_delete_queue_latency",
 	"garbage_collector_attempt_to_delete_work_duration",
 	"garbage_collector_attempt_to_orphan_queue_latency",
@@ -183,7 +183,7 @@ var InterestingControllerManagerMetrics = []string{
 	"namespace_work_duration_count",
 }
 
-var InterestingKubeletMetrics = []string{
+var interestingKubeletMetrics = []string{
 	"kubelet_docker_operations_errors_total",
 	"kubelet_docker_operations_duration_seconds",
 	"kubelet_pod_start_duration_seconds",
@@ -191,13 +191,13 @@ var InterestingKubeletMetrics = []string{
 	"kubelet_pod_worker_start_duration_seconds",
 }
 
-var InterestingClusterAutoscalerMetrics = []string{
+var interestingClusterAutoscalerMetrics = []string{
 	"function_duration_seconds",
 	"errors_total",
 	"evicted_pods_total",
 }
 
-// Dashboard metrics
+// LatencyMetric is a struct for dashboard metrics.
 type LatencyMetric struct {
 	Perc50 time.Duration `json:"Perc50"`
 	Perc90 time.Duration `json:"Perc90"`
@@ -205,6 +205,7 @@ type LatencyMetric struct {
 	Perc100 time.Duration `json:"Perc100"`
 }
 
+// PodStartupLatency is a struct for managing latency of pod startup.
 type PodStartupLatency struct {
 	CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
 	ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"`
@@ -213,18 +214,22 @@ type PodStartupLatency struct {
 	E2ELatency LatencyMetric `json:"e2eLatency"`
 }
 
+// SummaryKind returns the summary of pod startup latency.
 func (l *PodStartupLatency) SummaryKind() string {
 	return "PodStartupLatency"
 }
 
+// PrintHumanReadable returns pod startup letency with JSON format.
 func (l *PodStartupLatency) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns pod startup letency with JSON format.
 func (l *PodStartupLatency) PrintJSON() string {
 	return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
 }
 
+// SchedulingMetrics is a struct for managing scheduling metrics.
 type SchedulingMetrics struct {
 	PredicateEvaluationLatency LatencyMetric `json:"predicateEvaluationLatency"`
 	PriorityEvaluationLatency LatencyMetric `json:"priorityEvaluationLatency"`
@@ -236,23 +241,28 @@ type SchedulingMetrics struct {
 	ThroughputPerc99 float64 `json:"throughputPerc99"`
 }
 
+// SummaryKind returns the summary of scheduling metrics.
 func (l *SchedulingMetrics) SummaryKind() string {
 	return "SchedulingMetrics"
 }
 
+// PrintHumanReadable returns scheduling metrics with JSON format.
 func (l *SchedulingMetrics) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns scheduling metrics with JSON format.
 func (l *SchedulingMetrics) PrintJSON() string {
 	return PrettyPrintJSON(l)
 }
 
+// Histogram is a struct for managing histogram.
 type Histogram struct {
 	Labels map[string]string `json:"labels"`
 	Buckets map[string]int `json:"buckets"`
 }
 
+// HistogramVec is an array of Histogram.
 type HistogramVec []Histogram
 
 func newHistogram(labels map[string]string) *Histogram {
@@ -262,6 +272,7 @@ func newHistogram(labels map[string]string) *Histogram {
 	}
 }
 
+// EtcdMetrics is a struct for managing etcd metrics.
 type EtcdMetrics struct {
 	BackendCommitDuration HistogramVec `json:"backendCommitDuration"`
 	SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
@@ -279,24 +290,29 @@ func newEtcdMetrics() *EtcdMetrics {
 	}
 }
 
+// SummaryKind returns the summary of etcd metrics.
 func (l *EtcdMetrics) SummaryKind() string {
 	return "EtcdMetrics"
 }
 
+// PrintHumanReadable returns etcd metrics with JSON format.
 func (l *EtcdMetrics) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns etcd metrics with JSON format.
 func (l *EtcdMetrics) PrintJSON() string {
 	return PrettyPrintJSON(l)
 }
 
+// EtcdMetricsCollector is a struct for managing etcd metrics collector.
 type EtcdMetricsCollector struct {
 	stopCh chan struct{}
 	wg *sync.WaitGroup
 	metrics *EtcdMetrics
 }
 
+// NewEtcdMetricsCollector creates a new etcd metrics collector.
 func NewEtcdMetricsCollector() *EtcdMetricsCollector {
 	return &EtcdMetricsCollector{
 		stopCh: make(chan struct{}),
@@ -357,6 +373,7 @@ func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
 	}()
 }
 
+// StopAndSummarize stops etcd metrics collector and summarizes the metrics.
 func (mc *EtcdMetricsCollector) StopAndSummarize() error {
 	close(mc.stopCh)
 	mc.wg.Wait()
@@ -381,17 +398,12 @@ func (mc *EtcdMetricsCollector) StopAndSummarize() error {
 	return nil
 }
 
+// GetMetrics returns metrics of etcd metrics collector.
 func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
 	return mc.metrics
 }
 
-type SaturationTime struct {
-	TimeToSaturate time.Duration `json:"timeToSaturate"`
-	NumberOfNodes int `json:"numberOfNodes"`
-	NumberOfPods int `json:"numberOfPods"`
-	Throughput float32 `json:"throughput"`
-}
-
+// APICall is a struct for managing API call.
 type APICall struct {
 	Resource string `json:"resource"`
 	Subresource string `json:"subresource"`
@@ -401,18 +413,22 @@ type APICall struct {
 	Count int `json:"count"`
 }
 
+// APIResponsiveness is a struct for managing multiple API calls.
 type APIResponsiveness struct {
 	APICalls []APICall `json:"apicalls"`
 }
 
+// SummaryKind returns the summary of API responsiveness.
 func (a *APIResponsiveness) SummaryKind() string {
 	return "APIResponsiveness"
 }
 
+// PrintHumanReadable returns metrics with JSON format.
 func (a *APIResponsiveness) PrintHumanReadable() string {
 	return PrettyPrintJSON(a)
 }
 
+// PrintJSON returns metrics of PerfData(50, 90 and 99th percentiles) with JSON format.
 func (a *APIResponsiveness) PrintJSON() string {
 	return PrettyPrintJSON(APICallToPerfData(a))
 }
@@ -522,7 +538,7 @@ func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) {
 	return &a, err
 }
 
-// Prints top five summary metrics for request types with latency and returns
+// HighLatencyRequests prints top five summary metrics for request types with latency and returns
 // number of such request types above threshold. We use a higher threshold for
 // list calls if nodeCount is above a given threshold (i.e. cluster is big).
 func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIResponsiveness, error) {
@@ -562,7 +578,7 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
 	return badMetrics, metrics, nil
 }
 
-// Verifies whether 50, 90 and 99th percentiles of a latency metric are
+// VerifyLatencyWithinThreshold verifies whether 50, 90 and 99th percentiles of a latency metric are
 // within the expected threshold.
 func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
 	if actual.Perc50 > threshold.Perc50 {
@@ -577,7 +593,7 @@ func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName st
 	return nil
 }
 
-// Resets latency metrics in apiserver.
+// ResetMetrics resets latency metrics in apiserver.
 func ResetMetrics(c clientset.Interface) error {
 	Logf("Resetting latency metrics in apiserver...")
 	body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw()
@@ -663,11 +679,11 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	}
 
 	for _, sample := range samples {
-		if sample.Metric[model.MetricNameLabel] != SchedulingLatencyMetricName {
+		if sample.Metric[model.MetricNameLabel] != schedulingLatencyMetricName {
 			continue
 		}
 
-		var metric *LatencyMetric = nil
+		var metric *LatencyMetric
 		switch sample.Metric[schedulermetric.OperationLabel] {
 		case schedulermetric.PredicateEvaluation:
 			metric = &result.PredicateEvaluationLatency
@@ -691,7 +707,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	return &result, nil
 }
 
-// Verifies (currently just by logging them) the scheduling latencies.
+// VerifySchedulerLatency verifies (currently just by logging them) the scheduling latencies.
 func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	latency, err := getSchedulingLatency(c)
 	if err != nil {
@@ -700,6 +716,7 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	return latency, nil
 }
 
+// ResetSchedulerMetrics sends a DELETE request to kube-scheduler for resetting metrics.
 func ResetSchedulerMetrics(c clientset.Interface) error {
 	responseText, err := sendRestRequestToScheduler(c, "DELETE")
 	if err != nil {
@@ -729,6 +746,7 @@ func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
 	hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
 }
 
+// PrettyPrintJSON converts metrics to JSON format.
 func PrettyPrintJSON(metrics interface{}) string {
 	output := &bytes.Buffer{}
 	if err := json.NewEncoder(output).Encode(metrics); err != nil {
@@ -775,12 +793,14 @@ type PodLatencyData struct {
 	Latency time.Duration
 }
 
+// LatencySlice is an array of PodLatencyData which encapsulates pod startup latency information.
 type LatencySlice []PodLatencyData
 
 func (a LatencySlice) Len() int { return len(a) }
 func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
 
+// ExtractLatencyMetrics returns latency metrics for each percentile(50th, 90th and 99th).
 func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
 	length := len(latencies)
 	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
@@ -805,6 +825,7 @@ func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLate
 		float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
 }
 
+// PrintLatencies outputs latencies to log with readable format.
 func PrintLatencies(latencies []PodLatencyData, header string) {
 	metrics := ExtractLatencyMetrics(latencies)
 	Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
--- a/test/e2e/scalability/density.go
+++ b/test/e2e/scalability/density.go
@@ -81,6 +81,13 @@ type DensityTestConfig struct {
 	DaemonConfigs []*testutils.DaemonConfig
 }
 
+type saturationTime struct {
+	TimeToSaturate time.Duration `json:"timeToSaturate"`
+	NumberOfNodes int `json:"numberOfNodes"`
+	NumberOfPods int `json:"numberOfPods"`
+	Throughput float32 `json:"throughput"`
+}
+
 func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
 	defer testPhase.End()
 	for _, sc := range dtc.SecretConfigs {
@@ -418,7 +425,7 @@ var _ = SIGDescribe("Density", func() {
 			saturationThreshold = MinSaturationThreshold
 		}
 		Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
-		saturationData := framework.SaturationTime{
+		saturationData := saturationTime{
 			TimeToSaturate: e2eStartupTime,
 			NumberOfNodes: nodeCount,
 			NumberOfPods: totalPods,
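To make the moved type concrete: saturationTime is a plain data holder whose json tags determine the field names when the value is serialized. A minimal standalone sketch of what marshaling it produces, using only the standard library (this is an illustration with made-up values, not the framework's own printing path):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// saturationTime mirrors the struct added to test/e2e/scalability/density.go.
type saturationTime struct {
	TimeToSaturate time.Duration `json:"timeToSaturate"`
	NumberOfNodes  int           `json:"numberOfNodes"`
	NumberOfPods   int           `json:"numberOfPods"`
	Throughput     float32       `json:"throughput"`
}

func main() {
	data := saturationTime{
		TimeToSaturate: 10 * time.Minute,
		NumberOfNodes:  100,
		NumberOfPods:   3000,
		Throughput:     5.0,
	}
	out, err := json.MarshalIndent(data, "", "  ")
	if err != nil {
		panic(err)
	}
	// Note: time.Duration marshals as its integer nanosecond count.
	fmt.Println(string(out))
}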