mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 11:21:47 +00:00
e2e_node: check container metrics conditionally
When the PodAndContainerStatsFromCRI feature gate is enabled, Kubelet tries to get the list of metrics from the CRI runtime using the CRI API 'ListMetricDescriptors'. As this API is implemented in neither the CRI-O nor the Containerd versions used in the test-infra, the ResourceMetrics test case fails to gather certain container metrics. Excluding container metrics from the expected list of metrics when PodAndContainerStatsFromCRI is enabled should solve the issue.
This commit is contained in:
parent
352056f09d
commit
2ac5dfe379
@ -22,10 +22,12 @@ import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/nodefeature"
|
||||
admissionapi "k8s.io/pod-security-admission/api"
|
||||
@ -74,6 +76,17 @@ var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() {
|
||||
memoryCapacity := node.Status.Capacity["memory"]
|
||||
memoryLimit := memoryCapacity.Value()
|
||||
|
||||
keys := []string{
|
||||
"resource_scrape_error", "node_cpu_usage_seconds_total", "node_memory_working_set_bytes",
|
||||
"pod_cpu_usage_seconds_total", "pod_memory_working_set_bytes",
|
||||
}
|
||||
|
||||
// NOTE: This check should be removed when ListMetricDescriptors is implemented
|
||||
// by CRI-O and Containerd
|
||||
if !e2eskipper.IsFeatureGateEnabled(features.PodAndContainerStatsFromCRI) {
|
||||
keys = append(keys, "container_cpu_usage_seconds_total", "container_memory_working_set_bytes", "container_start_time_seconds")
|
||||
}
|
||||
|
||||
matchResourceMetrics := gomega.And(gstruct.MatchKeys(gstruct.IgnoreMissing, gstruct.Keys{
|
||||
"resource_scrape_error": gstruct.Ignore(),
|
||||
"node_cpu_usage_seconds_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
|
||||
@ -113,8 +126,7 @@ var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() {
|
||||
fmt.Sprintf("%s::%s", f.Namespace.Name, pod1): boundedSample(0*e2evolume.Kb, 80*e2evolume.Mb),
|
||||
}),
|
||||
}),
|
||||
haveKeys("resource_scrape_error", "node_cpu_usage_seconds_total", "node_memory_working_set_bytes", "container_cpu_usage_seconds_total",
|
||||
"container_memory_working_set_bytes", "container_start_time_seconds", "pod_cpu_usage_seconds_total", "pod_memory_working_set_bytes"),
|
||||
haveKeys(keys...),
|
||||
)
|
||||
ginkgo.By("Giving pods a minute to start up and produce metrics")
|
||||
gomega.Eventually(ctx, getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
|
||||
|
Loading…
Reference in New Issue
Block a user