update ResourceMetricsAPI node-e2e test
/metrics/resource/v1alpha1 was deprecated and moved to /metrics/resource. Renames remove v1alpha1 from function names and matcher variables. Pod deletion was taking multiple minutes, so GracePeriodSeconds is set to 0. Adds a comment on the restart loop during test pod startup. Moves ResourceMetricsAPI out of Orphans by giving it a NodeFeature tag.

API removed in 7b7c73b (#88568). Test created in 6051664 (#73946).
This commit is contained in: parent d60c72f214, commit 916c73b8a5
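Since the test now scrapes the stable endpoint, the move is easy to sanity-check by hand. Below is a minimal Go sketch that fetches the kubelet's stable resource metrics directly; the port (10255) matches the one the test uses, but the localhost address and the raw-HTTP fetch are illustrative assumptions (the test itself goes through the e2emetrics helper instead).

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Stable endpoint; replaces the deprecated /metrics/resource/v1alpha1 path.
	// Assumes the kubelet's read-only port (10255) is enabled on this node.
	resp, err := http.Get("http://127.0.0.1:10255/metrics/resource")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Prometheus text format: node_cpu_usage_seconds_total,
	// container_memory_working_set_bytes, scrape_error, etc.
	fmt.Printf("%s", body)
}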
@@ -156,7 +156,6 @@ go_test(
         "//pkg/kubelet:go_default_library",
         "//pkg/kubelet/apis/config:go_default_library",
         "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library",
-        "//pkg/kubelet/apis/resourcemetrics/v1alpha1:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/cm:go_default_library",
         "//pkg/kubelet/cm/cpumanager:go_default_library",
@@ -21,7 +21,6 @@ import (
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
@@ -41,16 +40,16 @@ const (
 	maxStatsAge = time.Minute
 )

-var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
+var _ = framework.KubeDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
 	f := framework.NewDefaultFramework("resource-metrics")
 	ginkgo.Context("when querying /resource/metrics", func() {
 		ginkgo.BeforeEach(func() {
-			ginkgo.By("Creating test pods")
+			ginkgo.By("Creating test pods to measure their resource usage")
 			numRestarts := int32(1)
 			pods := getSummaryTestPods(f, numRestarts, pod0, pod1)
 			f.PodClient().CreateBatch(pods)

-			ginkgo.By("Waiting for test pods to restart the desired number of times")
+			ginkgo.By("restarting the containers to ensure container metrics are still being gathered after a container is restarted")
 			gomega.Eventually(func() error {
 				for _, pod := range pods {
 					err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
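The hunk above cuts off partway through the polling block. For context, the complete pattern looks roughly like the sketch below; verifyPodRestartCount, f, pods, and numRestarts are the names already in scope in the test, while the timeout and interval values are assumptions for illustration.

// Sketch of the restart-polling pattern in the BeforeEach: keep re-checking
// each pod until every container has restarted numRestarts times, or fail
// after the (assumed) 2-minute timeout.
gomega.Eventually(func() error {
	for _, pod := range pods {
		err := verifyPodRestartCount(f, pod.Name, len(pod.Spec.Containers), numRestarts)
		if err != nil {
			return err
		}
	}
	return nil
}, 2*time.Minute, 10*time.Second).Should(gomega.Succeed())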
@@ -64,13 +63,13 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 			ginkgo.By("Waiting 15 seconds for cAdvisor to collect 2 stats points")
 			time.Sleep(15 * time.Second)
 		})
-		ginkgo.It("should report resource usage through the v1alpha1 resouce metrics api", func() {
-			ginkgo.By("Fetching node so we can know proper node memory bounds for unconstrained cgroups")
+		ginkgo.It("should report resource usage through the resouce metrics api", func() {
+			ginkgo.By("Fetching node so we can match against an appropriate memory limit")
 			node := getLocalNode(f)
 			memoryCapacity := node.Status.Capacity["memory"]
 			memoryLimit := memoryCapacity.Value()

-			matchV1alpha1Expectations := gstruct.MatchAllKeys(gstruct.Keys{
+			matchResourceMetrics := gstruct.MatchAllKeys(gstruct.Keys{
 				"scrape_error": gstruct.Ignore(),
 				"node_cpu_usage_seconds_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
 					"": boundedSample(1, 1e6),
@@ -90,14 +89,15 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 				}),
 			})
 			ginkgo.By("Giving pods a minute to start up and produce metrics")
-			gomega.Eventually(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
+			gomega.Eventually(getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
 			ginkgo.By("Ensuring the metrics match the expectations a few more times")
-			gomega.Consistently(getV1alpha1ResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchV1alpha1Expectations)
+			gomega.Consistently(getResourceMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
 		})
 		ginkgo.AfterEach(func() {
 			ginkgo.By("Deleting test pods")
-			f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{}, 10*time.Minute)
-			f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{}, 10*time.Minute)
+			var zero int64 = 0
+			f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
+			f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
 			if !ginkgo.CurrentGinkgoTestDescription().Failed {
 				return
 			}
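The zero grace period is what brings the AfterEach down from multiple minutes to seconds. Outside the framework's DeleteSync helper, the same idea looks like the client-go sketch below; the deletePodNow name and clientset wiring are assumptions, not part of the commit, and it assumes a client-go version whose Delete takes a context (1.18+).

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodNow force-deletes a pod with no graceful-shutdown window,
// mirroring the GracePeriodSeconds: &zero change above. Without it, the
// deletion waits out the pod's termination grace period (30s by default).
func deletePodNow(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	var zero int64 = 0
	return c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{
		GracePeriodSeconds: &zero,
	})
}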
@@ -110,8 +110,9 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 	})
 })

-func getV1alpha1ResourceMetrics() (e2emetrics.KubeletMetrics, error) {
-	return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
+func getResourceMetrics() (e2emetrics.KubeletMetrics, error) {
+	ginkgo.By("getting stable resource metrics API")
+	return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource")
 }

 func nodeID(element interface{}) string {
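The diff is truncated at nodeID. Judging from the matcher above, which keys the node-level elements by the empty string, the helper presumably ignores its argument and returns ""; the body below is an assumed reconstruction for illustration, not the commit's code.

// Assumed shape of the helper: node-level metrics carry no distinguishing
// labels, so every sample maps to the "" key used in gstruct.Elements above.
func nodeID(element interface{}) string {
	return ""
}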