node: e2e: add podresources metrics tests

add tests to ensure the podresources metrics are exposed,
and basic sanity tests for their values.

Signed-off-by: Francesco Romani <fromani@redhat.com>
Author: Francesco Romani <fromani@redhat.com>
Date:   2022-10-06 13:40:28 +02:00
parent 175a26e916
commit 3c60c1a10c

@@ -39,7 +39,10 @@ import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
	"k8s.io/kubernetes/test/e2e/framework"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -821,6 +824,49 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
			})
		})
	})
	ginkgo.Context("when querying /metrics", func() {
		ginkgo.BeforeEach(func() {
			// ensure APIs have been called at least once
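			// List and GetAllocatableResources both bump the endpoint request
			// counters, so every metric checked below is guaranteed to be
			// nonzero by the time the test polls /metrics.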
			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
			framework.ExpectNoError(err)

			cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
			framework.ExpectNoError(err)
			defer conn.Close()

			_, err = cli.List(context.TODO(), &kubeletpodresourcesv1.ListPodResourcesRequest{})
			framework.ExpectNoError(err)

			_, err = cli.GetAllocatableResources(context.TODO(), &kubeletpodresourcesv1.AllocatableResourcesRequest{})
			framework.ExpectNoError(err)
		})

		ginkgo.It("should report the values for the podresources metrics", func() {
			// we updated the kubelet config in BeforeEach, so we can assume we start fresh.
			// being [Serial], we can also assume no one else but us is running pods.
			ginkgo.By("Checking the value of the podresources metrics")
			matchResourceMetrics := gstruct.MatchKeys(gstruct.IgnoreExtras, gstruct.Keys{
				"kubelet_pod_resources_endpoint_requests_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSampleAtLeast(1),
				}),
				"kubelet_pod_resources_endpoint_requests_list": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSampleAtLeast(1),
				}),
				"kubelet_pod_resources_endpoint_requests_get_allocatable": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSampleAtLeast(1),
				}),
				// not checking errors: the calls don't have non-catastrophic (e.g. out of memory) error conditions yet.
			})

			ginkgo.By("Giving the Kubelet time to start up and produce metrics")
			gomega.Eventually(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
			ginkgo.By("Ensuring the metrics match the expectations a few more times")
			gomega.Consistently(getPodResourcesMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics)
		})
	})
})
func requireLackOfSRIOVDevices() {
@@ -910,3 +956,18 @@ func getKubeVirtDevicePluginPod() *v1.Pod {
	return p
}
func getPodResourcesMetrics() (e2emetrics.KubeletMetrics, error) {
	// we are running out of good names, so we need to be unnecessarily specific to avoid clashes
	ginkgo.By("getting Pod Resources metrics from the metrics API")
	return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
}
func timelessSampleAtLeast(lower interface{}) types.GomegaMatcher {
	return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
		// We already check Metric when matching the Id
		"Metric":    gstruct.Ignore(),
		"Value":     gomega.BeNumerically(">=", lower),
		"Timestamp": gstruct.Ignore(),
	}))
}
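
For context, the values these matchers run against are the *model.Sample entries (from github.com/prometheus/common/model) that e2emetrics.KubeletMetrics stores per metric name. A minimal standalone sketch of how the helper behaves, using a hand-written sample for illustration rather than real kubelet output:

package main

import (
	"fmt"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
	"github.com/prometheus/common/model"
)

// same shape as the helper in the diff above
func timelessSampleAtLeast(lower interface{}) types.GomegaMatcher {
	return gstruct.PointTo(gstruct.MatchAllFields(gstruct.Fields{
		"Metric":    gstruct.Ignore(),
		"Value":     gomega.BeNumerically(">=", lower),
		"Timestamp": gstruct.Ignore(),
	}))
}

func main() {
	// hand-written counter sample, not captured from a kubelet:
	// only Value is pinned, Metric and Timestamp are ignored.
	sample := &model.Sample{
		Metric:    model.Metric{"__name__": "kubelet_pod_resources_endpoint_requests_total"},
		Value:     model.SampleValue(2),
		Timestamp: model.Now(),
	}
	ok, err := timelessSampleAtLeast(1).Match(sample)
	fmt.Println(ok, err) // true <nil>
}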