mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 12:15:52 +00:00
Merge pull request #52836 from kastenhq/pvc_name_e2etest
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>. Add e2e test to verify PVC metrics. Adds an e2e test for kubernetes/features#363; fixes #52459.
This commit is contained in:
commit
64fde98206
@ -37,6 +37,7 @@ go_library(
|
||||
"//pkg/cloudprovider/providers/vsphere:go_default_library",
|
||||
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/volume/util/volumehelper:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
|
@ -17,11 +17,16 @@ limitations under the License.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
)
|
||||
@ -51,13 +56,17 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
|
||||
|
||||
pvc = newClaim(test, ns, "default")
|
||||
var err error
|
||||
metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, false, false, true, false, false)
|
||||
metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
|
||||
|
||||
if err != nil {
|
||||
framework.Failf("Error creating metrics grabber : %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace)
|
||||
})
|
||||
|
||||
It("should create prometheus metrics for volume provisioning and attach/detach", func() {
|
||||
var err error
|
||||
|
||||
@ -69,10 +78,6 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pvc).ToNot(Equal(nil))
|
||||
defer func() {
|
||||
framework.Logf("Deleting claim %q/%q", pvc.Namespace, pvc.Name)
|
||||
framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil))
|
||||
}()
|
||||
|
||||
claims := []*v1.PersistentVolumeClaim{pvc}
|
||||
|
||||
@ -95,6 +100,49 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
|
||||
verifyMetricCount(storageOpMetrics, updatedStorageMetrics, volumeOp)
|
||||
}
|
||||
})
|
||||
|
||||
It("should create volume metrics with the correct PVC ref", func() {
|
||||
var err error
|
||||
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pvc).ToNot(Equal(nil))
|
||||
|
||||
claims := []*v1.PersistentVolumeClaim{pvc}
|
||||
pod := framework.MakePod(ns, claims, false, "")
|
||||
pod, err = c.CoreV1().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForPodRunningInNamespace(c, pod)
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
|
||||
|
||||
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for `VolumeStatsAggPeriod' to grab metrics
|
||||
time.Sleep(1 * time.Minute)
|
||||
|
||||
// Grab kubelet metrics from the node the pod was scheduled on
|
||||
kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
|
||||
Expect(err).NotTo(HaveOccurred(), "Error getting kubelet metrics : %v", err)
|
||||
|
||||
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
|
||||
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
|
||||
|
||||
// Verify volume stat metrics were collected for the referenced PVC
|
||||
volumeStatKeys := []string{
|
||||
kubeletmetrics.VolumeStatsUsedBytesKey,
|
||||
kubeletmetrics.VolumeStatsCapacityBytesKey,
|
||||
kubeletmetrics.VolumeStatsAvailableBytesKey,
|
||||
kubeletmetrics.VolumeStatsUsedBytesKey,
|
||||
kubeletmetrics.VolumeStatsInodesFreeKey,
|
||||
kubeletmetrics.VolumeStatsInodesUsedKey,
|
||||
}
|
||||
|
||||
for _, key := range volumeStatKeys {
|
||||
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
|
||||
verifyVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func verifyMetricCount(oldMetrics map[string]int64, newMetrics map[string]int64, metricName string) {
|
||||
@ -123,3 +171,30 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics) map[string
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Verifies the specified metrics are in `kubeletMetrics`
|
||||
func verifyVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) {
|
||||
found := false
|
||||
errCount := 0
|
||||
if samples, ok := kubeletMetrics[metricKeyName]; ok {
|
||||
for _, sample := range samples {
|
||||
samplePVC, ok := sample.Metric["persistentvolumeclaim"]
|
||||
if !ok {
|
||||
framework.Logf("Error getting pvc for metric %s, sample %s", metricKeyName, sample.String())
|
||||
errCount++
|
||||
}
|
||||
sampleNS, ok := sample.Metric["namespace"]
|
||||
if !ok {
|
||||
framework.Logf("Error getting namespace for metric %s, sample %s", metricKeyName, sample.String())
|
||||
errCount++
|
||||
}
|
||||
|
||||
if string(samplePVC) == pvcName && string(sampleNS) == namespace {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
Expect(errCount).To(Equal(0), "Found invalid samples")
|
||||
Expect(found).To(BeTrue(), "PVC %s, Namespace %s not found for %s", pvcName, namespace, metricKeyName)
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user