Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)

Commit: use log functions of core framework
Commit hash: 11e0873d24
Parent: 12e6930d8a
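The change is mechanical across the touched files: calls to the e2elog helpers imported from k8s.io/kubernetes/test/e2e/framework/log are replaced with the equivalent Logf/Failf functions exported by the core k8s.io/kubernetes/test/e2e/framework package, and the now-unused e2elog import (plus its BUILD dependency) is dropped. A minimal sketch of the pattern follows; the package clause, helper name, and arguments are hypothetical illustrations that compile only inside the kubernetes e2e test tree — the actual changes are the hunks below.

package monitoring

import (
    // Before this commit: e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    "k8s.io/kubernetes/test/e2e/framework"
)

// reportQueryResult is a hypothetical helper (not part of the commit) showing
// the calling convention after the migration.
func reportQueryResult(metric string, count int, err error) {
    if err != nil {
        // was: e2elog.Failf("Failed query: %s", err)
        framework.Failf("Failed query: %s", err)
    }
    // was: e2elog.Logf("Received %v timeseries for metric %v", count, metric)
    framework.Logf("Received %v timeseries for metric %v", count, metric)
}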
@@ -38,7 +38,6 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/config:go_default_library",
         "//test/e2e/framework/gpu:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
@@ -30,7 +30,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/gpu"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
     "k8s.io/kubernetes/test/e2e/scheduling"
     "k8s.io/kubernetes/test/utils/image"
@@ -102,7 +101,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
     pollingFunction := checkForAcceleratorMetrics(projectID, gcmService, time.Now(), metricsMap)
     err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
     if err != nil {
-        e2elog.Logf("Missing metrics: %+v", metricsMap)
+        framework.Logf("Missing metrics: %+v", metricsMap)
     }
     framework.ExpectNoError(err)
 }
@@ -120,9 +119,9 @@ func checkForAcceleratorMetrics(projectID string, gcmService *gcm.Service, start
         if len(ts) > 0 {
             counter = counter + 1
             metricsMap[metric] = true
-            e2elog.Logf("Received %v timeseries for metric %v", len(ts), metric)
+            framework.Logf("Received %v timeseries for metric %v", len(ts), metric)
         } else {
-            e2elog.Logf("No timeseries for metric %v", metric)
+            framework.Logf("No timeseries for metric %v", metric)
         }
     }
     if counter < 3 {
@@ -24,7 +24,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/config"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 
     "github.com/onsi/ginkgo"
@@ -72,8 +71,8 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
         if maxRetries--; maxRetries <= 0 {
             break
         }
-        e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors)
+        framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
         time.Sleep(cadvisor.SleepDuration)
     }
-    e2elog.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
+    framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
 }
@@ -27,7 +27,6 @@ import (
     rbacv1 "k8s.io/api/rbac/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -266,20 +265,20 @@ func CreateAdapter(adapterDeploymentFile string) error {
         return err
     }
     stat, err := framework.RunKubectl("create", "-f", adapterURL)
-    e2elog.Logf(stat)
+    framework.Logf(stat)
     return err
 }
 
 func createClusterAdminBinding() error {
     stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account")
     if err != nil {
-        e2elog.Logf(stderr)
+        framework.Logf(stderr)
         return err
     }
     serviceAccount := strings.TrimSpace(stdout)
-    e2elog.Logf("current service account: %q", serviceAccount)
+    framework.Logf("current service account: %q", serviceAccount)
     stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
-    e2elog.Logf(stat)
+    framework.Logf(stat)
     return err
 }
 
@@ -308,32 +307,32 @@ func CreateDescriptors(service *gcm.Service, projectID string) error {
 func CleanupDescriptors(service *gcm.Service, projectID string) {
     _, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, CustomMetricName)).Do()
     if err != nil {
-        e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
+        framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
     }
     _, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, UnusedMetricName)).Do()
     if err != nil {
-        e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
+        framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
     }
 }
 
 // CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
 func CleanupAdapter(adapterDeploymentFile string) {
     stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile)
-    e2elog.Logf(stat)
+    framework.Logf(stat)
     if err != nil {
-        e2elog.Logf("Failed to delete adapter deployments: %s", err)
+        framework.Logf("Failed to delete adapter deployments: %s", err)
     }
     err = exec.Command("rm", adapterDeploymentFile).Run()
     if err != nil {
-        e2elog.Logf("Failed to delete adapter deployment file: %s", err)
+        framework.Logf("Failed to delete adapter deployment file: %s", err)
     }
     cleanupClusterAdminBinding()
 }
 
 func cleanupClusterAdminBinding() {
     stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding)
-    e2elog.Logf(stat)
+    framework.Logf(stat)
     if err != nil {
-        e2elog.Logf("Failed to delete cluster admin binding: %s", err)
+        framework.Logf("Failed to delete cluster admin binding: %s", err)
     }
 }
@@ -36,7 +36,6 @@ import (
     cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
     "k8s.io/client-go/restmapper"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     customclient "k8s.io/metrics/pkg/client/custom_metrics"
     externalclient "k8s.io/metrics/pkg/client/external_metrics"
 )
@@ -58,7 +57,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
         kubeClient := f.ClientSet
         config, err := framework.LoadConfig()
         if err != nil {
-            e2elog.Failf("Failed to load config: %s", err)
+            framework.Failf("Failed to load config: %s", err)
         }
         discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
         cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient)
@@ -73,7 +72,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
         kubeClient := f.ClientSet
         config, err := framework.LoadConfig()
         if err != nil {
-            e2elog.Failf("Failed to load config: %s", err)
+            framework.Failf("Failed to load config: %s", err)
         }
         discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config)
         cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient)
@@ -88,7 +87,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
         kubeClient := f.ClientSet
         config, err := framework.LoadConfig()
         if err != nil {
-            e2elog.Failf("Failed to load config: %s", err)
+            framework.Failf("Failed to load config: %s", err)
         }
         externalMetricsClient := externalclient.NewForConfigOrDie(config)
         testExternalMetrics(f, kubeClient, externalMetricsClient)
@@ -103,32 +102,32 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
 
     gcmService, err := gcm.New(client)
     if err != nil {
-        e2elog.Failf("Failed to create gcm service, %v", err)
+        framework.Failf("Failed to create gcm service, %v", err)
     }
 
     // Set up a cluster: create a custom metric and set up k8s-sd adapter
     err = CreateDescriptors(gcmService, projectID)
     if err != nil {
-        e2elog.Failf("Failed to create metric descriptor: %s", err)
+        framework.Failf("Failed to create metric descriptor: %s", err)
     }
     defer CleanupDescriptors(gcmService, projectID)
 
     err = CreateAdapter(adapterDeployment)
     if err != nil {
-        e2elog.Failf("Failed to set up: %s", err)
+        framework.Failf("Failed to set up: %s", err)
     }
     defer CleanupAdapter(adapterDeployment)
 
     _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
     if err != nil {
-        e2elog.Failf("Failed to create ClusterRoleBindings: %v", err)
+        framework.Failf("Failed to create ClusterRoleBindings: %v", err)
     }
     defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
 
     // Run application that exports the metric
     _, err = createSDExporterPods(f, kubeClient)
     if err != nil {
-        e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err)
+        framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
     }
     defer cleanupSDExporterPod(f, kubeClient)
 
@@ -149,33 +148,33 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
 
     gcmService, err := gcm.New(client)
     if err != nil {
-        e2elog.Failf("Failed to create gcm service, %v", err)
+        framework.Failf("Failed to create gcm service, %v", err)
     }
 
     // Set up a cluster: create a custom metric and set up k8s-sd adapter
     err = CreateDescriptors(gcmService, projectID)
     if err != nil {
-        e2elog.Failf("Failed to create metric descriptor: %s", err)
+        framework.Failf("Failed to create metric descriptor: %s", err)
     }
     defer CleanupDescriptors(gcmService, projectID)
 
     // Both deployments - for old and new resource model - expose External Metrics API.
     err = CreateAdapter(AdapterForOldResourceModel)
     if err != nil {
-        e2elog.Failf("Failed to set up: %s", err)
+        framework.Failf("Failed to set up: %s", err)
     }
     defer CleanupAdapter(AdapterForOldResourceModel)
 
     _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions)
     if err != nil {
-        e2elog.Failf("Failed to create ClusterRoleBindings: %v", err)
+        framework.Failf("Failed to create ClusterRoleBindings: %v", err)
     }
     defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{})
 
     // Run application that exports the metric
     pod, err := createSDExporterPods(f, kubeClient)
     if err != nil {
-        e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err)
+        framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
     }
     defer cleanupSDExporterPod(f, kubeClient)
 
@@ -190,34 +189,34 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
 func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient) {
     resources, err := discoveryClient.ServerResourcesForGroupVersion("custom.metrics.k8s.io/v1beta1")
     if err != nil {
-        e2elog.Failf("Failed to retrieve a list of supported metrics: %s", err)
+        framework.Failf("Failed to retrieve a list of supported metrics: %s", err)
     }
     if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+CustomMetricName) {
-        e2elog.Failf("Metric '%s' expected but not received", CustomMetricName)
+        framework.Failf("Metric '%s' expected but not received", CustomMetricName)
     }
     if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+UnusedMetricName) {
-        e2elog.Failf("Metric '%s' expected but not received", UnusedMetricName)
+        framework.Failf("Metric '%s' expected but not received", UnusedMetricName)
     }
     value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName, labels.NewSelector())
     if err != nil {
-        e2elog.Failf("Failed query: %s", err)
+        framework.Failf("Failed query: %s", err)
     }
     if value.Value.Value() != CustomMetricValue {
-        e2elog.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value)
+        framework.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value)
     }
     filter, err := labels.NewRequirement("name", selection.Equals, []string{stackdriverExporterLabel})
     if err != nil {
-        e2elog.Failf("Couldn't create a label filter")
+        framework.Failf("Couldn't create a label filter")
     }
     values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName, labels.NewSelector())
     if err != nil {
-        e2elog.Failf("Failed query: %s", err)
+        framework.Failf("Failed query: %s", err)
     }
     if len(values.Items) != 1 {
-        e2elog.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items))
+        framework.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items))
     }
     if values.Items[0].DescribedObject.Name != stackdriverExporterPod1 || values.Items[0].Value.Value() != CustomMetricValue {
-        e2elog.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value())
+        framework.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value())
     }
 }
 
@@ -242,27 +241,27 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric
         NamespacedMetrics("dummy").
         List("custom.googleapis.com|"+CustomMetricName, labels.NewSelector().Add(*req1, *req2, *req3, *req4, *req5))
     if err != nil {
-        e2elog.Failf("Failed query: %s", err)
+        framework.Failf("Failed query: %s", err)
     }
     if len(values.Items) != 1 {
-        e2elog.Failf("Expected exactly one external metric value, but % values received", len(values.Items))
+        framework.Failf("Expected exactly one external metric value, but % values received", len(values.Items))
     }
     if values.Items[0].MetricName != "custom.googleapis.com|"+CustomMetricName ||
         values.Items[0].Value.Value() != CustomMetricValue ||
         // Check one label just to make sure labels are included
         values.Items[0].MetricLabels["resource.labels.pod_id"] != string(pod.UID) {
-        e2elog.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0])
+        framework.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0])
     }
 }
 
 func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) {
     err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{})
     if err != nil {
-        e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
+        framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
     }
     err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{})
     if err != nil {
-        e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
+        framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
     }
 }
 
@@ -22,7 +22,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/framework/metrics"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 
@@ -72,7 +71,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
             }
         }
         if !masterRegistered {
-            e2elog.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
+            framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
             return
         }
         response, err := grabber.GrabFromScheduler()
@@ -93,7 +92,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
             }
         }
         if !masterRegistered {
-            e2elog.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
+            framework.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
             return
         }
         response, err := grabber.GrabFromControllerManager()
@@ -30,7 +30,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 )
 
@@ -172,7 +171,7 @@ func validateQueryReturnsCorrectValues(c clientset.Interface, query string, expe
     if len(samples) < minSamplesCount {
         return fmt.Errorf("Not enough samples for query '%v', got %v", query, samples)
     }
-    e2elog.Logf("Executed query '%v' returned %v", query, samples)
+    framework.Logf("Executed query '%v' returned %v", query, samples)
     for _, value := range samples {
         error := math.Abs(value-expectedValue) / expectedValue
         if error >= errorTolerance {
@@ -212,7 +211,7 @@ func getInstanceLabelsAvailableForMetric(c clientset.Interface, duration time.Du
     instanceLabels := make([]string, 0)
     m, ok := result.(model.Matrix)
     if !ok {
-        e2elog.Failf("Expected matrix response for query '%v', got: %T", query, result)
+        framework.Failf("Expected matrix response for query '%v', got: %T", query, result)
         return instanceLabels, nil
     }
     for _, stream := range m {
@@ -239,7 +238,7 @@ func fetchPrometheusTargetDiscovery(c clientset.Interface) (TargetDiscovery, err
         Raw()
     var qres promTargetsResponse
     if err != nil {
-        e2elog.Logf(string(response))
+        framework.Logf(string(response))
         return qres.Data, err
     }
     err = json.Unmarshal(response, &qres)
@@ -304,7 +303,7 @@ func queryPrometheus(c clientset.Interface, query string, start, end time.Time,
         Do().
         Raw()
     if err != nil {
-        e2elog.Logf(string(response))
+        framework.Logf(string(response))
         return nil, err
     }
     var qres promQueryResponse
@@ -370,10 +369,10 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) {
         if time.Since(startTime) >= timeout {
             break
         }
-        e2elog.Logf(err.Error())
+        framework.Logf(err.Error())
         time.Sleep(prometheusSleepBetweenAttempts)
     }
-    e2elog.Failf(err.Error())
+    framework.Failf(err.Error())
 }
 
 func getAllNodes(c clientset.Interface) ([]string, error) {
@@ -29,7 +29,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 
     gcm "google.golang.org/api/monitoring/v3"
@@ -84,9 +83,9 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
     // and uncomment following lines (comment out the two lines above): (DON'T set the env var below)
     /*
         ts, err := google.DefaultTokenSource(oauth2.NoContext)
-        e2elog.Logf("Couldn't get application default credentials, %v", err)
+        framework.Logf("Couldn't get application default credentials, %v", err)
         if err != nil {
-            e2elog.Failf("Error accessing application default credentials, %v", err)
+            framework.Failf("Error accessing application default credentials, %v", err)
         }
         client := oauth2.NewClient(oauth2.NoContext, ts)
     */
@@ -111,7 +110,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
     pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
     err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
     if err != nil {
-        e2elog.Logf("Missing metrics: %+v\n", metricsMap)
+        framework.Logf("Missing metrics: %+v\n", metricsMap)
     }
     framework.ExpectNoError(err)
 }
@@ -130,9 +129,9 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
         if len(ts) > 0 {
             counter = counter + 1
             metricsMap[metric] = true
-            e2elog.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
+            framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
         } else {
-            e2elog.Logf("No timeseries for metric %v\n", metric)
+            framework.Logf("No timeseries for metric %v\n", metric)
         }
 
         var sum float64
@@ -149,10 +148,10 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
             }
         }
         sum = sum + *max.Value.DoubleValue
-        e2elog.Logf("Received %v points for metric %v\n",
+        framework.Logf("Received %v points for metric %v\n",
             len(t.Points), metric)
     }
-    e2elog.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
+    framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
     if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
         return false, nil
     }
@@ -30,7 +30,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 )
@@ -69,7 +68,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
 
     oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope)
     if err != nil {
-        e2elog.Failf("Failed to create oauth client: %s", err)
+        framework.Failf("Failed to create oauth client: %s", err)
     }
 
     // Create test pod with unique name.
@@ -83,22 +82,22 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
 
     resp, err := oauthClient.Get(endpoint)
     if err != nil {
-        e2elog.Failf("Failed to call Stackdriver Metadata API %s", err)
+        framework.Failf("Failed to call Stackdriver Metadata API %s", err)
     }
     if resp.StatusCode != 200 {
-        e2elog.Failf("Stackdriver Metadata API returned error status: %s", resp.Status)
+        framework.Failf("Stackdriver Metadata API returned error status: %s", resp.Status)
    }
     metadataAPIResponse, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        e2elog.Failf("Failed to read response from Stackdriver Metadata API: %s", err)
+        framework.Failf("Failed to read response from Stackdriver Metadata API: %s", err)
     }
 
     exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName)
     if err != nil {
-        e2elog.Failf("Failed to process response from Stackdriver Metadata API: %s", err)
+        framework.Failf("Failed to process response from Stackdriver Metadata API: %s", err)
     }
     if !exists {
-        e2elog.Failf("Missing Metadata for container %q", uniqueContainerName)
+        framework.Failf("Missing Metadata for container %q", uniqueContainerName)
     }
 }
 