Merge pull request #83010 from clarklee92/use-log-functions-of-e2e/autoscaling

Use log functions of core framework on test/e2e/autoscaling
commit e31f1ba53e by Kubernetes Prow Robot, 2019-09-24 02:23:30 -07:00, committed by GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 28 additions and 32 deletions
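
The change is mechanical across all four files: drop the e2elog alias import (and its bazel dep) and call the identically named helpers on the core framework package, which exposes Logf and Failf directly. A minimal before/after sketch of the pattern (the checkGPUType helper is illustrative, not from the PR):

    package autoscaling

    import "k8s.io/kubernetes/test/e2e/framework"

    // Before this commit the same calls went through the alias:
    //   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    //   e2elog.Failf("TEST_GPU_TYPE not defined")
    func checkGPUType(gpuType string) {
    	if gpuType == "" {
    		// Failf logs the message and aborts the current spec.
    		framework.Failf("TEST_GPU_TYPE not defined")
    	}
    	framework.Logf("running with GPU type %q", gpuType)
    }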


@@ -40,7 +40,6 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
- "//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",


@@ -43,7 +43,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/scheduling"
@@ -211,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- e2elog.Failf("TEST_GPU_TYPE not defined")
+ framework.Failf("TEST_GPU_TYPE not defined")
return
}
@@ -238,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- e2elog.Failf("TEST_GPU_TYPE not defined")
+ framework.Failf("TEST_GPU_TYPE not defined")
return
}
@@ -268,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- e2elog.Failf("TEST_GPU_TYPE not defined")
+ framework.Failf("TEST_GPU_TYPE not defined")
return
}
@@ -297,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
- e2elog.Failf("TEST_GPU_TYPE not defined")
+ framework.Failf("TEST_GPU_TYPE not defined")
return
}
@@ -499,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer func() {
errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
if len(errs) > 0 {
- e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+ framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pv, pvc = nil, nil
if diskName != "" {
@@ -921,10 +920,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
+ framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
status, err := getClusterwideStatus(c)
- e2elog.Logf("Clusterwide status: %v", status)
+ framework.Logf("Clusterwide status: %v", status)
framework.ExpectNoError(err)
framework.ExpectEqual(status, "Unhealthy")
}
@@ -1232,7 +1231,7 @@ func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet)
if err != nil {
- e2elog.Logf("Unexpected error occurred: %v", err)
+ framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err)
@@ -1309,7 +1308,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
}
}
- e2elog.Failf("Failed to reserve memory within timeout")
+ framework.Failf("Failed to reserve memory within timeout")
return nil
}
@@ -1880,7 +1879,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
}
}
if finalErr != nil {
- e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
+ framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
}
}

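An aside on the Failf call sites above: framework.Failf, like the e2elog version it replaces, reports the failure through ginkgo.Fail, which panics and unwinds to the nearest GinkgoRecover, so the bare return statements that follow it are defensive rather than load-bearing. A rough sketch of that behavior (failf here is a hypothetical stand-in, not the framework's actual implementation):

    package sketch

    import (
    	"fmt"
    	"log"

    	"github.com/onsi/ginkgo"
    )

    // failf logs a formatted message and then aborts the running spec.
    // ginkgo.Fail panics under the hood, so the caller's code after
    // failf never executes unless a deferred GinkgoRecover intervenes.
    func failf(format string, args ...interface{}) {
    	msg := fmt.Sprintf(format, args...)
    	log.Print(msg)
    	ginkgo.Fail(msg, 1) // callerSkip=1 points the report at the caller
    }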

@@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

"github.com/onsi/ginkgo"
@@ -240,35 +239,35 @@ func (tc *CustomMetricTestCase) Run() {
// and uncomment following lines:
/*
ts, err := google.DefaultTokenSource(oauth2.NoContext)
- e2elog.Logf("Couldn't get application default credentials, %v", err)
+ framework.Logf("Couldn't get application default credentials, %v", err)
if err != nil {
- e2elog.Failf("Error accessing application default credentials, %v", err)
+ framework.Failf("Error accessing application default credentials, %v", err)
}
client := oauth2.NewClient(oauth2.NoContext, ts)
*/

gcmService, err := gcm.New(client)
if err != nil {
- e2elog.Failf("Failed to create gcm service, %v", err)
+ framework.Failf("Failed to create gcm service, %v", err)
}

// Set up a cluster: create a custom metric and set up k8s-sd adapter
err = monitoring.CreateDescriptors(gcmService, projectID)
if err != nil {
- e2elog.Failf("Failed to create metric descriptor: %v", err)
+ framework.Failf("Failed to create metric descriptor: %v", err)
}
defer monitoring.CleanupDescriptors(gcmService, projectID)

err = monitoring.CreateAdapter(monitoring.AdapterDefault)
if err != nil {
- e2elog.Failf("Failed to set up: %v", err)
+ framework.Failf("Failed to set up: %v", err)
}
defer monitoring.CleanupAdapter(monitoring.AdapterDefault)

// Run application that exports the metric
err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
if err != nil {
- e2elog.Failf("Failed to create stackdriver-exporter pod: %v", err)
+ framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
}
defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
@@ -278,7 +277,7 @@ func (tc *CustomMetricTestCase) Run() {
// Autoscale the deployment
_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
if err != nil {
- e2elog.Failf("Failed to create HPA: %v", err)
+ framework.Failf("Failed to create HPA: %v", err)
}
defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
@@ -442,13 +441,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
- e2elog.Failf("Failed to get replication controller %s: %v", deployment, err)
+ framework.Failf("Failed to get replication controller %s: %v", deployment, err)
}
replicas := int(deployment.Status.ReadyReplicas)
- e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+ framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
if err != nil {
- e2elog.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
+ framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
}
}

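The waitForReplicas helper above and the DNS helpers below share one idiom from k8s.io/apimachinery: a wait.ConditionFunc returning (done bool, err error), driven by wait.Poll or wait.PollImmediate (which checks once before the first sleep). A self-contained sketch of the idiom, with illustrative interval and timeout values:

    package sketch

    import (
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    // waitForCount polls getCount every 2s for up to 5m until it
    // returns want. Returning a non-nil error from the condition
    // stops the polling immediately; (false, nil) means retry.
    func waitForCount(want int, getCount func() (int, error)) error {
    	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
    		got, err := getCount()
    		if err != nil {
    			return false, err
    		}
    		if got != want {
    			fmt.Printf("waiting for %d replicas (current: %d)\n", want, got)
    			return false, nil
    		}
    		return true, nil
    	})
    }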

@@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -258,7 +257,7 @@ func getScheduableCores(nodes []v1.Node) int64 {
scInt64, scOk := sc.AsInt64()
if !scOk {
- e2elog.Logf("Unable to compute integer values of schedulable cores in the cluster")
+ framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
return 0
}
return scInt64
@@ -276,7 +275,7 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
return err
}
- e2elog.Logf("DNS autoscaling ConfigMap deleted.")
+ framework.Logf("DNS autoscaling ConfigMap deleted.")
return nil
}
@@ -303,7 +302,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
if err != nil {
return err
}
- e2elog.Logf("DNS autoscaling ConfigMap updated.")
+ framework.Logf("DNS autoscaling ConfigMap updated.")
return nil
}
@@ -337,14 +336,14 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
return err
}
- e2elog.Logf("DNS autoscaling pod %v deleted.", podName)
+ framework.Logf("DNS autoscaling pod %v deleted.", podName)
return nil
}

func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
var current int
var expected int
- e2elog.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
+ framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
condition := func() (bool, error) {
current, err = getDNSReplicas(c)
if err != nil {
@@ -352,7 +351,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
}
expected = getExpected(c)
if current != expected {
- e2elog.Logf("Replicas not as expected: got %v, expected %v", current, expected)
+ framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
return false, nil
}
return true, nil
@@ -361,12 +360,12 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
}
- e2elog.Logf("kube-dns reaches expected replicas: %v", expected)
+ framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
}

func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
- e2elog.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
+ framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c)
if err != nil {