cleanup test code in upgrades and autoscaling pkg
parent 92107f30ea
commit d870514162
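The change is mechanical throughout: call sites that used to go through the e2elog wrapper package (test/e2e/framework/log) now call the equivalent helpers exported directly by the framework package, and the corresponding import and BUILD dependency are dropped. A minimal sketch of the resulting call-site shape, assuming the usual printf-style signatures of framework.Logf and framework.Failf; the function below is made up for illustration and is not part of the commit:

    // Hypothetical helper, not from the diff; it only illustrates the pattern
    // that repeats in every hunk below.
    package example

    import (
        // The commit removes this import (and the matching
        // "//test/e2e/framework/log:go_default_library" BUILD dep) everywhere:
        //   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    func validateReplicas(ready, want int) {
        // Previously e2elog.Logf / e2elog.Failf; the arguments stay the same.
        framework.Logf("ready replicas: %d, want: %d", ready, want)
        if ready != want {
            framework.Failf("expected %d ready replicas, got %d", want, ready)
        }
    }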
@@ -40,7 +40,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",

@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/strategicpatch"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/klog"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     testutils "k8s.io/kubernetes/test/utils"
@@ -36,7 +37,6 @@ import (

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
-    "k8s.io/klog"
 )

 const (

@@ -41,9 +41,9 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/klog"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
     "k8s.io/kubernetes/test/e2e/scheduling"
@@ -52,7 +52,6 @@ import (

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
-    "k8s.io/klog"
 )

 const (
@@ -211,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
         framework.SkipUnlessProviderIs("gke")
         if gpuType == "" {
-            e2elog.Failf("TEST_GPU_TYPE not defined")
+            framework.Failf("TEST_GPU_TYPE not defined")
             return
         }

@@ -238,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
         framework.SkipUnlessProviderIs("gke")
         if gpuType == "" {
-            e2elog.Failf("TEST_GPU_TYPE not defined")
+            framework.Failf("TEST_GPU_TYPE not defined")
             return
         }

@@ -268,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
         framework.SkipUnlessProviderIs("gke")
         if gpuType == "" {
-            e2elog.Failf("TEST_GPU_TYPE not defined")
+            framework.Failf("TEST_GPU_TYPE not defined")
             return
         }

@@ -297,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
         framework.SkipUnlessProviderIs("gke")
         if gpuType == "" {
-            e2elog.Failf("TEST_GPU_TYPE not defined")
+            framework.Failf("TEST_GPU_TYPE not defined")
             return
         }

@@ -499,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     defer func() {
         errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
         if len(errs) > 0 {
-            e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+            framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
         }
         pv, pvc = nil, nil
         if diskName != "" {
@@ -921,10 +920,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
         time.Sleep(scaleUpTimeout)
         currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
+        framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
         framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
         status, err := getClusterwideStatus(c)
-        e2elog.Logf("Clusterwide status: %v", status)
+        framework.Logf("Clusterwide status: %v", status)
         framework.ExpectNoError(err)
         framework.ExpectEqual(status, "Unhealthy")
     }
@@ -1232,7 +1231,7 @@ func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
     nodes := make([]*v1.Node, 0, 1)
     nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet)
     if err != nil {
-        e2elog.Logf("Unexpected error occurred: %v", err)
+        framework.Logf("Unexpected error occurred: %v", err)
     }
     // TODO: write a wrapper for ExpectNoErrorWithOffset()
     framework.ExpectNoErrorWithOffset(0, err)
@@ -1309,7 +1308,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
             return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
         }
     }
-    e2elog.Failf("Failed to reserve memory within timeout")
+    framework.Failf("Failed to reserve memory within timeout")
     return nil
 }

@@ -1880,7 +1879,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
             }
         }
         if finalErr != nil {
-            e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
+            framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
         }
     }

@@ -30,7 +30,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

     "github.com/onsi/ginkgo"
@@ -240,35 +239,35 @@ func (tc *CustomMetricTestCase) Run() {
     // and uncomment following lines:
     /*
         ts, err := google.DefaultTokenSource(oauth2.NoContext)
-        e2elog.Logf("Couldn't get application default credentials, %v", err)
+        framework.Logf("Couldn't get application default credentials, %v", err)
         if err != nil {
-            e2elog.Failf("Error accessing application default credentials, %v", err)
+            framework.Failf("Error accessing application default credentials, %v", err)
         }
         client := oauth2.NewClient(oauth2.NoContext, ts)
     */

     gcmService, err := gcm.New(client)
     if err != nil {
-        e2elog.Failf("Failed to create gcm service, %v", err)
+        framework.Failf("Failed to create gcm service, %v", err)
     }

     // Set up a cluster: create a custom metric and set up k8s-sd adapter
     err = monitoring.CreateDescriptors(gcmService, projectID)
     if err != nil {
-        e2elog.Failf("Failed to create metric descriptor: %v", err)
+        framework.Failf("Failed to create metric descriptor: %v", err)
     }
     defer monitoring.CleanupDescriptors(gcmService, projectID)

     err = monitoring.CreateAdapter(monitoring.AdapterDefault)
     if err != nil {
-        e2elog.Failf("Failed to set up: %v", err)
+        framework.Failf("Failed to set up: %v", err)
     }
     defer monitoring.CleanupAdapter(monitoring.AdapterDefault)

     // Run application that exports the metric
     err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
     if err != nil {
-        e2elog.Failf("Failed to create stackdriver-exporter pod: %v", err)
+        framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
     }
     defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)

@@ -278,7 +277,7 @@ func (tc *CustomMetricTestCase) Run() {
     // Autoscale the deployment
     _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
     if err != nil {
-        e2elog.Failf("Failed to create HPA: %v", err)
+        framework.Failf("Failed to create HPA: %v", err)
     }
     defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})

@@ -442,13 +441,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
     err := wait.PollImmediate(interval, timeout, func() (bool, error) {
         deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
         if err != nil {
-            e2elog.Failf("Failed to get replication controller %s: %v", deployment, err)
+            framework.Failf("Failed to get replication controller %s: %v", deployment, err)
         }
         replicas := int(deployment.Status.ReadyReplicas)
-        e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+        framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
         return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
     })
     if err != nil {
-        e2elog.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
+        framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
     }
 }

@@ -29,7 +29,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -258,7 +257,7 @@ func getScheduableCores(nodes []v1.Node) int64 {

     scInt64, scOk := sc.AsInt64()
     if !scOk {
-        e2elog.Logf("Unable to compute integer values of schedulable cores in the cluster")
+        framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
         return 0
     }
     return scInt64
@@ -276,7 +275,7 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
     if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
         return err
     }
-    e2elog.Logf("DNS autoscaling ConfigMap deleted.")
+    framework.Logf("DNS autoscaling ConfigMap deleted.")
     return nil
 }

@@ -303,7 +302,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
     if err != nil {
         return err
     }
-    e2elog.Logf("DNS autoscaling ConfigMap updated.")
+    framework.Logf("DNS autoscaling ConfigMap updated.")
     return nil
 }

@@ -337,14 +336,14 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
     if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
         return err
     }
-    e2elog.Logf("DNS autoscaling pod %v deleted.", podName)
+    framework.Logf("DNS autoscaling pod %v deleted.", podName)
     return nil
 }

 func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
     var current int
     var expected int
-    e2elog.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
+    framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
     condition := func() (bool, error) {
         current, err = getDNSReplicas(c)
         if err != nil {
@@ -352,7 +351,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
         }
         expected = getExpected(c)
         if current != expected {
-            e2elog.Logf("Replicas not as expected: got %v, expected %v", current, expected)
+            framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
             return false, nil
         }
         return true, nil
@@ -361,12 +360,12 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
     if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
         return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
     }
-    e2elog.Logf("kube-dns reaches expected replicas: %v", expected)
+    framework.Logf("kube-dns reaches expected replicas: %v", expected)
     return nil
 }

 func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
-    e2elog.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
+    framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
     condition := func() (bool, error) {
         configMap, err = fetchDNSScalingConfigMap(c)
         if err != nil {

@@ -37,7 +37,6 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",

@@ -21,7 +21,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -80,7 +79,7 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
     // rely on the namespace deletion to clean up everything
     ginkgo.By("Logging container failures")
-    framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
+    framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 }

 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
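The AppArmor teardown hunk above is the one spot where the logger is passed as a value rather than called: framework.LogFailedContainers now receives framework.Logf where it used to receive e2elog.Logf. A small sketch of why that swap is a drop-in change, assuming (not confirmed by the diff) that the callback parameter is the usual printf-style function type; logWith and its message are made up for illustration:

    package example

    import "k8s.io/kubernetes/test/e2e/framework"

    // logWith stands in for any helper that takes a printf-style logger, the way
    // framework.LogFailedContainers does in the hunk above.
    func logWith(logf func(format string, args ...interface{})) {
        logf("container %q restarted %d times", "test-ctr", 3)
    }

    func example() {
        // framework.Logf has the same shape e2elog.Logf had, so it can be passed
        // directly, which is all the updated Teardown does.
        logWith(framework.Logf)
    }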
@@ -29,7 +29,6 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/upgrades:go_default_library",

@@ -26,7 +26,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/upgrades"
 )

@@ -81,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
     ginkgo.By("Creating a DaemonSet")
     var err error
     if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
-        e2elog.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
+        framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
     }

     ginkgo.By("Waiting for DaemonSet pods to become ready")
@@ -114,7 +113,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
     res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
     framework.ExpectNoError(err)
     if !res {
-        e2elog.Failf("expected DaemonSet pod to be running on all nodes, it was not")
+        framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
     }

     // DaemonSet resource itself should be good
@@ -122,7 +121,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
     res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
     framework.ExpectNoError(err)
     if !res {
-        e2elog.Failf("expected DaemonSet to be in a good state, it was not")
+        framework.Failf("expected DaemonSet to be in a good state, it was not")
     }
 }

@@ -135,7 +134,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
     nodeNames := make([]string, 0)
     for _, node := range nodeList.Items {
         if len(node.Spec.Taints) != 0 {
-            e2elog.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
+            framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
         }
         // DaemonSet Pods are expected to run on all the nodes in e2e.
         nodeNames = append(nodeNames, node.Name)
@@ -156,11 +155,11 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma
     nodesToPodCount := make(map[string]int)
     for _, pod := range pods {
         if controller.IsPodActive(&pod) {
-            e2elog.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
+            framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
             nodesToPodCount[pod.Spec.NodeName]++
         }
     }
-    e2elog.Logf("nodesToPodCount: %v", nodesToPodCount)
+    framework.Logf("nodesToPodCount: %v", nodesToPodCount)

     // Ensure that exactly 1 pod is running on all nodes in nodeNames.
     for _, nodeName := range nodeNames {

@@ -32,7 +32,6 @@ import (
     "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
     "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -90,13 +89,13 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
             return false, nil
         }
         if _, err := t.listUsers(); err != nil {
-            e2elog.Logf("Service endpoint is up but isn't responding")
+            framework.Logf("Service endpoint is up but isn't responding")
             return false, nil
         }
         return true, nil
     })
     framework.ExpectNoError(err)
-    e2elog.Logf("Service endpoint is up")
+    framework.Logf("Service endpoint is up")

     ginkgo.By("Adding 2 dummy users")
     err = t.addUser("Alice")
@@ -177,7 +176,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
     go wait.Until(func() {
         writeAttempts++
         if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-            e2elog.Logf("Unable to add user: %v", err)
+            framework.Logf("Unable to add user: %v", err)
             mu.Lock()
             errors[err.Error()]++
             mu.Unlock()
@@ -189,7 +188,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
     wait.Until(func() {
         users, err := t.listUsers()
         if err != nil {
-            e2elog.Logf("Could not retrieve users: %v", err)
+            framework.Logf("Could not retrieve users: %v", err)
             failures++
             mu.Lock()
             errors[err.Error()]++
@@ -199,14 +198,14 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
         success++
         lastUserCount = len(users)
     }, 10*time.Millisecond, done)
-    e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+    framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)

     gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
     ratio := float64(success) / float64(success+failures)
-    e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+    framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
     ratio = float64(t.successfulWrites) / float64(writeAttempts)
-    e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+    framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-    e2elog.Logf("Errors: %v", errors)
+    framework.Logf("Errors: %v", errors)
     // TODO(maisem): tweak this value once we have a few test runs.
     gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }

@@ -21,12 +21,11 @@ import (

     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
-    "k8s.io/apimachinery/pkg/util/uuid"
 )

 // ConfigMapUpgradeTest tests that a ConfigMap is available before and after
@@ -59,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
     ginkgo.By("Creating a ConfigMap")
     var err error
     if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
-        e2elog.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
+        framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
     }

     ginkgo.By("Making sure the ConfigMap is consumable")

@@ -32,7 +32,6 @@ import (
     "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
     "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -85,13 +84,13 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
             return false, nil
         }
         if _, err := t.listUsers(); err != nil {
-            e2elog.Logf("Service endpoint is up but isn't responding")
+            framework.Logf("Service endpoint is up but isn't responding")
             return false, nil
         }
         return true, nil
     })
     framework.ExpectNoError(err)
-    e2elog.Logf("Service endpoint is up")
+    framework.Logf("Service endpoint is up")

     ginkgo.By("Adding 2 dummy users")
     err = t.addUser("Alice")
@@ -165,7 +164,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
     go wait.Until(func() {
         writeAttempts++
         if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-            e2elog.Logf("Unable to add user: %v", err)
+            framework.Logf("Unable to add user: %v", err)
             mu.Lock()
             errors[err.Error()]++
             mu.Unlock()
@@ -177,7 +176,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
     wait.Until(func() {
         users, err := t.listUsers()
         if err != nil {
-            e2elog.Logf("Could not retrieve users: %v", err)
+            framework.Logf("Could not retrieve users: %v", err)
             failures++
             mu.Lock()
             errors[err.Error()]++
@@ -187,14 +186,14 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
         success++
         lastUserCount = len(users)
     }, 10*time.Millisecond, done)
-    e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+    framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)

     gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
     ratio := float64(success) / float64(success+failures)
-    e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+    framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
     ratio = float64(t.successfulWrites) / float64(writeAttempts)
-    e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+    framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-    e2elog.Logf("Errors: %v", errors)
+    framework.Logf("Errors: %v", errors)
     // TODO(maisem): tweak this value once we have a few test runs.
     gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }

@@ -27,7 +27,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"

     "github.com/onsi/ginkgo"
 )
@@ -110,12 +109,12 @@ func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
 }

 func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
-    e2elog.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
+    framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)

     condition := func() (bool, error) {
         pods, err := getKubeProxyStaticPods(c)
         if err != nil {
-            e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
+            framework.Logf("Failed to get kube-proxy static pods: %v", err)
             return false, nil
         }

@@ -127,7 +126,7 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
             }
         }
         if numberkubeProxyPods != numberSchedulableNodes {
-            e2elog.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
+            framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
             return false, nil
         }
         return true, nil
@@ -140,17 +139,17 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
-    e2elog.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
+    framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)

     condition := func() (bool, error) {
         pods, err := getKubeProxyStaticPods(c)
         if err != nil {
-            e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
+            framework.Logf("Failed to get kube-proxy static pods: %v", err)
             return false, nil
         }

         if len(pods.Items) != 0 {
-            e2elog.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
+            framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
             return false, nil
         }
         return true, nil
@@ -163,24 +162,24 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
-    e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
+    framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)

     condition := func() (bool, error) {
         daemonSets, err := getKubeProxyDaemonSet(c)
         if err != nil {
-            e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+            framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
             return false, nil
         }

         if len(daemonSets.Items) != 1 {
-            e2elog.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
+            framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
             return false, nil
         }

         numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
         numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
         if numberkubeProxyPods != numberSchedulableNodes {
-            e2elog.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
+            framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
             return false, nil
         }
         return true, nil
@@ -193,17 +192,17 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
-    e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
+    framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)

     condition := func() (bool, error) {
         daemonSets, err := getKubeProxyDaemonSet(c)
         if err != nil {
-            e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+            framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
             return false, nil
         }

         if len(daemonSets.Items) != 0 {
-            e2elog.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
+            framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
             return false, nil
         }
         return true, nil

@@ -32,7 +32,6 @@ import (
     "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
     "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -100,13 +99,13 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
             return false, nil
         }
         if _, err := t.countNames(); err != nil {
-            e2elog.Logf("Service endpoint is up but isn't responding")
+            framework.Logf("Service endpoint is up but isn't responding")
             return false, nil
         }
         return true, nil
     })
     framework.ExpectNoError(err)
-    e2elog.Logf("Service endpoint is up")
+    framework.Logf("Service endpoint is up")

     ginkgo.By("Adding 2 names to the database")
     err = t.addName(strconv.Itoa(t.nextWrite))
@@ -128,7 +127,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
     go wait.Until(func() {
         _, err := t.countNames()
         if err != nil {
-            e2elog.Logf("Error while trying to read data: %v", err)
+            framework.Logf("Error while trying to read data: %v", err)
             readFailure++
         } else {
             readSuccess++
@@ -138,7 +137,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
     wait.Until(func() {
         err := t.addName(strconv.Itoa(t.nextWrite))
         if err != nil {
-            e2elog.Logf("Error while trying to write data: %v", err)
+            framework.Logf("Error while trying to write data: %v", err)
             writeFailure++
         } else {
             writeSuccess++
@@ -146,10 +145,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
     }, framework.Poll, done)

     t.successfulWrites = writeSuccess
-    e2elog.Logf("Successful reads: %d", readSuccess)
+    framework.Logf("Successful reads: %d", readSuccess)
-    e2elog.Logf("Successful writes: %d", writeSuccess)
+    framework.Logf("Successful writes: %d", writeSuccess)
-    e2elog.Logf("Failed reads: %d", readFailure)
+    framework.Logf("Failed reads: %d", readFailure)
-    e2elog.Logf("Failed writes: %d", writeFailure)
+    framework.Logf("Failed writes: %d", writeFailure)

     // TODO: Not sure what the ratio defining a successful test run should be. At time of writing the
     // test, failures only seem to happen when a race condition occurs (read/write starts, doesn't
@@ -158,10 +157,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
     readRatio := float64(readSuccess) / float64(readSuccess+readFailure)
     writeRatio := float64(writeSuccess) / float64(writeSuccess+writeFailure)
     if readRatio < 0.75 {
-        e2elog.Failf("Too many failures reading data. Success ratio: %f", readRatio)
+        framework.Failf("Too many failures reading data. Success ratio: %f", readRatio)
     }
     if writeRatio < 0.75 {
-        e2elog.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
+        framework.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
     }
 }

@@ -23,7 +23,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -57,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
     ginkgo.By("Creating a secret")
     var err error
     if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
-        e2elog.Failf("unable to create test secret %s: %v", t.secret.Name, err)
+        framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
     }

     ginkgo.By("Making sure the secret is consumable")

@@ -18,7 +18,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/storage/utils:go_default_library",

@@ -17,15 +17,14 @@ limitations under the License.
 package storage

 import (
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+    "k8s.io/kubernetes/test/e2e/upgrades"

     "github.com/onsi/ginkgo"
-    "k8s.io/kubernetes/test/e2e/upgrades"
 )

 // PersistentVolumeUpgradeTest test that a pv is available before and after a cluster upgrade.
@@ -75,7 +74,7 @@ func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan s
 func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
     errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc)
     if len(errs) > 0 {
-        e2elog.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
+        framework.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
     }
 }

@@ -21,7 +21,7 @@ import (

     "github.com/onsi/ginkgo"

-    v1 "k8s.io/api/core/v1"
+    "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"