From db2c02715438e53f45f4d2d07f3af4b5bb4924d8 Mon Sep 17 00:00:00 2001
From: Jun Xiang Tee
Date: Mon, 16 Oct 2017 12:10:04 -0700
Subject: [PATCH] add replicaset upgrade test

---
 test/e2e/framework/rs_util.go         |  81 +++++++++------------
 test/e2e/lifecycle/cluster_upgrade.go |   1 +
 test/e2e/upgrades/apps/BUILD          |   2 +
 test/e2e/upgrades/apps/replicasets.go | 101 ++++++++++++++++++++++++++
 4 files changed, 139 insertions(+), 46 deletions(-)
 create mode 100644 test/e2e/upgrades/apps/replicasets.go

diff --git a/test/e2e/framework/rs_util.go b/test/e2e/framework/rs_util.go
index a7c6ce57172..21b9f751c1c 100644
--- a/test/e2e/framework/rs_util.go
+++ b/test/e2e/framework/rs_util.go
@@ -18,16 +18,14 @@ package framework
 
 import (
 	"fmt"
-	"time"
 
 	. "github.com/onsi/ginkgo"
+	"k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
@@ -57,48 +55,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
 	return nil
 }
 
-// Delete a ReplicaSet and all pods it spawned
-func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
-	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
-	rs, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
-	if err != nil {
-		if apierrs.IsNotFound(err) {
-			Logf("ReplicaSet %s was already deleted: %v", name, err)
-			return nil
-		}
-		return err
-	}
-	startTime := time.Now()
-	err = clientset.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, &metav1.DeleteOptions{})
-	if apierrs.IsNotFound(err) {
-		Logf("ReplicaSet %s was already deleted: %v", name, err)
-		return nil
-	}
-	deleteRSTime := time.Now().Sub(startTime)
-	Logf("Deleting RS %s took: %v", name, deleteRSTime)
-	if err == nil {
-		err = waitForReplicaSetPodsGone(clientset, rs)
-	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
-	Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
-	return err
-}
-
-// waitForReplicaSetPodsGone waits until there are no pods reported under a
-// ReplicaSet selector (because the pods have completed termination).
-func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
-	return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
-		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
-		ExpectNoError(err)
-		options := metav1.ListOptions{LabelSelector: selector.String()}
-		if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
-			return true, nil
-		}
-		return false, nil
-	})
-}
-
-// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
+// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
 func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 	err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
 		rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
@@ -108,7 +65,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 		return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
 	})
 	if err == wait.ErrWaitTimeout {
-		err = fmt.Errorf("replica set %q never became ready", name)
+		err = fmt.Errorf("replicaset %q never became ready", name)
 	}
 	return err
 }
@@ -119,3 +76,35 @@ func RunReplicaSet(config testutils.ReplicaSetConfig) error {
 	config.ContainerDumpFunc = LogFailedContainers
 	return testutils.RunReplicaSet(config)
 }
+
+func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *extensions.ReplicaSet {
+	return &extensions.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ReplicaSet",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+		Spec: extensions.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: podLabels,
+			},
+			Replicas: &replicas,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: podLabels,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  imageName,
+							Image: image,
+						},
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/test/e2e/lifecycle/cluster_upgrade.go b/test/e2e/lifecycle/cluster_upgrade.go
index d52855d4857..6986be69c7d 100644
--- a/test/e2e/lifecycle/cluster_upgrade.go
+++ b/test/e2e/lifecycle/cluster_upgrade.go
@@ -42,6 +42,7 @@ import (
 var upgradeTests = []upgrades.Test{
 	&upgrades.ServiceUpgradeTest{},
 	&upgrades.SecretUpgradeTest{},
+	&apps.ReplicaSetUpgradeTest{},
 	&apps.StatefulSetUpgradeTest{},
 	&apps.DeploymentUpgradeTest{},
 	&apps.JobUpgradeTest{},
diff --git a/test/e2e/upgrades/apps/BUILD b/test/e2e/upgrades/apps/BUILD
index 65a271b188c..be4e825dd7d 100644
--- a/test/e2e/upgrades/apps/BUILD
+++ b/test/e2e/upgrades/apps/BUILD
@@ -11,6 +11,7 @@ go_library(
         "daemonsets.go",
         "deployments.go",
         "job.go",
+        "replicasets.go",
         "statefulset.go",
     ],
     deps = [
@@ -28,6 +29,7 @@ go_library(
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go
new file mode 100644
index 00000000000..a9cea7555a0
--- /dev/null
+++ b/test/e2e/upgrades/apps/replicasets.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrades
+
+import (
+	"fmt"
+	"time"
+
+	extensions "k8s.io/api/extensions/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/upgrades"
+
+	. "github.com/onsi/ginkgo"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+)
+
+const (
+	interval = 10 * time.Second
+	timeout  = 5 * time.Minute
+	rsName   = "rs"
+	scaleNum = 2
+)
+
+// TODO: Test that the replicaset stays available during master (and maybe
+// node and cluster upgrades).
+
+// ReplicaSetUpgradeTest tests that a replicaset survives upgrade.
+type ReplicaSetUpgradeTest struct {
+	UID types.UID
+}
+
+func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
+
+func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
+	c := f.ClientSet
+	ns := f.Namespace.Name
+	nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
+
+	By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
+	replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
+	rs, err := c.Extensions().ReplicaSets(ns).Create(replicaSet)
+	framework.ExpectNoError(err)
+
+	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
+	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+
+	r.UID = rs.UID
+}
+
+// Test checks whether the replicaset is the same (by UID) after an upgrade.
+func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
+	c := f.ClientSet
+	ns := f.Namespace.Name
+	rsClient := c.Extensions().ReplicaSets(ns)
+
+	// Block until upgrade is done
+	By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
+	<-done
+
+	// Verify the RS is the same (survives) after the upgrade
+	By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
+	upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+	if upgradedRS.UID != r.UID {
+		framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
+	}
+
+	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
+	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+
+	// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
+	By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
+	_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *extensions.ReplicaSet) {
+		*rs.Spec.Replicas = scaleNum
+	})
+	framework.ExpectNoError(err)
+
+	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
+	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
+}
+
+// Teardown cleans up any remaining resources.
+func (r *ReplicaSetUpgradeTest) Teardown(f *framework.Framework) {
+	// rely on the namespace deletion to clean up everything
+}
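---

For reviewers unfamiliar with the upgrade-test plumbing: the one-line addition to
upgradeTests in cluster_upgrade.go is all the registration this test needs, because
ReplicaSetUpgradeTest satisfies the upgrades.Test interface that the lifecycle suites
iterate over. A minimal sketch of that interface follows; it is paraphrased from
test/e2e/upgrades/upgrades.go as it stood around this change, not part of the patch
itself, so treat the doc comments as approximate:

    // Sketch of the upgrades.Test interface (paraphrased, not part of this
    // patch). Setup runs before the upgrade begins, Test runs while the
    // upgrade is in progress (the done channel closes when it finishes),
    // and Teardown runs afterwards.
    type Test interface {
        // Name returns the tracking name of the test.
        Name() string
        // Setup creates and verifies whatever objects need to exist
        // before the upgrade disruption starts.
        Setup(f *framework.Framework)
        // Test may block on the done channel (as this test does, since it
        // only checks survival) and then verify post-upgrade state; the
        // upgrade argument identifies which kind of upgrade is running.
        Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
        // Teardown cleans up any objects that outlive the test namespace.
        Teardown(f *framework.Framework)
    }

Setup, Test, and Teardown in replicasets.go map one-to-one onto these methods, with
the UID comparison in Test providing the actual survival assertion.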