Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Merge pull request #52449 from crimsonfaith91/rs-upgrade
Automatic merge from submit-queue (batch tested with PRs 53106, 52193, 51250, 52449, 53861). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

add replicaset upgrade test

**What this PR does / why we need it**: This PR adds an upgrade test for an existing replicaset.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: xref #52118

**Release note**:
```release-note
NONE
```
Commit 2956c16328
```diff
@@ -18,16 +18,14 @@ package framework
 import (
 	"fmt"
 	"time"
 
 	. "github.com/onsi/ginkgo"
 
 	"k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	testutils "k8s.io/kubernetes/test/utils"
 )
```
```diff
@@ -57,48 +55,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
 	return nil
 }
 
-// Delete a ReplicaSet and all pods it spawned
-func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
-	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
-	rs, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
-	if err != nil {
-		if apierrs.IsNotFound(err) {
-			Logf("ReplicaSet %s was already deleted: %v", name, err)
-			return nil
-		}
-		return err
-	}
-	startTime := time.Now()
-	err = clientset.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, &metav1.DeleteOptions{})
-	if apierrs.IsNotFound(err) {
-		Logf("ReplicaSet %s was already deleted: %v", name, err)
-		return nil
-	}
-	deleteRSTime := time.Now().Sub(startTime)
-	Logf("Deleting RS %s took: %v", name, deleteRSTime)
-	if err == nil {
-		err = waitForReplicaSetPodsGone(clientset, rs)
-	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
-	Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
-	return err
-}
-
-// waitForReplicaSetPodsGone waits until there are no pods reported under a
-// ReplicaSet selector (because the pods have completed termination).
-func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
-	return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
-		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
-		ExpectNoError(err)
-		options := metav1.ListOptions{LabelSelector: selector.String()}
-		if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
-			return true, nil
-		}
-		return false, nil
-	})
-}
-
-// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
+// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
 func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 	err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
 		rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
```
```diff
@@ -108,7 +65,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 		return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
 	})
 	if err == wait.ErrWaitTimeout {
-		err = fmt.Errorf("replica set %q never became ready", name)
+		err = fmt.Errorf("replicaset %q never became ready", name)
 	}
 	return err
 }
```
```diff
@@ -119,3 +76,35 @@ func RunReplicaSet(config testutils.ReplicaSetConfig) error {
 	config.ContainerDumpFunc = LogFailedContainers
 	return testutils.RunReplicaSet(config)
 }
+
+func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *extensions.ReplicaSet {
+	return &extensions.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ReplicaSet",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+		Spec: extensions.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: podLabels,
+			},
+			Replicas: &replicas,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: podLabels,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  imageName,
+							Image: image,
+						},
+					},
+				},
+			},
+		},
+	}
+}
```
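The new `NewReplicaSet` helper is consumed the same way the upgrade test added by this PR consumes it: build the object, create it through the typed extensions client, then block until all replicas are ready. A minimal sketch under those assumptions; the function name, namespace, and labels below are hypothetical, not from this PR:

```go
package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// createReadyRS is a sketch only: build a one-replica ReplicaSet with the new
// framework helper, create it via the extensions/v1beta1 client, and wait
// until spec.Replicas == status.Replicas == status.ReadyReplicas.
func createReadyRS(c clientset.Interface, ns string) error {
	rs := framework.NewReplicaSet("example-rs", ns, 1,
		map[string]string{"app": "example"}, "nginx",
		imageutils.GetE2EImage(imageutils.NginxSlim))
	if _, err := c.Extensions().ReplicaSets(ns).Create(rs); err != nil {
		return err
	}
	return framework.WaitForReadyReplicaSet(c, ns, "example-rs")
}
```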
```diff
@@ -42,6 +42,7 @@ import (
 var upgradeTests = []upgrades.Test{
 	&upgrades.ServiceUpgradeTest{},
 	&upgrades.SecretUpgradeTest{},
+	&apps.ReplicaSetUpgradeTest{},
 	&apps.StatefulSetUpgradeTest{},
 	&apps.DeploymentUpgradeTest{},
 	&apps.JobUpgradeTest{},
```
```diff
@@ -11,6 +11,7 @@ go_library(
         "daemonsets.go",
         "deployments.go",
         "job.go",
+        "replicasets.go",
         "statefulset.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/upgrades/apps",
@@ -29,6 +30,7 @@ go_library(
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
```
test/e2e/upgrades/apps/replicasets.go (new file, 101 lines)
```go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrades

import (
	"fmt"
	"time"

	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/upgrades"

	. "github.com/onsi/ginkgo"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	interval = 10 * time.Second
	timeout  = 5 * time.Minute
	rsName   = "rs"
	scaleNum = 2
)

// TODO: Test that the replicaset stays available during master (and maybe
// node and cluster upgrades).

// ReplicaSetUpgradeTest tests that a replicaset survives upgrade.
type ReplicaSetUpgradeTest struct {
	UID types.UID
}

func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }

func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
	c := f.ClientSet
	ns := f.Namespace.Name
	nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)

	By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
	replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
	rs, err := c.Extensions().ReplicaSets(ns).Create(replicaSet)
	framework.ExpectNoError(err)

	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))

	r.UID = rs.UID
}

// Test checks whether the replicasets are the same after an upgrade.
func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
	c := f.ClientSet
	ns := f.Namespace.Name
	rsClient := c.Extensions().ReplicaSets(ns)

	// Block until upgrade is done
	By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
	<-done

	// Verify the RS is the same (survives) after the upgrade
	By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
	upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	if upgradedRS.UID != r.UID {
		framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
	}

	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))

	// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
	By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
	_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *extensions.ReplicaSet) {
		*rs.Spec.Replicas = scaleNum
	})
	framework.ExpectNoError(err)

	By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
	framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
}

// Teardown cleans up any remaining resources.
func (r *ReplicaSetUpgradeTest) Teardown(f *framework.Framework) {
	// rely on the namespace deletion to clean up everything
}
```
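The `done` channel in `Test` is the synchronization point with the upgrade harness: the suite runs the registered tests concurrently with the upgrade and closes `done` once the upgrade finishes, which unblocks the `<-done` receive above. A minimal sketch of that contract; `runOne` and `performUpgrade` are hypothetical stand-ins, not the actual cluster-upgrade suite runner:

```go
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/upgrades"
)

// runOne sketches the Setup/Test/Teardown lifecycle the upgrade suite drives.
// performUpgrade stands in for the real master/node upgrade logic.
func runOne(f *framework.Framework, test upgrades.Test, performUpgrade func()) {
	test.Setup(f) // create the ReplicaSet before the upgrade starts

	done := make(chan struct{})
	go func() {
		defer close(done) // closing done unblocks the <-done receive inside Test
		performUpgrade()
	}()

	// Test blocks on <-done, then verifies the ReplicaSet's UID survived
	// and that it can still scale up to scaleNum ready replicas.
	test.Test(f, done, upgrades.MasterUpgrade)
	test.Teardown(f)
}
```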