convert testScaledRolloutDeployment e2e test to integration test
This commit is contained in:
Parent: 849d7f8595
Commit: 25469e9b44
@@ -69,7 +69,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
     ],
 )
@@ -35,7 +35,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
     clientset "k8s.io/client-go/kubernetes"
-    extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -87,10 +86,6 @@ var _ = SIGDescribe("Deployment", func() {
     It("deployment should support rollback", func() {
         testRollbackDeployment(f)
     })
-    It("scaled rollout deployment should not block on annotation check", func() {
-        testScaledRolloutDeployment(f)
-    })
-
     It("iterative rollouts should eventually progress", func() {
         testIterativeDeployments(f)
     })
@@ -621,159 +616,6 @@ func testRollbackDeployment(f *framework.Framework) {
     Expect(err).NotTo(HaveOccurred())
 }
 
-func testScaledRolloutDeployment(f *framework.Framework) {
-    ns := f.Namespace.Name
-    c := f.ClientSet
-
-    podLabels := map[string]string{"name": NginxImageName}
-    replicas := int32(10)
-
-    // Create a nginx deployment.
-    deploymentName := "nginx"
-    d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
-    d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
-    d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
-    d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
-
-    framework.Logf("Creating deployment %q", deploymentName)
-    deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for observed generation %d", deployment.Generation)
-    Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
-
-    // Verify that the required pods have come up.
-    framework.Logf("Waiting for all required pods to come up")
-    err = framework.VerifyPodsRunning(f.ClientSet, ns, NginxImageName, false, *(deployment.Spec.Replicas))
-    Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
-
-    framework.Logf("Waiting for deployment %q to complete", deployment.Name)
-    Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
-
-    first, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    // Update the deployment with a non-existent image so that the new replica set will be blocked.
-    framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
-    deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
-        update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
-    })
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for observed generation %d", deployment.Generation)
-    err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
-    Expect(err).NotTo(HaveOccurred())
-
-    deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
-
-    if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
-        Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
-    }
-
-    framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
-    second, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    first, err = c.ExtensionsV1beta1().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
-
-    firstCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), first)
-    err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
-    Expect(err).NotTo(HaveOccurred())
-
-    secondCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), second)
-    err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Updating the size (up) and template at the same time for deployment %q", deploymentName)
-    newReplicas := int32(20)
-    deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
-        update.Spec.Replicas = &newReplicas
-        update.Spec.Template.Spec.Containers[0].Image = NautilusImage
-    })
-    Expect(err).NotTo(HaveOccurred())
-
-    err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
-    Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
-
-    oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    for _, rs := range append(oldRSs, rs) {
-        framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
-        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
-        if !ok || desired == *(deployment.Spec.Replicas) {
-            continue
-        }
-        err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
-        Expect(err).NotTo(HaveOccurred())
-    }
-
-    // Update the deployment with a non-existent image so that the new replica set will be blocked.
-    framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
-    deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
-        update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
-    })
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for observed generation %d", deployment.Generation)
-    err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
-    Expect(err).NotTo(HaveOccurred())
-
-    deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
-
-    if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) {
-        Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred())
-    }
-
-    framework.Logf("Checking that the replica sets for %q are synced", deploymentName)
-    oldRs, err := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
-    Expect(err).NotTo(HaveOccurred())
-
-    newRs, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    oldCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), oldRs)
-    err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
-    Expect(err).NotTo(HaveOccurred())
-
-    newCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), newRs)
-    err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Updating the size (down) and template at the same time for deployment %q", deploymentName)
-    newReplicas = int32(5)
-    deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
-        update.Spec.Replicas = &newReplicas
-        update.Spec.Template.Spec.Containers[0].Image = KittenImage
-    })
-    Expect(err).NotTo(HaveOccurred())
-
-    err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
-    Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
-
-    oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    for _, rs := range append(oldRSs, rs) {
-        framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)
-        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
-        if !ok || desired == *(deployment.Spec.Replicas) {
-            continue
-        }
-        err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
-        Expect(err).NotTo(HaveOccurred())
-    }
-}
-
 func randomScale(d *extensions.Deployment, i int) {
     switch r := rand.Float32(); {
     case r < 0.3:
@@ -904,17 +746,6 @@ func testIterativeDeployments(f *framework.Framework) {
     Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
 }
 
-func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
-    desiredGeneration := replicaSet.Generation
-    return func() (bool, error) {
-        rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(rs.Spec.Replicas), nil
-    }
-}
-
 func testDeploymentsControllerRef(f *framework.Framework) {
     ns := f.Namespace.Name
     c := f.ClientSet
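
Note: the deleted replicaSetHasDesiredReplicas helper above is a plain wait.ConditionFunc; it captures the ReplicaSet generation up front and polls until the observed status catches up. Below is a minimal, self-contained sketch of that polling pattern. The pollUntilDesired function and its getObserved closure are invented for illustration; wait.PollImmediate is the real apimachinery call.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollUntilDesired mimics the deleted helper's shape: build a ConditionFunc
// that compares observed state against a generation captured up front, then
// hand it to wait.PollImmediate.
func pollUntilDesired(getObserved func() (gen int64, got, want int32, err error), desiredGen int64) error {
	cond := func() (bool, error) {
		gen, got, want, err := getObserved()
		if err != nil {
			return false, err // a hard error aborts the poll immediately
		}
		return gen >= desiredGen && got == want, nil
	}
	return wait.PollImmediate(10*time.Millisecond, 1*time.Minute, cond)
}

func main() {
	calls := 0
	err := pollUntilDesired(func() (int64, int32, int32, error) {
		calls++ // pretend the controller converges after a few polls
		if calls < 3 {
			return 1, 2, 3, nil
		}
		return 2, 3, 3, nil
	}, 2)
	fmt.Println("converged:", err == nil)
}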
@@ -954,16 +785,6 @@ func testDeploymentsControllerRef(f *framework.Framework) {
     Expect(err).NotTo(HaveOccurred())
 }
 
-func waitDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) func() (bool, error) {
-    return func() (bool, error) {
-        err := checkDeploymentReplicaSetsControllerRef(c, ns, uid, label)
-        if err != nil {
-            return false, nil
-        }
-        return true, nil
-    }
-}
-
 func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
     rsList := listDeploymentReplicaSets(c, ns, label)
     for _, rs := range rsList.Items {
@@ -42,6 +42,7 @@ go_library(
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
@@ -876,3 +876,195 @@ func TestOverlappingDeployments(t *testing.T) {
         }
     }
 }
+
+// Deployment should not block rollout when updating spec replica number and template at the same time.
+func TestScaledRolloutDeployment(t *testing.T) {
+    s, closeFn, rm, dc, informers, c := dcSetup(t)
+    defer closeFn()
+    name := "test-scaled-rollout-deployment"
+    ns := framework.CreateTestingNamespace(name, s, t)
+    defer framework.DeleteTestingNamespace(ns, s, t)
+
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    informers.Start(stopCh)
+    go rm.Run(5, stopCh)
+    go dc.Run(5, stopCh)
+
+    // Create a deployment with rolling update strategy, max surge = 3, and max unavailable = 2
+    var err error
+    replicas := int32(10)
+    tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+    tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
+    tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+    if err != nil {
+        t.Fatalf("failed to create deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Record current replicaset before starting new rollout
+    firstRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Update the deployment with another new image but do not mark the pods as ready to block new replicaset
+    fakeImage2 := "fakeimage2"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage2
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("2", fakeImage2); err != nil {
+        t.Fatal(err)
+    }
+
+    // Verify the deployment has minimum available replicas after 2nd rollout
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get deployment %q: %v", name, err)
+    }
+    minAvailableReplicas := deploymentutil.MinAvailable(tester.deployment)
+    if tester.deployment.Status.AvailableReplicas < minAvailableReplicas {
+        t.Fatalf("deployment %q does not have minimum number of available replicas after 2nd rollout", name)
+    }
+
+    // Wait for old replicaset of 1st rollout to have desired replicas
+    firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err)
+    }
+    if err = tester.waitRSStable(firstRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Wait for new replicaset of 2nd rollout to have desired replicas
+    secondRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitRSStable(secondRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Scale up the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
+    newReplicas := int32(20)
+    fakeImage3 := "fakeimage3"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Replicas = &newReplicas
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage3
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("3", fakeImage3); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Verify every replicaset has correct desiredReplicas annotation after 3rd rollout
+    thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
+    if err != nil {
+        t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err)
+    }
+    rss := []*v1beta1.ReplicaSet{firstRS, secondRS, thirdRS}
+    for _, curRS := range rss {
+        curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+        if err != nil {
+            t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
+        }
+        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(curRS)
+        if !ok {
+            t.Fatalf("failed to retrieve desiredReplicas annotation for replicaset %q", curRS.Name)
+        }
+        if desired != *(tester.deployment.Spec.Replicas) {
+            t.Fatalf("unexpected desiredReplicas annotation for replicaset %q: expected %d, got %d", curRS.Name, *(tester.deployment.Spec.Replicas), desired)
+        }
+    }
+
+    // Update the deployment with another new image but do not mark the pods as ready to block new replicaset
+    fakeImage4 := "fakeimage4"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage4
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("4", fakeImage4); err != nil {
+        t.Fatal(err)
+    }
+
+    // Verify the deployment has minimum available replicas after 4th rollout
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get deployment %q: %v", name, err)
+    }
+    minAvailableReplicas = deploymentutil.MinAvailable(tester.deployment)
+    if tester.deployment.Status.AvailableReplicas < minAvailableReplicas {
+        t.Fatalf("deployment %q does not have minimum number of available replicas after 4th rollout", name)
+    }
+
+    // Wait for old replicaset of 3rd rollout to have desired replicas
+    thirdRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err)
+    }
+    if err = tester.waitRSStable(thirdRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Wait for new replicaset of 4th rollout to have desired replicas
+    fourthRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitRSStable(fourthRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Scale down the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
+    newReplicas = int32(5)
+    fakeImage5 := "fakeimage5"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Replicas = &newReplicas
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage5
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("5", fakeImage5); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Verify every replicaset has correct desiredReplicas annotation after 5th rollout
+    fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
+    if err != nil {
+        t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err)
+    }
+    rss = []*v1beta1.ReplicaSet{thirdRS, fourthRS, fifthRS}
+    for _, curRS := range rss {
+        curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+        if err != nil {
+            t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
+        }
+        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(curRS)
+        if !ok {
+            t.Fatalf("failed to retrieve desiredReplicas annotation for replicaset %q", curRS.Name)
+        }
+        if desired != *(tester.deployment.Spec.Replicas) {
+            t.Fatalf("unexpected desiredReplicas annotation for replicaset %q: expected %d, got %d", curRS.Name, *(tester.deployment.Spec.Replicas), desired)
+        }
+    }
+}
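
Note: the availability assertions in the new test follow directly from the strategy parameters. A small sketch of the arithmetic, assuming absolute (non-percentage) values for maxSurge and maxUnavailable, which is what deploymentutil.MinAvailable evaluates to for this spec:

package main

import "fmt"

func main() {
	// Strategy parameters from the test above.
	replicas, maxSurge, maxUnavailable := int32(10), int32(3), int32(2)
	fmt.Println("max pods during rollout:", replicas+maxSurge)            // 13
	fmt.Println("min available (MinAvailable):", replicas-maxUnavailable) // 8
}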
@@ -26,6 +26,7 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
@@ -80,7 +81,8 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
             Replicas: &replicas,
             Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
             Strategy: v1beta1.DeploymentStrategy{
                 Type: v1beta1.RollingUpdateDeploymentStrategyType,
+                RollingUpdate: new(v1beta1.RollingUpdateDeployment),
             },
             Template: v1.PodTemplateSpec{
                 ObjectMeta: metav1.ObjectMeta{
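
Note: pre-allocating RollingUpdate here matters because TestScaledRolloutDeployment assigns through that pointer; on a nil struct the write would panic. A minimal standalone sketch, with all names local to the example:

package main

import (
	"fmt"

	"k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	s := v1beta1.DeploymentStrategy{
		Type:          v1beta1.RollingUpdateDeploymentStrategyType,
		RollingUpdate: new(v1beta1.RollingUpdateDeployment), // non-nil, as newDeployment now guarantees
	}
	maxSurge := intstr.FromInt(3)
	s.RollingUpdate.MaxSurge = &maxSurge // would panic here if RollingUpdate were nil
	fmt.Println(s.RollingUpdate.MaxSurge.IntValue())
}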
@@ -212,6 +214,11 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
     return err
 }
 
+func intOrStrP(num int) *intstr.IntOrString {
+    intstr := intstr.FromInt(num)
+    return &intstr
+}
+
 // markUpdatedPodsReady manually marks updated Deployment pods status to ready,
 // until the deployment is complete
 func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
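
Note: MaxSurge and MaxUnavailable take *intstr.IntOrString because they accept either an absolute count or a percentage, hence the pointer helper. A small usage sketch of an equivalent helper outside the test:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// intOrStrP mirrors the helper added above: FromInt builds the union value
// and the address is returned because the strategy fields want pointers.
func intOrStrP(num int) *intstr.IntOrString {
	v := intstr.FromInt(num)
	return &v
}

func main() {
	fmt.Println(intOrStrP(3).String()) // "3", an absolute count
	pct := intstr.FromString("25%")    // the same fields also accept percentages
	fmt.Println(pct.String())          // "25%"
}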
@@ -405,3 +412,7 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
     }
     return ownedPods, nil
 }
+
+func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
+    return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
+}
@@ -18,6 +18,7 @@ go_test(
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//test/integration/framework:go_default_library",
+       "//test/utils:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -41,6 +41,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/controller/replicaset"
     "k8s.io/kubernetes/test/integration/framework"
+    testutil "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -217,15 +218,8 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
 
 // Verify .Status.Replicas is equal to .Spec.Replicas
 func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet) {
-    rsClient := clientSet.Extensions().ReplicaSets(rs.Namespace)
-    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-        newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        return newRS.Status.Replicas == *rs.Spec.Replicas, nil
-    }); err != nil {
-        t.Fatalf("Failed to verify .Status.Replicas is equal to .Spec.Replicas for rs %s: %v", rs.Name, err)
+    if err := testutil.WaitRSStable(t, clientSet, rs, interval, timeout); err != nil {
+        t.Fatal(err)
     }
 }
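
Note: this refactor also changes behavior slightly, since the shared testutil.WaitRSStable additionally requires Status.ObservedGeneration to reach the spec generation, which the old inline check did not. A toy sketch of the difference, with no API calls:

package main

import "fmt"

func main() {
	// Old inline check: status replicas match spec replicas.
	oldStable := func(statusReplicas, specReplicas int32) bool {
		return statusReplicas == specReplicas
	}
	// Shared helper: additionally require the controller to have observed
	// the latest spec generation, so stale status cannot satisfy the wait.
	newStable := func(observedGen, desiredGen int64, statusReplicas, specReplicas int32) bool {
		return observedGen >= desiredGen && statusReplicas == specReplicas
	}
	fmt.Println(oldStable(3, 3))       // true, even if the status predates the latest spec
	fmt.Println(newStable(1, 2, 3, 3)) // false: generation 2 not yet observed
}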
@@ -18,6 +18,7 @@ package utils
 
 import (
     "fmt"
+    "testing"
     "time"
 
     extensions "k8s.io/api/extensions/v1beta1"
@@ -50,3 +51,18 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
     }
     return rs, pollErr
 }
+
+// Verify .Status.Replicas is equal to .Spec.Replicas
+func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *extensions.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
+    desiredGeneration := rs.Generation
+    if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+        newRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return newRS.Status.ObservedGeneration >= desiredGeneration && newRS.Status.Replicas == *rs.Spec.Replicas, nil
+    }); err != nil {
+        return fmt.Errorf("failed to verify .Status.Replicas is equal to .Spec.Replicas for replicaset %q: %v", rs.Name, err)
+    }
+    return nil
+}
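
A hypothetical call site for the new exported helper, assuming a test that supplies its own poll interval and timeout (the values below are illustrative):

package example

import (
	"testing"
	"time"

	extensions "k8s.io/api/extensions/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	testutil "k8s.io/kubernetes/test/utils"
)

// waitStable fails the test if rs does not reach .Spec.Replicas in time.
func waitStable(t *testing.T, c clientset.Interface, rs *extensions.ReplicaSet) {
	if err := testutil.WaitRSStable(t, c, rs, time.Second, 60*time.Second); err != nil {
		t.Fatal(err)
	}
}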