Merge pull request #54819 from janetkuo/dep-inte-rolling
Automatic merge from submit-queue (batch tested with PRs 54493, 52501, 55172, 54780, 54819). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add integration test for deployment rolling update, rollback, rollover

**What this PR does / why we need it**:

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: ref #52113

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Commit: 77e5e2f9fc
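The test added below drives a small `deploymentTester` helper that lives in the package's `util.go`, which this diff only modifies in part. A rough sketch of the shape the test assumes (field names and the client type are inferred from the call sites, not copied from the real file):

```go
package deployment

// Sketch only: inferred from how TestDeploymentRollingUpdate constructs and
// uses the helper; the authoritative definition is in util.go of this package.
import (
	"testing"

	"k8s.io/api/extensions/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
)

type deploymentTester struct {
	t          *testing.T
	c          clientset.Interface // client for the test apiserver
	deployment *v1beta1.Deployment // deployment under test
}
```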
@@ -22,6 +22,7 @@ go_test(
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
@@ -25,6 +25,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/test/integration/framework"
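Besides the helpers it shares with TestNewDeployment, the test added in the next hunk leans on `waitForDeploymentRevisionAndImage`, which is not part of this diff. As an assumed sketch (relying on the extensions/v1beta1 import added above), the condition it polls for amounts to checking the revision annotation and the pod-template image:

```go
// Assumed sketch of the condition waitForDeploymentRevisionAndImage waits for;
// the real helper lives in the package's test utilities, not in this diff.
func hasRevisionAndImage(d *v1beta1.Deployment, revision, image string) bool {
	return d.Annotations["deployment.kubernetes.io/revision"] == revision &&
		d.Spec.Template.Spec.Containers[0].Image == image
}
```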
@@ -79,6 +80,120 @@ func TestNewDeployment(t *testing.T) {
 	}
 }
 
+// Deployments should support roll out, roll back, and roll over
+func TestDeploymentRollingUpdate(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-rolling-update-deployment"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	replicas := int32(20)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+	tester.deployment.Spec.MinReadySeconds = 4
+	quarter := intstr.FromString("25%")
+	tester.deployment.Spec.Strategy.RollingUpdate = &v1beta1.RollingUpdateDeployment{
+		MaxUnavailable: &quarter,
+		MaxSurge:       &quarter,
+	}
+
+	// Create a deployment.
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
+	}
+	oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
+	if err := tester.waitForDeploymentRevisionAndImage("1", oriImage); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 1. Roll out a new image.
+	image := "new-image"
+	if oriImage == image {
+		t.Fatalf("bad test setup, deployment %s roll out with the same image", tester.deployment.Name)
+	}
+	imageFn := func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("2", image); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 2. Roll back to the last revision.
+	revision := int64(0)
+	rollback := newDeploymentRollback(tester.deployment.Name, nil, revision)
+	if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+	// Wait for the deployment to start rolling back
+	if err = tester.waitForDeploymentRollbackCleared(); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+	// Wait for the deployment to be rolled back to the template stored in revision 1 and rolled forward to revision 3.
+	if err := tester.waitForDeploymentRevisionAndImage("3", oriImage); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 3. Roll over a deployment before the previous rolling update finishes.
+	image = "dont-finish"
+	imageFn = func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("4", image); err != nil {
+		t.Fatal(err)
+	}
+	// We don't mark pods as ready so that rollout won't finish.
+	// Before the rollout finishes, trigger another rollout.
+	image = "rollover"
+	imageFn = func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("5", image); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
+	if err != nil {
+		t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
+	}
+	for _, oldRS := range allOldRSs {
+		if *oldRS.Spec.Replicas != 0 {
+			t.Errorf("expected old replicaset %s of deployment %s to have 0 replica, got %d", oldRS.Name, tester.deployment.Name, *oldRS.Spec.Replicas)
+		}
+	}
+}
+
 // selectors are IMMUTABLE for all API versions except apps/v1beta1 and extensions/v1beta1
 func TestDeploymentSelectorImmutability(t *testing.T) {
 	s, closeFn, c := dcSimpleSetup(t)
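Step 2 above rolls back by posting a rollback object built with `newDeploymentRollback`, a helper defined elsewhere in this package and not shown in this diff. A plausible sketch, assuming the extensions/v1beta1 rollback types (a revision of 0 asks for the previous revision):

```go
// Sketch only: an assumed reconstruction of newDeploymentRollback based on how
// the test calls it; the real helper is defined elsewhere in this package.
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
	return &v1beta1.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		RollbackTo:         v1beta1.RollbackConfig{Revision: revision}, // 0 = roll back to the previous revision
	}
}
```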
@@ -19,6 +19,7 @@ package deployment
 import (
 	"fmt"
 	"net/http/httptest"
+	"sync"
 	"testing"
 	"time"
 
@@ -211,40 +212,50 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
 	return err
 }
 
-// markUpdatedPodsReady manually marks updated Deployment pods status to ready
-func (d *deploymentTester) markUpdatedPodsReady() {
+// markUpdatedPodsReady manually marks updated Deployment pods status to ready,
+// until the deployment is complete
+func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
+	defer wg.Done()
+
 	ns := d.deployment.Namespace
-	var readyPods int32
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		readyPods = 0
+		// We're done when the deployment is complete
+		if completed, err := d.deploymentComplete(); err != nil {
+			return false, err
+		} else if completed {
+			return true, nil
+		}
+		// Otherwise, mark remaining pods as ready
 		pods, err := d.listUpdatedPods()
 		if err != nil {
 			d.t.Log(err)
 			return false, nil
 		}
-		if len(pods) != int(*d.deployment.Spec.Replicas) {
-			d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
-			return false, nil
-		}
+		d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
 		for i := range pods {
 			pod := pods[i]
 			if podutil.IsPodReady(&pod) {
-				readyPods++
 				continue
 			}
 			if err = markPodReady(d.c, ns, &pod); err != nil {
 				d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
-			} else {
-				readyPods++
 			}
 		}
-		return readyPods >= *d.deployment.Spec.Replicas, nil
+		return false, nil
 	})
 	if err != nil {
 		d.t.Fatalf("failed to mark updated Deployment pods to ready: %v", err)
 	}
 }
 
+func (d *deploymentTester) deploymentComplete() (bool, error) {
+	latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	if err != nil {
+		return false, err
+	}
+	return deploymentutil.DeploymentComplete(d.deployment, &latest.Status), nil
+}
+
 // Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
 // Rolling update strategy should not be broken during a rolling update.
 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
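The new `deploymentComplete` helper defers the actual decision to `deploymentutil.DeploymentComplete`. Roughly paraphrased (a sketch, not the upstream source), that predicate treats a deployment as complete once the controller has observed the latest generation and the updated, total, and available replica counts all match `spec.replicas`:

```go
// Rough paraphrase of the completeness check relied on above; see
// deploymentutil.DeploymentComplete in pkg/controller/deployment/util
// for the authoritative version.
func complete(d *v1beta1.Deployment, newStatus *v1beta1.DeploymentStatus) bool {
	return newStatus.UpdatedReplicas == *(d.Spec.Replicas) &&
		newStatus.Replicas == *(d.Spec.Replicas) &&
		newStatus.AvailableReplicas == *(d.Spec.Replicas) &&
		newStatus.ObservedGeneration >= d.Generation
}
```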
@@ -262,28 +273,42 @@ func (d *deploymentTester) waitForDeploymentComplete() error {
 // while marking updated Deployment pods as ready at the same time.
 // Uses hard check to make sure rolling update strategy is not violated at any times.
 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
+	var wg sync.WaitGroup
+
 	// Manually mark updated Deployment pods as ready in a separate goroutine
-	go d.markUpdatedPodsReady()
+	wg.Add(1)
+	go d.markUpdatedPodsReady(&wg)
 
 	// Wait for the Deployment status to complete while Deployment pods are becoming ready
 	err := d.waitForDeploymentCompleteAndCheckRolling()
 	if err != nil {
 		return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
 	}
 
+	// Wait for goroutine to finish
+	wg.Wait()
+
 	return nil
 }
 
 // waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
 // while marking updated Deployment pods as ready at the same time.
 func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
+	var wg sync.WaitGroup
+
 	// Manually mark updated Deployment pods as ready in a separate goroutine
-	go d.markUpdatedPodsReady()
+	wg.Add(1)
+	go d.markUpdatedPodsReady(&wg)
 
 	// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
 	err := d.waitForDeploymentComplete()
 	if err != nil {
 		return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
 	}
 
+	// Wait for goroutine to finish
+	wg.Wait()
+
 	return nil
 }