Move Deployments to ReplicaSets and switch the Deployment selector to the new LabelSelector.

Update the Deployment API types, defaulting code, conversions, helpers,
and validation to use ReplicaSets instead of ReplicationControllers and
LabelSelector instead of map[string]string for selectors.

Also update the Deployment controller, registry, kubectl subcommands,
the client listers package, and the e2e tests to use ReplicaSets and
LabelSelector for Deployments.
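
For orientation, here is a minimal sketch (not part of the commit) of the selector change described above, using the api and extensions types that appear in the e2e diff below. The helper name and the import paths are illustrative assumptions based on the 2016-era source tree; the field shapes mirror the updated newDeployment helper in the diff.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

// sketchDeployment is a hypothetical helper (not from this commit) showing the
// new selector shape. Before this change the Selector field was a plain
// map[string]string, mirroring ReplicationControllers; after it, the selector
// is a *extensions.LabelSelector, mirroring ReplicaSets.
func sketchDeployment(name string, replicas int, podLabels map[string]string) *extensions.Deployment {
	return &extensions.Deployment{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: extensions.DeploymentSpec{
			Replicas: replicas,
			// Old form: Selector: podLabels
			// New form: the same label map wrapped in a LabelSelector's MatchLabels.
			Selector: &extensions.LabelSelector{MatchLabels: podLabels},
		},
	}
}

The newReplicaSet helper in the diff below makes the matching change on the ReplicaSet side, wrapping the pod labels in MatchLabels in the same way.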
Author: Madhusudan.C.S
Date: 2016-01-19 16:40:18 -08:00
Parent: fce98f3cf2
Commit: 518f08aa7c
24 changed files with 853 additions and 735 deletions


@@ -47,7 +47,7 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
It("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old rcs", func() {
It("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
@@ -59,22 +59,22 @@ var _ = Describe("Deployment [Feature:Deployment]", func() {
It("deployment should support rollback", func() {
testRollbackDeployment(f)
})
It("deployment should support rollback when there's RC with no revision", func() {
testRollbackDeploymentRCNoRevision(f)
It("deployment should support rollback when there's replica set with no revision", func() {
testRollbackDeploymentRSNoRevision(f)
})
})
func newRC(rcName string, replicas int, rcPodLabels map[string]string, imageName string, image string) *api.ReplicationController {
return &api.ReplicationController{
func newReplicaSet(rsName string, replicas int, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: rcName,
Name: rsName,
},
Spec: api.ReplicationControllerSpec{
Spec: extensions.ReplicaSetSpec{
Replicas: replicas,
Selector: rcPodLabels,
Selector: &extensions.LabelSelector{MatchLabels: rsPodLabels},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: rcPodLabels,
Labels: rsPodLabels,
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -96,7 +96,7 @@ func newDeployment(deploymentName string, replicas int, podLabels map[string]str
},
Spec: extensions.DeploymentSpec{
Replicas: replicas,
Selector: podLabels,
Selector: &extensions.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
@@ -126,27 +126,27 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
}
}
// checkDeploymentRevision checks if the input deployment's and its new RC's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Check revision of the new RC of this deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// Check revision of the new replica set of this deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC.Annotations).NotTo(Equal(nil))
Expect(newRC.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
Expect(newRS.Annotations).NotTo(Equal(nil))
Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
// Check revision of This deployment
Expect(deployment.Annotations).NotTo(Equal(nil))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
if len(imageName) > 0 {
// Check the image the new RC creates
Expect(newRC.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(newRC.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
// Check the image the new replica set creates
Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
// Check the image the deployment creates
Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
}
return deployment, newRC
return deployment, newRS
}
func testNewDeployment(f *Framework) {
@@ -161,7 +161,7 @@ func testNewDeployment(f *Framework) {
replicas := 1
Logf("Creating simple deployment %s", deploymentName)
d := newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
d.Annotations = map[string]string{"test": "should-copy-to-RC", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-RC"}
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
_, err := c.Extensions().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
defer func() {
@@ -169,10 +169,10 @@ func testNewDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
// Check that deployment is created fine.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@@ -191,12 +191,12 @@ func testNewDeployment(f *Framework) {
Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")
// Check other annotations
Expect(newRC.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(newRC.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-RC"))
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-RC"))
Expect(newRS.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
Expect(newRS.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set"))
}
func testRollingUpdateDeployment(f *Framework) {
@@ -207,18 +207,18 @@ func testRollingUpdateDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 3
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newReplicaSet(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
@@ -237,10 +237,10 @@ func testRollingUpdateDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
@@ -258,24 +258,24 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod-2",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 1
rcRevision := "3546343826724305832"
rsRevision := "3546343826724305832"
annotations := make(map[string]string)
annotations[deploymentutil.RevisionAnnotation] = rcRevision
rc := newRC(rcName, replicas, rcPodLabels, "nginx", "nginx")
rc.Annotations = annotations
annotations[deploymentutil.RevisionAnnotation] = rsRevision
rs := newRS(rsName, replicas, rsPodLabels, "nginx", "nginx")
rs.Annotations = annotations
_, err := c.Core().ReplicationControllers(ns).Create(rc)
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
@@ -294,10 +294,10 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
@@ -311,13 +311,14 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// There should be 2 events, one to scale up the new RC and then to scale down the old RC.
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down
// the old ReplicaSet.
Expect(len(events.Items)).Should(Equal(2))
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 1", newRC.Name)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
Expect(newRS).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 1", newRS.Name)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
// Check if it's updated to revision 3546343826724305833 correctly
checkDeploymentRevision(c, ns, deploymentName, "3546343826724305833", "redis", "redis")
@@ -331,18 +332,18 @@ func testRecreateDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "sample-pod-3",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 3
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
@@ -361,10 +362,10 @@ func testRecreateDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
@@ -383,13 +384,13 @@ func testRecreateDeployment(f *Framework) {
Logf("error in listing events: %s", err)
Expect(err).NotTo(HaveOccurred())
}
// There should be 2 events, one to scale up the new RC and then to scale down the old RC.
// There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet.
Expect(len(events.Items)).Should(Equal(2))
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(newRC).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 3", newRC.Name)))
Expect(newRS).NotTo(Equal(nil))
Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
@@ -402,15 +403,15 @@ func testDeploymentCleanUpPolicy(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": "cleanup-pod",
"pod": "nginx",
}
rcName := "nginx-controller"
rsName := "nginx-controller"
replicas := 1
revisionHistoryLimit := new(int)
*revisionHistoryLimit = 0
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
@@ -430,13 +431,13 @@ func testDeploymentCleanUpPolicy(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
Expect(err).NotTo(HaveOccurred())
}
@@ -450,21 +451,21 @@ func testRolloverDeployment(f *Framework) {
c := clientset.FromUnversionedClient(unversionedClient)
podName := "rollover-pod"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}
rcName := "nginx-controller"
rcReplicas := 4
_, err := c.Core().ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
rsName := "nginx-controller"
rsReplicas := 4
_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx"))
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Verify that the required pods have come up.
err = verifyPods(unversionedClient, ns, podName, false, rcReplicas)
err = verifyPods(unversionedClient, ns, podName, false, rsReplicas)
if err != nil {
Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
@@ -490,22 +491,22 @@ func testRolloverDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
}()
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Make sure the deployment starts to scale up and down RCs
// Make sure the deployment starts to scale up and down replica sets
waitForPartialEvents(unversionedClient, ns, deployment, 2)
// Check if it's updated to revision 1 correctly
_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Before the deployment finishes, update the deployment to rollover the above 2 rcs and bring up redis pods.
// Before the deployment finishes, update the deployment to rollover the above 2 ReplicaSets and bring up redis pods.
// If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
Expect(newRC.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImage := "redis"
newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
@@ -544,10 +545,10 @@ func testPausedDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
// Verify that there is no latest state realized for the new deployment.
rc, err := deploymentutil.GetNewRC(*deployment, c)
rs, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if rc != nil {
err = fmt.Errorf("unexpected new rc/%s for deployment/%s", rc.Name, deployment.Name)
if rs != nil {
err = fmt.Errorf("unexpected new rs/%s for deployment/%s", rs.Name, deployment.Name)
Expect(err).NotTo(HaveOccurred())
}
@@ -557,27 +558,27 @@ func testPausedDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
w, err := c.Core().ReplicationControllers(ns).Watch(opts)
w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
Expect(err).NotTo(HaveOccurred())
select {
case <-w.ResultChan():
// this is it
case <-time.After(time.Minute):
err = fmt.Errorf("expected a new rc to be created")
err = fmt.Errorf("expected a new replica set to be created")
Expect(err).NotTo(HaveOccurred())
}
// Pause the deployment and delete the replication controller.
// Pause the deployment and delete the replica set.
// The paused deployment shouldn't recreate a new one.
deployment.Spec.Paused = true
deployment.ResourceVersion = ""
deployment, err = c.Extensions().Deployments(ns).Update(deployment)
Expect(err).NotTo(HaveOccurred())
newRC, err := deploymentutil.GetNewRC(*deployment, c)
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@@ -586,10 +587,10 @@ func testPausedDeployment(f *Framework) {
err = fmt.Errorf("deployment %q should be paused", deployment.Name)
Expect(err).NotTo(HaveOccurred())
}
shouldBeNil, err := deploymentutil.GetNewRC(*deployment, c)
shouldBeNil, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
if shouldBeNil != nil {
err = fmt.Errorf("deployment %q shouldn't have a rc but there is %q", deployment.Name, shouldBeNil.Name)
err = fmt.Errorf("deployment %q shouldn't have a replica set but there is %q", deployment.Name, shouldBeNil.Name)
Expect(err).NotTo(HaveOccurred())
}
}
@@ -618,14 +619,14 @@ func testRollbackDeployment(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Core().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
for _, oldRS := range oldRSs {
Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
@@ -689,36 +690,36 @@ func testRollbackDeployment(f *Framework) {
checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}
// testRollbackDeploymentRCNoRevision tests that deployment supports rollback even when there's old RC without revision.
// An old RC without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
// annotation to the old RC. Then rollback the deployment to last revision, and it should fail and emit related event.
// testRollbackDeploymentRSNoRevision tests that deployment supports rollback even when there's old replica set without revision.
// An old replica set without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
// annotation to the old replica set. Then rollback the deployment to last revision, and it should fail and emit related event.
// Then update the deployment to v2 and rollback it to v1 should succeed and emit related event, now the deployment
// becomes v3. Then rollback the deployment to v10 (doesn't exist in history) should fail and emit related event.
// Finally, rollback the deployment (v3) to v3 should be no-op and emit related event.
func testRollbackDeploymentRCNoRevision(f *Framework) {
func testRollbackDeploymentRSNoRevision(f *Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := clientset.FromUnversionedClient(f.Client)
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
rcPodLabels := map[string]string{
rsPodLabels := map[string]string{
"name": podName,
"pod": "nginx",
}
rcName := "nginx-controller"
rcReplicas := 0
rc := newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx")
rc.Annotations = make(map[string]string)
rc.Annotations["make"] = "difference"
_, err := c.Core().ReplicationControllers(ns).Create(rc)
rsName := "nginx-controller"
rsReplicas := 0
rs := newRS(rsName, rsReplicas, rsPodLabels, "nginx", "nginx")
rs.Annotations = make(map[string]string)
rs.Annotations["make"] = "difference"
_, err := c.Extensions().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
defer func() {
Logf("deleting replication controller %s", rcName)
Expect(c.Core().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
Logf("deleting replica set %s", rsName)
Expect(c.Extensions().ReplicaSets(ns).Delete(rsName, nil)).NotTo(HaveOccurred())
}()
// Create a deployment to create nginx pods, which have different template than the rc created above.
// Create a deployment to create nginx pods, which have different template than the replica set created above.
deploymentName, deploymentImageName := "nginx-deployment", "nginx"
deploymentReplicas := 1
deploymentImage := "nginx"
@@ -732,14 +733,14 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
Expect(err).NotTo(HaveOccurred())
Logf("deleting deployment %s", deploymentName)
Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
// TODO: remove this once we can delete rcs with deployment
newRC, err := deploymentutil.GetNewRC(*deployment, c)
// TODO: remove this once we can delete replica sets with deployment
newRS, err := deploymentutil.GetNewReplicaSet(*deployment, c)
Expect(err).NotTo(HaveOccurred())
Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
Expect(c.Extensions().ReplicaSets(ns).Delete(newRS.Name, nil)).NotTo(HaveOccurred())
oldRSs, _, err := deploymentutil.GetOldReplicaSets(*deployment, c)
Expect(err).NotTo(HaveOccurred())
for _, oldRC := range oldRCs {
Expect(c.Core().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
for _, oldRS := range oldRSs {
Expect(c.Extensions().ReplicaSets(ns).Delete(oldRS.Name, nil)).NotTo(HaveOccurred())
}
}()
// Check that deployment is created fine.
@@ -761,9 +762,9 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
// Check if it's updated to revision 1 correctly
checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Check that the rc we created still doesn't contain revision information
rc, err = c.Core().ReplicationControllers(ns).Get(rcName)
Expect(rc.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
// Check that the replica set we created still doesn't contain revision information
rs, err = c.Extensions().ReplicaSets(ns).Get(rsName)
Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
// Update the deploymentRollback to rollback to last revision
// Since there's only 1 revision in history, it should stay as revision 1