Switch test manifests to apps/v1, remove beta workloads calls

Jordan Liggitt 2019-01-08 14:14:23 -05:00
parent 24f04b32c2
commit 6ca80760fd
23 changed files with 57 additions and 565 deletions

View File

@@ -1,10 +1,13 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
labels:
name: nginx
spec:
selector:
matchLabels:
name: nginx1
replicas: 3
template:
metadata:

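Note: the added selector block above is not cosmetic. In apps/v1, spec.selector is required, is no longer defaulted from the pod template labels (extensions/v1beta1 filled it in automatically), and must select spec.template.metadata.labels; the null-propagation fixtures later in this diff reflect the same change. A minimal client-go sketch of the equivalent object, for illustration only (the constructor name is made up; the types are k8s.io/api/apps/v1):

package fixtures

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newNginxDeployment mirrors the migrated manifest: apps/v1 requires an
// explicit selector that matches the template labels.
func newNginxDeployment() *appsv1.Deployment {
	labels := map[string]string{"name": "nginx"}
	replicas := int32(3)
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx", Labels: labels},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			// Required in apps/v1; extensions/v1beta1 defaulted this field.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}},
				},
			},
		},
	}
}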
View File

@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx

View File

@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx

View File

@@ -1,8 +1,13 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-depl
labels:
l1: l1
spec:
selector:
matchLabels:
l1: l1
template:
metadata:
labels:

View File

@@ -1,16 +1,16 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-depl
# We expect this field to be defaulted to the new label l2
labels: null
spec:
# We expect this field to be defaulted to the new label l2
selector: null
selector:
matchLabels:
l1: l1
template:
metadata:
labels:
l2: l2
l1: l1
spec:
containers:
- name: nginx

View File

@@ -1,7 +1,9 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nginx
labels:
app: nginx-statefulset
spec:
selector:
matchLabels:

View File

@@ -1,7 +1,9 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nginx
labels:
app: nginx-statefulset
spec:
selector:
matchLabels:

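The StatefulSet fixtures move off apps/v1beta2 for the same reason: apps/v1 StatefulSets likewise require an explicit selector. A companion sketch under the same imports as the Deployment example above (the serviceName value is assumed; it is not shown in the hunk):

// Sketch only: apps/v1 StatefulSet with the now-required selector.
func newNginxStatefulSet() *appsv1.StatefulSet {
	labels := map[string]string{"app": "nginx-statefulset"}
	return &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx", Labels: labels},
		Spec: appsv1.StatefulSetSpec{
			ServiceName: "nginx", // assumed value; the field itself is required
			Selector:    &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}},
				},
			},
		},
	}
}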
View File

@@ -434,9 +434,9 @@ func TestStorageVersionHashEqualities(t *testing.T) {
for _, r := range extList.APIResources {
if r.Name == "replicasets" {
extReplicasetHash = r.StorageVersionHash
}
}
assert.NotEmpty(extReplicasetHash)
}
}
resp, err = http.Get(server.URL + "/apis/apps/v1")
assert.Empty(err)
@@ -445,9 +445,12 @@ func TestStorageVersionHashEqualities(t *testing.T) {
for _, r := range appsList.APIResources {
if r.Name == "replicasets" {
appsReplicasetHash = r.StorageVersionHash
assert.NotEmpty(appsReplicasetHash)
}
}
if len(extReplicasetHash) > 0 && len(appsReplicasetHash) > 0 {
assert.Equal(extReplicasetHash, appsReplicasetHash)
}
// Test 2: batch/v1/jobs and batch/v1beta1/cronjobs have different
// storage version hashes.

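For context, StorageVersionHash is the discovery field this test reads: two group-versions that share storage should advertise the same hash. A hedged sketch of the same lookup through a discovery client (assumes a configured *kubernetes.Clientset named client, an fmt import, and this era's pre-context client-go signatures):

// Read the hash apps/v1 advertises for replicasets via discovery.
appsList, err := client.Discovery().ServerResourcesForGroupVersion("apps/v1")
if err != nil {
	panic(err)
}
for _, r := range appsList.APIResources {
	if r.Name == "replicasets" {
		fmt.Println(r.StorageVersionHash) // "P1RzHs8/mWQ=" per the map in the next file
	}
}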
View File

@@ -65,12 +65,7 @@ var GVRToStorageVersionHash = map[string]string{
"certificates.k8s.io/v1beta1/certificatesigningrequests": "UQh3YTCDIf0=",
"coordination.k8s.io/v1beta1/leases": "/sY7hl8ol1U=",
"coordination.k8s.io/v1/leases": "/sY7hl8ol1U=",
"extensions/v1beta1/daemonsets": "dd7pWHUlMKQ=",
"extensions/v1beta1/deployments": "8aSe+NMegvE=",
"extensions/v1beta1/ingresses": "ZOAfGflaKd0=",
"extensions/v1beta1/networkpolicies": "YpfwF18m1G8=",
"extensions/v1beta1/podsecuritypolicies": "khBLobUXkqA=",
"extensions/v1beta1/replicasets": "P1RzHs8/mWQ=",
"networking.k8s.io/v1/networkpolicies": "YpfwF18m1G8=",
"networking.k8s.io/v1beta1/ingresses": "ZOAfGflaKd0=",
"node.k8s.io/v1beta1/runtimeclasses": "8nMHWqj34s0=",
@@ -97,14 +92,6 @@ var GVRToStorageVersionHash = map[string]string{
"apps/v1/deployments": "8aSe+NMegvE=",
"apps/v1/replicasets": "P1RzHs8/mWQ=",
"apps/v1/statefulsets": "H+vl74LkKdo=",
"apps/v1beta2/controllerrevisions": "85nkx63pcBU=",
"apps/v1beta2/daemonsets": "dd7pWHUlMKQ=",
"apps/v1beta2/deployments": "8aSe+NMegvE=",
"apps/v1beta2/replicasets": "P1RzHs8/mWQ=",
"apps/v1beta2/statefulsets": "H+vl74LkKdo=",
"apps/v1beta1/controllerrevisions": "85nkx63pcBU=",
"apps/v1beta1/deployments": "8aSe+NMegvE=",
"apps/v1beta1/statefulsets": "H+vl74LkKdo=",
"admissionregistration.k8s.io/v1beta1/mutatingwebhookconfigurations": "yxW1cpLtfp8=",
"admissionregistration.k8s.io/v1beta1/validatingwebhookconfigurations": "P9NhrezfnWE=",
"events.k8s.io/v1beta1/events": "r2yiGXH7wu8=",

View File

@@ -31,21 +31,21 @@ run_daemonset_tests() {
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
# Test set commands
kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '2'
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '3'
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '4'
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
# Rollout restart should change generation
kubectl rollout restart daemonset/bind "${kube_flags[@]}"
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '5'
kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '5'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
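The swap from template_generation_field to generation_field tracks the API change: extensions/v1beta1 DaemonSets exposed spec.templateGeneration, while apps/v1 drops that field and relies on metadata.generation, which the API server increments on every spec update. A small sketch of the equivalent check (a configured clientset named client is assumed):

// apps/v1 DaemonSets carry no spec.templateGeneration; observe
// metadata.generation instead (pre-context Get signature of this era).
ds, err := client.AppsV1().DaemonSets("default").Get("bind", metav1.GetOptions{})
if err != nil {
	panic(err)
}
fmt.Println(ds.Generation) // 1 after the first apply, then 2, 3, ... per spec change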
@@ -127,12 +127,9 @@ run_kubectl_apply_deployments_tests() {
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}"
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
@@ -189,24 +186,20 @@ run_deployment_tests() {
# and old generator was used, iow. old defaults are applied
output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
# Ensure we can interact with deployments through apps endpoints
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx'
# and new generator was used, iow. new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" '10'
# Ensure we can interact with deployments through apps endpoints
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Describe command (resource only) should print detailed information

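The assertion flip from '2' to '10' above reflects defaulting, not a test bug: apps/v1beta1 defaulted .spec.revisionHistoryLimit to 2, while apps/v1 defaults it to 10. A sketch of observing the new default (clientset named client assumed):

// Deployments created through apps/v1 get revisionHistoryLimit defaulted to 10.
d, err := client.AppsV1().Deployments("default").Get("test-nginx-apps", metav1.GetOptions{})
if err != nil {
	panic(err)
}
fmt.Println(*d.Spec.RevisionHistoryLimit) // 10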
View File

@@ -1123,26 +1123,10 @@ run_rc_tests() {
kube::test::if_has_string "${output_message}" 'service/expose-test-deployment exposed'
# Clean-up
kubectl delete service/expose-test-deployment "${kube_flags[@]}"
# Uses deployment selectors for created service
output_message=$(kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1beta2deployment.yaml --port 80 2>&1 "${kube_flags[@]}")
# Post-condition: service created for deployment.
kube::test::if_has_string "${output_message}" 'service/expose-test-deployment exposed'
# Clean-up
kubectl delete service/expose-test-deployment "${kube_flags[@]}"
# Uses deployment selectors for created service
output_message=$(kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1beta1deployment.yaml --port 80 2>&1 "${kube_flags[@]}")
# Post-condition: service created for deployment.
kube::test::if_has_string "${output_message}" 'service/expose-test-deployment exposed'
# Clean-up
kubectl delete service/expose-test-deployment "${kube_flags[@]}"
# Contains no selectors, should fail.
output_message=$(! kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment-no-selectors.yaml --port 80 2>&1 "${kube_flags[@]}")
# Post-condition: service created for deployment.
kube::test::if_has_string "${output_message}" 'invalid deployment: no selectors'
# Contains no selectors, should fail.
output_message=$(! kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1beta2deployment-no-selectors.yaml --port 80 2>&1 "${kube_flags[@]}")
# Post-condition: service created for deployment.
kube::test::if_has_string "${output_message}" 'invalid deployment: no selectors'
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"

View File

@@ -269,10 +269,10 @@ run_recursive_resources_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Command
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1 -o yaml "${kube_flags[@]}")
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" "apps/v1"
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=extensions/v1beta1 -o yaml "${kube_flags[@]}")
# Post-condition: apiVersion is still apps/v1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'apps/v1'
kube::test::if_has_string "${output_message}" "extensions/v1beta1"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"

View File

@@ -365,7 +365,6 @@ runTests() {
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
generation_field=".metadata.generation"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"

View File

@@ -105,9 +105,6 @@ var _ = SIGDescribe("Deployment", func() {
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
ginkgo.It("deployment should support rollback", func() {
testRollbackDeployment(f)
})
ginkgo.It("iterative rollouts should eventually progress", func() {
testIterativeDeployments(f)
})
@@ -504,147 +501,6 @@ func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
}
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
// then rollback to revision 1 (should update template to revision 1, and then update revision 1 to 3),
// and then rollback to last revision (which is revision 4 that comes from revision 2).
// Then rollback the deployment to revision 10 (doesn't exist in history) should fail.
// Finally, rollback current deployment (revision 4) to revision 4 should be no-op.
func testRollbackDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
// 1. Create a deployment to create nginx pods.
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
deploymentReplicas := int32(1)
deploymentImage := NginxImage
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
e2elog.Logf("Creating deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
createAnnotation := map[string]string{"action": "create", "author": "node"}
d.Annotations = createAnnotation
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
// Current newRS annotation should be "create"
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
framework.ExpectNoError(err)
// 2. Update the deployment to create redis pods.
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation
})
framework.ExpectNoError(err)
// Use observedGeneration to determine if the controller noticed the pod template update.
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 2
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
framework.ExpectNoError(err)
// Current newRS annotation should be "update"
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
framework.ExpectNoError(err)
// 3. Update the deploymentRollback to rollback to revision 1
revision := int64(1)
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
framework.ExpectNoError(err)
// Wait for the deployment to start rolling back
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
framework.ExpectNoError(err)
// TODO: report RollbackDone in deployment status and check it here
// Wait for it to be updated to revision 3
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
framework.ExpectNoError(err)
// Current newRS annotation should be "create", after the rollback
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
framework.ExpectNoError(err)
// 4. Update the deploymentRollback to rollback to last revision
revision = 0
e2elog.Logf("rolling back deployment %s to last revision", deploymentName)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 4
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
framework.ExpectNoError(err)
// Current newRS annotation should be "update", after the rollback
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
framework.ExpectNoError(err)
// 5. Update the deploymentRollback to rollback to revision 10
// Since there's no revision 10 in history, it should stay as revision 4
revision = 10
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
framework.ExpectNoError(err)
// Wait for the deployment to start rolling back
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
framework.ExpectNoError(err)
// TODO: report RollbackRevisionNotFound in deployment status and check it here
// The pod template shouldn't change since there's no revision 10
// Check if it's still revision 4 and still has the old pod template
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
framework.ExpectNoError(err)
// 6. Update the deploymentRollback to rollback to revision 4
// Since it's already revision 4, it should be no-op
revision = 4
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
framework.ExpectNoError(err)
// Wait for the deployment to start rolling back
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
framework.ExpectNoError(err)
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
// The pod template shouldn't change since it's already revision 4
// Check if it's still revision 4 and still has the old pod template
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
framework.ExpectNoError(err)
}
func randomScale(d *appsv1.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:

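The deleted rollback test leaned on the extensions/v1beta1 rollback subresource (c.ExtensionsV1beta1().Deployments(ns).Rollback(...)), which apps/v1 does not serve. The user-facing replacement is kubectl rollout undo; in code, a rollback amounts to writing the previous pod template back into the spec. Roughly, as a hedged sketch (client and previousImage are assumed names, not from the removed test):

// apps/v1 has no rollback subresource; restore a prior template with an
// ordinary update, which is roughly what `kubectl rollout undo` does.
d, err := client.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
	panic(err)
}
d.Spec.Template.Spec.Containers[0].Image = previousImage
if _, err := client.AppsV1().Deployments(ns).Update(d); err != nil {
	panic(err)
}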
View File

@@ -1582,7 +1582,7 @@ metadata:
*/
framework.ConformanceIt("should create a deployment from an image ", func() {
ginkgo.By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/apps.v1", nsFlag)
ginkgo.By("verifying the deployment " + dName + " was created")
d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil {

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: expose-test-deployment
labels:
name: expose-test-deployment
spec:
replicas: 3
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: expose-test-deployment
labels:
name: expose-test-deployment
spec:
replicas: 3
selector:
matchLabels:
name: nginx
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: expose-test-deployment
labels:
name: expose-test-deployment
spec:
replicas: 3
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: expose-test-deployment
labels:
name: expose-test-deployment
spec:
replicas: 3
selector:
matchLabels:
name: nginx
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

View File

@@ -18,7 +18,6 @@ package deployment
import (
"fmt"
"reflect"
"strings"
"testing"
@@ -168,25 +167,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
t.Fatal(err)
}
// 2. Roll back to the last revision.
revision := int64(0)
rollback := newDeploymentRollback(tester.deployment.Name, nil, revision)
if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
}
// Wait for the deployment to start rolling back
if err = tester.waitForDeploymentRollbackCleared(); err != nil {
t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
}
// Wait for the deployment to be rolled back to the template stored in revision 1 and rolled forward to revision 3.
if err := tester.waitForDeploymentRevisionAndImage("3", oriImage); err != nil {
t.Fatal(err)
}
if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
// 3. Roll over a deployment before the previous rolling update finishes.
// 2. Roll over a deployment before the previous rolling update finishes.
image = "dont-finish"
imageFn = func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = image
@@ -195,9 +176,10 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if err != nil {
t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
}
if err := tester.waitForDeploymentRevisionAndImage("4", image); err != nil {
if err := tester.waitForDeploymentRevisionAndImage("3", image); err != nil {
t.Fatal(err)
}
// We don't mark pods as ready so that rollout won't finish.
// Before the rollout finishes, trigger another rollout.
image = "rollover"
@@ -208,7 +190,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if err != nil {
t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
}
if err := tester.waitForDeploymentRevisionAndImage("5", image); err != nil {
if err := tester.waitForDeploymentRevisionAndImage("4", image); err != nil {
t.Fatal(err)
}
if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
@@ -240,62 +222,11 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err)
}
// test to ensure extensions/v1beta1 selector is mutable
// TODO: drop the extensions/v1beta1 portion of this test when extensions/v1beta1 is no longer served
newSelectorLabels := map[string]string{"name_extensions_v1beta1": "test_extensions_v1beta1"}
deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get extensions/v1beta deployment %s: %v", name, err)
}
deploymentExtensionsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentExtensionsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Update(deploymentExtensionsV1beta1)
if err != nil {
t.Fatalf("failed to update extensions/v1beta1 deployment %s: %v", deploymentExtensionsV1beta1.Name, err)
}
if !reflect.DeepEqual(updatedDeploymentExtensionsV1beta1.Spec.Selector.MatchLabels, newSelectorLabels) {
t.Errorf("selector should be changed for extensions/v1beta1, expected: %v, got: %v", newSelectorLabels, updatedDeploymentExtensionsV1beta1.Spec.Selector.MatchLabels)
}
// test to ensure apps/v1beta1 selector is mutable
deploymentAppsV1beta1, err := c.AppsV1beta1().Deployments(ns.Name).Get(updatedDeploymentExtensionsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1beta1 deployment %s: %v", updatedDeploymentExtensionsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1beta1": "test_apps_v1beta1"}
deploymentAppsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1beta1.Spec.Template.Labels = newSelectorLabels
updatedDeploymentAppsV1beta1, err := c.AppsV1beta1().Deployments(ns.Name).Update(deploymentAppsV1beta1)
if err != nil {
t.Fatalf("failed to update apps/v1beta1 deployment %s: %v", deploymentAppsV1beta1.Name, err)
}
if !reflect.DeepEqual(updatedDeploymentAppsV1beta1.Spec.Selector.MatchLabels, newSelectorLabels) {
t.Errorf("selector should be changed for apps/v1beta1, expected: %v, got: %v", newSelectorLabels, updatedDeploymentAppsV1beta1.Spec.Selector.MatchLabels)
}
// test to ensure apps/v1beta2 selector is immutable
deploymentAppsV1beta2, err := c.AppsV1beta2().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1beta2 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1beta2": "test_apps_v1beta2"}
deploymentAppsV1beta2.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1beta2.Spec.Template.Labels = newSelectorLabels
_, err = c.AppsV1beta2().Deployments(ns.Name).Update(deploymentAppsV1beta2)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1beta2 deployment %s", deploymentAppsV1beta2.Name)
}
expectedErrType := "Invalid value"
expectedErrDetail := "field is immutable"
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
// test to ensure apps/v1 selector is immutable
deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{})
newSelectorLabels := map[string]string{"name_apps_v1beta1": "test_apps_v1beta1"}
deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err)
t.Fatalf("failed to get apps/v1 deployment %s: %v", name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"}
deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels
@@ -304,6 +235,8 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name)
}
expectedErrType := "Invalid value"
expectedErrDetail := "field is immutable"
if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) {
t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error())
}
@@ -555,124 +488,6 @@ func TestDeploymentHashCollision(t *testing.T) {
}
}
// Deployment supports rollback even when there's old replica set without revision.
// TODO: drop this test when extensions/v1beta1 is no longer served
func TestRollbackDeploymentRSNoRevision(t *testing.T) {
s, closeFn, rm, dc, informers, c := dcSetup(t)
defer closeFn()
name := "test-rollback-no-revision-deployment"
ns := framework.CreateTestingNamespace(name, s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
// Create an old RS without revision
rsName := "test-rollback-no-revision-controller"
rsReplicas := int32(1)
rs := newReplicaSet(rsName, ns.Name, rsReplicas)
rs.Annotations = make(map[string]string)
rs.Annotations["make"] = "difference"
rs.Spec.Template.Spec.Containers[0].Image = "different-image"
_, err := c.AppsV1().ReplicaSets(ns.Name).Create(rs)
if err != nil {
t.Fatalf("failed to create replicaset %s: %v", rsName, err)
}
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
// Set absolute rollout limits (defaults changed to percentages)
max := intstr.FromInt(1)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = &max
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = &max
// Create a deployment which have different template than the replica set created above.
if tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment); err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
// Start informer and controllers
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go rm.Run(5, stopCh)
go dc.Run(5, stopCh)
// Wait for the Deployment to be updated to revision 1
if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
t.Fatal(err)
}
// 1. Rollback to the last revision
// Since there's only 1 revision in history, it should still be revision 1
revision := int64(0)
rollback := newDeploymentRollback(tester.deployment.Name, nil, revision)
if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
}
// Wait for the deployment to start rolling back
if err = tester.waitForDeploymentRollbackCleared(); err != nil {
t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
}
// TODO: report RollbackRevisionNotFound in deployment status and check it here
// The pod template shouldn't change since there's no last revision
// Check if the deployment is still revision 1 and still has the old pod template
err = tester.checkDeploymentRevisionAndImage("1", oriImage)
if err != nil {
t.Fatal(err)
}
// 2. Update the deployment to revision 2.
updatedImage := "update"
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedImage
update.Spec.Template.Spec.Containers[0].Image = updatedImage
})
if err != nil {
t.Fatalf("failed updating deployment %s: %v", tester.deployment.Name, err)
}
// Use observedGeneration to determine if the controller noticed the pod template update.
// Wait for the controller to notice the resume.
if err = tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
t.Fatal(err)
}
// Wait for it to be updated to revision 2
if err = tester.waitForDeploymentRevisionAndImage("2", updatedImage); err != nil {
t.Fatal(err)
}
// Wait for the Deployment to complete while manually marking Deployment pods as ready at the same time
if err = tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
// 3. Update the deploymentRollback to rollback to revision 1
revision = int64(1)
rollback = newDeploymentRollback(tester.deployment.Name, nil, revision)
if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
t.Fatalf("failed to roll back deployment %s to revision %d: %v", tester.deployment.Name, revision, err)
}
// Wait for the deployment to start rolling back
if err = tester.waitForDeploymentRollbackCleared(); err != nil {
t.Fatalf("failed to roll back deployment %s to revision %d: %v", tester.deployment.Name, revision, err)
}
// TODO: report RollbackDone in deployment status and check it here
// The pod template should be updated to the one in revision 1
// Wait for it to be updated to revision 3
if err = tester.waitForDeploymentRevisionAndImage("3", oriImage); err != nil {
t.Fatal(err)
}
// Wait for the Deployment to complete while manually marking Deployment pods as ready at the same time
if err = tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
}
func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) {
hash := rs.Labels[apps.DefaultDeploymentUniqueLabelKey]
selectorHash := rs.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey]

View File

@@ -523,15 +523,9 @@ func TestAppsGroupBackwardCompatibility(t *testing.T) {
expectedStatusCodes map[int]bool
expectedVersion string
}{
// Post to extensions endpoint and get back from both: extensions and apps
{"POST", extensionsPath("deployments", metav1.NamespaceDefault, ""), deploymentExtensions, integration.Code201, ""},
{"GET", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, "extensions/v1beta1"},
{"GET", appsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, "apps/v1"},
{"DELETE", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment1"), "", integration.Code200, "extensions/v1beta1"},
// Post to apps endpoint and get back from both: apps and extensions
// Post to apps endpoint and get back from apps
{"POST", appsPath("deployments", metav1.NamespaceDefault, ""), deploymentApps, integration.Code201, ""},
{"GET", appsPath("deployments", metav1.NamespaceDefault, "test-deployment2"), "", integration.Code200, "apps/v1"},
{"GET", extensionsPath("deployments", metav1.NamespaceDefault, "test-deployment2"), "", integration.Code200, "extensions/v1beta1"},
// set propagationPolicy=Orphan to force the object to be returned so we can check the apiVersion (otherwise, we just get a status object back)
{"DELETE", appsPath("deployments", metav1.NamespaceDefault, "test-deployment2") + "?propagationPolicy=Orphan", "", integration.Code200, "apps/v1"},
}

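With the extensions rows gone, the table only exercises the apps endpoints, and the apiVersion assertions boil down to a raw GET against the apps/v1 path. A hedged sketch of that check (clientset named client assumed; DoRaw is the pre-context client-go signature of this era):

// Fetch the stored deployment through the apps/v1 endpoint and inspect
// the apiVersion in the returned JSON.
data, err := client.AppsV1().RESTClient().Get().
	AbsPath("/apis/apps/v1/namespaces/default/deployments/test-deployment2").
	DoRaw()
if err != nil {
	panic(err)
}
fmt.Println(string(data)) // contains "apiVersion": "apps/v1"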
View File

@@ -16,7 +16,6 @@ go_test(
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/util/slice:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",

View File

@@ -40,7 +40,6 @@ import (
"k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
)
@@ -472,29 +471,12 @@ func TestRSSelectorImmutability(t *testing.T) {
rs := newRS("rs", ns.Name, 0)
createRSsPods(t, clientSet, []*apps.ReplicaSet{rs}, []*v1.Pod{})
// test to ensure extensions/v1beta1 selector is mutable
// TODO: remove the extensions/v1beta1 portion of the test once we stop serving extensions/v1beta1
newSelectorLabels := map[string]string{"changed_name_extensions_v1beta1": "changed_test_extensions_v1beta1"}
rsExt, err := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get extensions/v1beta replicaset %s: %v", rs.Name, err)
}
rsExt.Spec.Selector.MatchLabels = newSelectorLabels
rsExt.Spec.Template.Labels = newSelectorLabels
replicaset, err := clientSet.ExtensionsV1beta1().ReplicaSets(ns.Name).Update(rsExt)
if err != nil {
t.Fatalf("failed to update extensions/v1beta1 replicaset %s: %v", replicaset.Name, err)
}
if !reflect.DeepEqual(replicaset.Spec.Selector.MatchLabels, newSelectorLabels) {
t.Errorf("selector should be changed for extensions/v1beta1, expected: %v, got: %v", newSelectorLabels, replicaset.Spec.Selector.MatchLabels)
}
// test to ensure apps/v1 selector is immutable
rsV1, err := clientSet.AppsV1().ReplicaSets(ns.Name).Get(rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1 replicaset %s: %v", rs.Name, err)
}
newSelectorLabels = map[string]string{"changed_name_apps_v1": "changed_test_apps_v1"}
newSelectorLabels := map[string]string{"changed_name_apps_v1": "changed_test_apps_v1"}
rsV1.Spec.Selector.MatchLabels = newSelectorLabels
rsV1.Spec.Template.Labels = newSelectorLabels
_, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(rsV1)
@@ -905,60 +887,6 @@ func TestFullyLabeledReplicas(t *testing.T) {
}
}
func TestReplicaSetsExtensionsV1beta1DefaultGCPolicy(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("test-default-gc-extensions", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
stopCh := runControllerAndInformers(t, rm, informers, 0)
defer close(stopCh)
rs := newRS("rs", ns.Name, 2)
fakeFinalizer := "kube.io/dummy-finalizer"
rs.Finalizers = []string{fakeFinalizer}
rss, _ := createRSsPods(t, c, []*apps.ReplicaSet{rs}, []*v1.Pod{})
rs = rss[0]
waitRSStable(t, c, rs)
// Verify RS creates 2 pods
podClient := c.CoreV1().Pods(ns.Name)
pods := getPods(t, podClient, labelMap())
if len(pods.Items) != 2 {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}
// Delete via the extensions/v1beta1 endpoint.
err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Delete(rs.Name, nil)
if err != nil {
t.Fatalf("Failed to delete rs: %v", err)
}
// Verify orphan finalizer has been added
rsClient := c.AppsV1().ReplicaSets(ns.Name)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return slice.ContainsString(newRS.Finalizers, metav1.FinalizerOrphanDependents, nil), nil
}); err != nil {
t.Fatalf("Failed to verify orphan finalizer is added: %v", err)
}
updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
var finalizers []string
// remove fakeFinalizer
for _, finalizer := range rs.Finalizers {
if finalizer != fakeFinalizer {
finalizers = append(finalizers, finalizer)
}
}
rs.Finalizers = finalizers
})
rsClient.Delete(rs.Name, nil)
}
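The removed test covered the extensions/v1beta1 behavior of orphaning dependents on delete by default (hence the orphan-finalizer wait above); apps/v1 instead defaults to cascading deletion in the background, so only the apps/v1 variant below remains. Callers who care can set the policy explicitly, roughly as in this sketch (client is an assumed clientset name; pre-context Delete signature of this era):

// Make the delete propagation explicit instead of relying on the
// per-group default.
policy := metav1.DeletePropagationBackground
if err := client.AppsV1().ReplicaSets(ns.Name).Delete(rs.Name, &metav1.DeleteOptions{
	PropagationPolicy: &policy,
}); err != nil {
	panic(err)
}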
func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
s, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()