#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

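# Each test function below turns errexit/nounset on at entry and back off before
# returning, so strict mode applies only inside the test body. A minimal sketch
# of the same toggle pattern (illustration only; not invoked by the suite):
demo_strict_section() {
  set -o nounset
  set -o errexit
  # commands here run under strict mode; failures stop the test early
  true
  set +o nounset
  set +o errexit
}
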
run_daemonset_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets)"

  ### Create a rolling update DaemonSet
  # Pre-condition: no DaemonSet exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
  # Template Generation should be 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
  # Template Generation should stay 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  # Test set commands
  kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
  kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
  kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
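
# Illustration only (not invoked): the assertions above reduce to comparing
# kubectl's go-template output with an expected string. A standalone sketch,
# assuming ${template_generation_field} expands to .spec.templateGeneration and
# the DaemonSet "bind" from the test above exists:
demo_check_template_generation() {
  local observed
  observed=$(kubectl get daemonsets bind -o go-template --template='{{.spec.templateGeneration}}')
  [[ "${observed}" == "1" ]] || echo "expected templateGeneration 1, got ${observed}"
}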

run_daemonset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"

  ### Test rolling back a DaemonSet
  # Pre-condition: no DaemonSet or its pods exist
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a DaemonSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the DaemonSet (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo daemonset "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
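
# Illustration only (not invoked): the revisions these rollbacks move between
# are stored as ControllerRevisions and can be inspected by hand; assumes the
# DaemonSet "bind" from the test above:
demo_daemonset_history() {
  kubectl rollout history daemonset/bind               # list recorded revisions
  kubectl rollout history daemonset/bind --revision=1  # pod template of revision 1
  kubectl get controllerrevisions                      # the underlying objects
}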

run_kubectl_apply_deployments_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply deployments"
  ## kubectl apply should propagate user defined null values
  # Pre-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply base deployment
  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
  # check that the right deployment exists
  kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
  # check that the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'

  # apply new deployment with new template labels
  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
  # check that the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'

  # cleanup
  # need to explicitly remove the replicasets and pods because changing the deployment selector orphaned them
  kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
  # Post-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

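  # For reference (not executed): "user defined null values" means the second
  # manifest explicitly nulls out fields from the first so that apply removes
  # them, along these lines (hypothetical excerpt, not the actual testdata file):
  #
  #   spec:
  #     template:
  #       metadata:
  #         labels:
  #           l1: null   # explicit null: apply deletes the l1 label
  #           l2: l2
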
  # kubectl apply deployment --overwrite=true --force=true
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply deployment nginx
  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
  # check that the right deployment exists
  kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
  # apply deployment with new labels and a conflicting resourceVersion
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
  # applying the deployment with --force and --overwrite will succeed
  kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
  # check the changed deployment
  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" | grep nginx2)
  kube::test::if_has_string "${output_message}" '"name": "nginx2"'
  # applying a resource (with --force) that is both conflicting and invalid will
  # cause the server to return only a "Conflict" error when we attempt to patch.
  # This means that we will delete the existing resource after receiving 5 conflict
  # errors in a row from the server, and will attempt to create the modified
  # resource that we are passing to "apply". Since the modified resource is also
  # invalid, we will receive an invalid error when we attempt to create it, after
  # having deleted the old resource. Ensure that when this case is reached, the
  # old resource is restored once again, and the validation error is printed.
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'Invalid value'
  # Ensure that the old object has been restored
  kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
  # cleanup
  kubectl delete deployments --all --grace-period=10

  set +o nounset
  set +o errexit
}
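
# Illustration only (not invoked): when apply --force gives up after repeated
# conflicts, it falls back to deleting the live object and re-creating it from
# the supplied manifest, as described in the comment above. A rough manual
# equivalent of that fallback, reusing the manifest from the test:
demo_force_apply_fallback() {
  kubectl delete deployment nginx
  kubectl create -f hack/testdata/deployment-label-change2.yaml
}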

run_deployment_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deployments"
  # Test kubectl create deployment (using default - old generator)
  kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
  # Post-Condition: Deployment "test-nginx-extensions" is created.
  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
  # and the old generator was used, i.e. old defaults are applied
  output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string "${output_message}" '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Clean up
  kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"

  # Test kubectl create deployment
  kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
  # Post-Condition: Deployment "test-nginx-apps" is created.
  kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
  # and the new generator was used, i.e. new defaults are applied
  output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string "${output_message}" '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
  # Clean up
  kubectl delete deployment test-nginx-apps "${kube_flags[@]}"

  ### Test kubectl create deployment should not fail validation
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
  # Post-Condition: Deployment "deployment-with-unixuserid" is created.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
  # Clean up
  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"

  ### Test cascading deletion
  ## Test that the rs is deleted when the deployment is deleted.
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
  # Deleting the deployment should delete the rs.
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ## Test that the rs is not deleted when the deployment is deleted with cascade set to false.
  # Pre-condition: no deployment or rs exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Delete the deployment with cascade set to false.
  kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
  # Wait for the deployment to be deleted, then verify that the rs was not
  # deleted.
  kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Cleanup
  # Find the name of the rs to be deleted.
  output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
  kubectl delete rs ${output_message} "${kube_flags[@]}"

  ### Auto scale deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  # autoscale 2~3 pods, no CPU utilization specified
  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
  kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
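  # For reference (not executed): --cpu-percent was omitted above, so the HPA is
  # created with the default CPU target of 80%, which is what the '2 3 80'
  # assertion encodes. Reading the target back directly:
  #   kubectl get hpa nginx-deployment -o jsonpath='{.spec.targetCPUUtilizationPercentage}'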
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
  kubectl delete hpa nginx-deployment "${kube_flags[@]}"
  kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"

  ### Rollback a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Update the deployment (revision 2)
  kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1000000 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to last revision
  kubectl rollout undo deployment nginx "${kube_flags[@]}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Pause the deployment
  kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
  # A paused deployment cannot be rolled back
  ! kubectl rollout undo deployment nginx "${kube_flags[@]}"
  # Resume the deployment
  kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
  # The resumed deployment can now be rolled back
  kubectl rollout undo deployment nginx "${kube_flags[@]}"
  # Check that the new replica set has all old revisions stored in an annotation
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
  # Check that trying to watch the status of a superseded revision returns an error
  ! kubectl rollout status deployment/nginx --revision=3
  # Create a second deployment by renaming the first manifest on the fly
  ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]}"
  # Deletion of both deployments should not be blocked
  kubectl delete deployment nginx2 "${kube_flags[@]}"
  # Clean up
  kubectl delete deployment nginx "${kube_flags[@]}"

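  # For reference (not executed): the revision-history annotation grepped for
  # above can also be read without grep; dots inside the annotation key are
  # escaped for jsonpath:
  #   kubectl get rs "${newrs}" -o jsonpath='{.metadata.annotations.deployment\.kubernetes\.io/revision-history}'
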
  ### Set image of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Setting a non-existent container should fail
  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
  # Set image of deployments without specifying a name
  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a deployment specified by file
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a local file without talking to the server
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of all containers of the deployment
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Set image of all containers of the deployment again when the image doesn't change
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"

  ### Set env of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
  kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
  # Set env of deployments by configmap from keys
  kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}"
  # Assert correct value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
  # Assert single value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Set env of deployments by configmap
  kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
  # Assert all values in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
  # Set env of deployments for all containers
  kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
  # Set env of deployments for a specific container
  kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
  # Set env of deployments by secret from keys
  kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}"
  # Set env of deployments by secret
  kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
  # Remove a specific env of the deployment
  kubectl set env deployment nginx-deployment env-
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
  kubectl delete configmap test-set-env-config "${kube_flags[@]}"
  kubectl delete secret test-set-env-secret "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
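
# Illustration only (not invoked): reading back the env vars that the set env
# calls above injected, assuming the nginx-deployment/nginx names used in the
# test:
demo_read_container_env() {
  kubectl get deployment nginx-deployment \
    -o jsonpath='{.spec.template.spec.containers[?(@.name=="nginx")].env}'
}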

run_statefulset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"

  ### Test rolling back a StatefulSet
  # Pre-condition: no statefulset or its pods exist
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a StatefulSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the statefulset (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo statefulset "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up - delete the newest configuration
  kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}

run_stateful_set_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets)"

  ### Create and stop statefulset, make sure it doesn't leak pods
  # Pre-condition: no statefulset exists
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create statefulset
  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"

  ### Scale statefulset test with current-replicas and replicas
  # Pre-condition: 0 replicas
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
  # Command: Scale up
  kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
  # Post-condition: 1 replica, named nginx-0
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so nginx-0 will block all the others.
  # TODO: test robust scaling in an e2e.
  wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

  ### Clean up
  kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}
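
# Illustration only (not invoked): --current-replicas makes kubectl scale
# conditional; the request is rejected rather than applied if the live replica
# count differs from the stated one. For example, with the statefulset above:
demo_conditional_scale() {
  # succeeds only while the statefulset has exactly 1 replica
  kubectl scale --current-replicas=1 --replicas=0 statefulset nginx
}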

run_rs_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:replicasets)"

  ### Create and stop a replica set, make sure it doesn't leak pods
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  kube::log::status "Deleting rs"
  kubectl delete rs frontend "${kube_flags[@]}"
  # Post-condition: no pods from frontend replica set
  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  kube::log::status "Deleting rs"
  kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
  # Wait for the rs to be deleted.
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Post-condition: all 3 pods from the frontend replica set still remain
  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
  # Cleanup
  kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create replica set frontend from YAML
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  # Post-condition: frontend replica set is created
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert rs 'frontend'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert rs 'frontend' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert rs 'frontend' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert rs
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert rs false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert rs true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"

  ### Scale replica set frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'

  # Set up three deployments; the first two share the label run=hello
  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --selector
  kubectl scale deploy --replicas=2 -l run=hello
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all
  kubectl scale deploy --replicas=3 --all
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
  # Clean-up
  kubectl delete rs frontend "${kube_flags[@]}"
  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"

  ### Expose replica set as service
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl expose rs frontend --port=80 "${kube_flags[@]}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
  # Create a service using the service/v1 generator
  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
  # Post-condition: service exists and the port is named default.
  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
  # Cleanup services
  kubectl delete service frontend{,-2} "${kube_flags[@]}"
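
  # For reference (not executed): the default expose generator (service/v2)
  # leaves a single port unnamed, while service/v1 names it "default"; that is
  # the difference the two assertions above check. Reading the name back:
  #   kubectl get service frontend-2 -o jsonpath='{.spec.ports[0].name}'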

  # Test set commands
  # Pre-condition: frontend replica set exists at generation 1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
  kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
  kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
  kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'

  ### Delete replica set with id
  # Pre-condition: frontend replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Command
  kubectl delete rs frontend "${kube_flags[@]}"
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two replica sets
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
  # Post-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple replica sets at once
  # Pre-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
    ### Auto scale replica set
    # Pre-condition: no replica set exists
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
    # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
    kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
    kubectl delete hpa frontend "${kube_flags[@]}"
    # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
    kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
    kubectl delete hpa frontend "${kube_flags[@]}"
    # autoscale without specifying --max should fail
    ! kubectl autoscale rs frontend "${kube_flags[@]}"
    # Clean up
    kubectl delete rs frontend "${kube_flags[@]}"
  fi

  set +o nounset
  set +o errexit
}