Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-11-02 23:02:25 +00:00)
Merge pull request #87714 from julianvmodesto/use-kubectl-ss-dry-run-flag
Use --dry-run=server in kubectl commands
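The diff below moves the kubectl test-cmd scripts from the deprecated boolean and --server-dry-run spellings to the unified --dry-run=<strategy> flag. As a rough sketch of the mapping these tests exercise (not part of the diff; pod.yaml is a placeholder manifest):

# Deprecated spellings, kept only for backward-compatibility coverage:
kubectl apply --dry-run -f pod.yaml          # old boolean form, client-side
kubectl apply --dry-run=true -f pod.yaml     # old boolean form, client-side
kubectl apply --server-dry-run -f pod.yaml   # old dedicated server-side flag

# Unified flag with an explicit strategy:
kubectl apply --dry-run=client -f pod.yaml   # object is built and printed locally, nothing is sent
kubectl apply --dry-run=server -f pod.yaml   # request runs through the API server (validation,
                                             # admission) but is not persisted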
@@ -37,6 +37,25 @@ run_kubectl_apply_tests() {
# Clean up
kubectl delete pods test-pod "${kube_flags[@]:?}"

### set-last-applied
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command: create "test-pod" (doesn't exist) should create this pod without last-applied annotation
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
# Pre-Condition: pod "test-pod" does not have configuration annotation
! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1
# Dry-run set-last-applied
kubectl apply set-last-applied --dry-run=client -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
kubectl apply set-last-applied --dry-run=server -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1
# Command
kubectl apply set-last-applied -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" has configuration annotation
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
# Clean up
kubectl delete pods test-pod "${kube_flags[@]:?}"

## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
@@ -75,18 +94,21 @@ run_kubectl_apply_tests() {
# cleanup
kubectl delete pods selector-test-pod

## kubectl apply --server-dry-run
## kubectl apply --dry-run=server
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

# apply dry-run
kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
kubectl apply --dry-run=true -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
kubectl apply --dry-run=client -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
kubectl apply --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# No pod exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# apply non dry-run creates the pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# apply changes
kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
kubectl apply --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
# Post-Condition: label still has initial value
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
@@ -117,7 +139,7 @@ run_kubectl_apply_tests() {
__EOF__

# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
kubectl "${kube_flags[@]:?}" apply --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
@@ -273,18 +295,18 @@ run_kubectl_apply_tests() {
# Clean up
kubectl delete pods test-pod "${kube_flags[@]:?}"

## kubectl apply --server-dry-run
## kubectl apply --dry-run=server
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

# apply dry-run
kubectl apply --server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
kubectl apply --server-side --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# No pod exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# apply non dry-run creates the pod
kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# apply changes
kubectl apply --server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
kubectl apply --server-side --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
# Post-Condition: label still has initial value
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
@@ -315,7 +337,7 @@ run_kubectl_apply_tests() {
__EOF__

# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
kubectl "${kube_flags[@]:?}" apply --server-side --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
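Aside (not part of the commit): in the server-side apply hunks above, --server-side selects server-side apply while --dry-run=server only controls persistence, so the two flags compose independently. A minimal sketch with a placeholder manifest:

kubectl apply --server-side -f pod.yaml                    # server-side apply, change is persisted
kubectl apply --server-side --dry-run=server -f pod.yaml   # same server-side processing, nothing is persisted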
@@ -79,7 +79,8 @@ run_daemonset_history_tests() {
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]:?}"
kubectl rollout undo daemonset --dry-run=client "${kube_flags[@]:?}"
kubectl rollout undo daemonset --dry-run=server "${kube_flags[@]:?}"
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
@@ -212,6 +213,10 @@ run_deployment_tests() {
### Test kubectl create deployment with image and command
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Dry-run command
kubectl create deployment nginx-with-command --dry-run=client --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
kubectl create deployment nginx-with-command --dry-run=server --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
# Post-Condition: Deployment "nginx" is created.
@@ -263,9 +268,15 @@ run_deployment_tests() {
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Pre-condition: no hpa exists
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
# Dry-run autoscale
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
@@ -289,7 +300,8 @@ run_deployment_tests() {
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]:?}" | grep "test-cmd"
kubectl rollout undo deployment nginx --dry-run=client "${kube_flags[@]:?}" | grep "test-cmd"
kubectl rollout undo deployment nginx --dry-run=server "${kube_flags[@]:?}"
kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
@@ -337,6 +349,11 @@ run_deployment_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Dry-run set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=client "${kube_flags[@]:?}"
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=server "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
@@ -383,6 +400,10 @@ run_deployment_tests() {
kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
# Assert single value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
# Dry-run set env
kubectl set env deployment nginx-deployment --dry-run=client --from=configmap/test-set-env-config "${kube_flags[@]:?}"
kubectl set env deployment nginx-deployment --dry-run=server --from=configmap/test-set-env-config "${kube_flags[@]:?}"
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}"
# Assert all values in deployment env
@@ -431,7 +452,8 @@ run_statefulset_history_tests() {
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]:?}"
kubectl rollout undo statefulset --dry-run=client "${kube_flags[@]:?}"
kubectl rollout undo statefulset --dry-run=server "${kube_flags[@]:?}"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
@@ -610,8 +632,16 @@ run_rs_tests() {
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
kubectl set resources rs/frontend --dry-run=client "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kubectl set resources rs/frontend --dry-run=server "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
kubectl set serviceaccount rs/frontend --dry-run=client "${kube_flags[@]:?}" serviceaccount1
kubectl set serviceaccount rs/frontend --dry-run=server "${kube_flags[@]:?}" serviceaccount1
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'

### Delete replica set with id
# Pre-condition: frontend replica set exists
@@ -33,6 +33,12 @@ run_job_tests() {
# Post-condition: namespace 'test-jobs' is created.
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'

# Pre-condition: cronjob does not exist
kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
# Dry-run create CronJob
kubectl create cronjob pi --dry-run=client --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kubectl create cronjob pi --dry-run=server --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
### Create a cronjob in a specific namespace
kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: assertion object exists
@@ -47,6 +53,12 @@ run_job_tests() {
# Post-condition: The test-job wasn't created actually
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''

# Pre-condition: job does not exist
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
### Dry-run create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=client
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=server
kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
### Create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
# Post-Condition: assertion object exists
@@ -40,7 +40,10 @@ run_configmap_tests() {
# Pre-condition: configmap test-configmap and test-binary-configmap does not exist
kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-binary-configmap\\\" }}found{{end}}{{end}}:" ':'

# Dry-run command
kubectl create configmap test-configmap --dry-run=client --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-configmap --dry-run=server --from-literal=key1=value1 --namespace=test-configmaps
kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
@@ -217,6 +220,10 @@ run_pod_tests() {
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Dry-run command
kubectl create secret generic test-secret --dry-run=client --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
kubectl create secret generic test-secret --dry-run=server --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
@@ -235,6 +242,12 @@ run_pod_tests() {
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'

### Create a pod disruption budget with minAvailable
# Pre-condition: pdb does not exist
kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create pdb test-pdb-1 --dry-run=client --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
kubectl create pdb test-pdb-1 --dry-run=server --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
@@ -272,6 +285,17 @@ run_pod_tests() {
kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod

### Priority Class
kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create priorityclass test-priorityclass --dry-run=client
kubectl create priorityclass test-priorityclass --dry-run=server
kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create priorityclass test-priorityclass
kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" 'found:'
kubectl delete priorityclass test-priorityclass

### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
@@ -299,6 +323,15 @@ run_pod_tests() {
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

### Dry-run label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod --dry-run=client "${kube_flags[@]}"
kubectl label pods valid-pod new-name=new-valid-pod --dry-run=server "${kube_flags[@]}"
# Post-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'

### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
@@ -315,6 +348,15 @@ run_pod_tests() {
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''

### Dry-run annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" --dry-run=client "${kube_flags[@]}"
kubectl annotate pods valid-pod emptyannotation="" --dry-run=server "${kube_flags[@]}"
# Post-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'

### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
@@ -463,6 +505,11 @@ run_pod_tests() {
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Dry-run change image
kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=client -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=server -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
@@ -689,7 +736,7 @@ run_create_secret_tests() {
# check to make sure that replace correctly PUTs to a URL
kubectl create configmap tester-update-cm -o json --dry-run | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
kubectl create configmap tester-update-cm -o json --dry-run=client | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
output_message=$(kubectl create configmap tester-update-cm --from-literal=key1=config1 -o json --dry-run | kubectl replace "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm -f -)
# the message should show the body returned which will include a UID not present in the input
kube::test::if_has_string "${output_message}" 'uid'
@@ -832,6 +879,12 @@ run_service_accounts_tests() {
kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'

### Create a service account in a specific namespace
# Pre-condition: service account does not exist
kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create serviceaccount test-service-account --dry-run=client --namespace=test-service-accounts
kubectl create serviceaccount test-service-account --dry-run=server --namespace=test-service-accounts
kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
@@ -893,7 +946,8 @@ run_service_tests() {
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# --resource-version=<current-resource-version> succeeds
@@ -994,6 +1048,10 @@ __EOF__
### Create an ExternalName service
# Pre-condition: Only the default kubernetes service exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Dry-run command
kubectl create service externalname beep-boop --dry-run=client --external-name bar.com
kubectl create service externalname beep-boop --dry-run=server --external-name bar.com
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
@@ -1013,6 +1071,13 @@ __EOF__
### Create pod and service
# Pre-condition: no pod exists
kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Dry-run command
kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=client --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=server --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
# Check only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl run testmetadata --image=nginx --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
# Check result
@@ -1305,9 +1370,14 @@ run_namespace_tests() {
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with same name in the test.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Pre-condition: test namespace does not exist
output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" ' not found'
# Dry-run command
kubectl create namespace my-namespace --dry-run=client
kubectl create namespace my-namespace --dry-run=server
output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" ' not found'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
@@ -1325,6 +1395,21 @@ run_namespace_tests() {
kube::test::if_has_string "${output_message}" 'warning: deleting cluster-scoped resources'
kube::test::if_has_string "${output_message}" 'namespace "my-namespace" deleted'

### Quota
kubectl create namespace quotas
kube::test::get_object_assert 'namespaces/quotas' "{{$id_field}}" 'quotas'
kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create quota test-quota --dry-run=client --namespace=quotas
kubectl create quota test-quota --dry-run=server --namespace=quotas
kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create quota test-quota --namespace=quotas
kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" 'found:'
# Clean up
kubectl delete quota test-quota --namespace=quotas
kubectl delete namespace quotas

######################
# Pods in Namespaces #
######################
@@ -18,6 +18,26 @@ set -o errexit
set -o nounset
set -o pipefail

# Runs tests related to kubectl create --dry-run.
run_kubectl_create_dry_run_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing kubectl create dry-run"

# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# dry-run create
kubectl create --dry-run=client -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
kubectl create --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# check no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

set +o nounset
set +o errexit
}

# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
set -o nounset
@@ -22,6 +22,7 @@ run_cluster_management_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing cluster-management commands"

kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
@@ -90,13 +91,15 @@ __EOF__
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl cordon "127.0.0.1" --dry-run
kubectl cordon "127.0.0.1" --dry-run=client
kubectl cordon "127.0.0.1" --dry-run=server
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run
kubectl drain "127.0.0.1" --dry-run=client
kubectl drain "127.0.0.1" --dry-run=server
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -106,6 +109,11 @@ __EOF__
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
# dry-run command
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client
kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
# command
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
@@ -118,7 +126,9 @@ __EOF__
### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
response=$(kubectl uncordon "127.0.0.1" --dry-run)
response=$(kubectl uncordon "127.0.0.1" --dry-run=client)
kube::test::if_has_string "${response}" 'already uncordoned'
response=$(kubectl uncordon "127.0.0.1" --dry-run=server)
kube::test::if_has_string "${response}" 'already uncordoned'
# Post-condition: node is still schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -29,6 +29,14 @@ run_clusterroles_tests() {
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'

# Pre-condition: no ClusterRole pod-admin exists
output_message=$(! kubectl get clusterrole pod-admin 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'clusterroles.rbac.authorization.k8s.io "pod-admin" not found'
# Dry-run test `kubectl create clusterrole`
kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --dry-run=client --verb=* --resource=pods
kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --dry-run=server --verb=* --resource=pods
output_message=$(! kubectl get clusterrole pod-admin 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'clusterroles.rbac.authorization.k8s.io "pod-admin" not found'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
@@ -55,10 +63,21 @@ run_clusterroles_tests() {
kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
kube::test::get_object_assert clusterrole/aggregation-reader "{{${id_field:?}}}" 'aggregation-reader'

# Pre-condition: no ClusterRoleBinding super-admin exists
output_message=$(! kubectl get clusterrolebinding super-admin 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found'
# Dry-run test `kubectl create clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --dry-run=client --clusterrole=admin --user=super-admin
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --dry-run=server --clusterrole=admin --user=super-admin
output_message=$(! kubectl get clusterrolebinding super-admin 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found'
# test `kubectl create clusterrolebinding`
# test `kubectl set subject clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl set subject --dry-run=client "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kubectl set subject --dry-run=server "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
@@ -86,6 +105,10 @@ run_clusterroles_tests() {
# test `kubectl create rolebinding`
# test `kubectl set subject rolebinding`
kubectl create "${kube_flags[@]}" rolebinding admin --dry-run=client --clusterrole=admin --user=default-admin
kubectl create "${kube_flags[@]}" rolebinding admin --dry-run=server --clusterrole=admin --user=default-admin
output_message=$(! kubectl get rolebinding/admin 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" ' not found'
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
@@ -122,6 +145,11 @@ run_role_tests() {
create_and_use_new_namespace
kube::log::status "Testing role"

# Dry-run create
kubectl create "${kube_flags[@]}" role pod-admin --dry-run=client --verb=* --resource=pods
kubectl create "${kube_flags[@]}" role pod-admin --dry-run=server --verb=* --resource=pods
output_message=$(! kubectl get role/pod-admin 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" ' not found'
# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
@@ -25,6 +25,12 @@ run_kubectl_run_tests() {
create_and_use_new_namespace
kube::log::status "Testing kubectl run"

# Command with dry-run
kubectl run --dry-run=client nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]:?}"
kubectl run --dry-run=server nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]:?}"
# Post-Condition: no Pod exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

# Pre-Condition: no Pod exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
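Note (not part of the commit): these run_*_tests functions belong to the kubectl command-line integration suite; in a kubernetes source checkout the whole suite is normally driven through the test-cmd make target, for example:

# Run the kubectl test-cmd suite (assumes a standard kubernetes dev checkout).
make test-cmd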