From dbfb6c01692ef4427399dad823a0f3f2ac42d5ee Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Tue, 23 Feb 2016 13:46:13 -0800
Subject: [PATCH] Set RC's pod template TerminationGracePeriodSeconds to 0 in test-cmd.sh test data

---
 hack/test-cmd.sh                       | 18 +++++------
 hack/testdata/frontend-controller.yaml | 44 ++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 9 deletions(-)
 create mode 100644 hack/testdata/frontend-controller.yaml

diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh
index 9da154f1b3c..fd60f03b7ae 100755
--- a/hack/test-cmd.sh
+++ b/hack/test-cmd.sh
@@ -697,10 +697,10 @@ __EOF__
   ## 6. kubectl autoscale --save-config should generate configuration annotation
   # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
   # Command: autoscale rc "frontend"
-  kubectl autoscale -f examples/guestbook/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
+  kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
   # Post-Condition: hpa "frontend" has configuration annotation
   [[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
   # Clean up
@@ -989,7 +989,7 @@ __EOF__
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   kubectl delete rc frontend "${kube_flags[@]}"
   # Post-condition: no pods from frontend controller
   kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -998,7 +998,7 @@ __EOF__
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   # Post-condition: frontend replication controller is created
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
   # Describe command should print detailed information
@@ -1034,7 +1034,7 @@ __EOF__
   # Pre-condition: 3 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
   # Command
-  kubectl scale --replicas=2 -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   # Post-condition: 2 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
   # Clean-up
@@ -1081,7 +1081,7 @@ __EOF__
   kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"

   ### Expose replication controller as service
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   # Pre-condition: 3 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
   # Command
@@ -1151,7 +1151,7 @@ __EOF__
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
   # Post-condition: frontend and redis-slave
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
@@ -1168,10 +1168,10 @@ __EOF__
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
   # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
-  kubectl autoscale -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
+  kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
   kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
   kubectl delete hpa frontend "${kube_flags[@]}"
   # autoscale 2~3 pods, default CPU utilization (80%), rc specified by name
diff --git a/hack/testdata/frontend-controller.yaml b/hack/testdata/frontend-controller.yaml
new file mode 100644
index 00000000000..d3eac52f6ee
--- /dev/null
+++ b/hack/testdata/frontend-controller.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: frontend
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  # labels:
+  #   app: guestbook
+  #   tier: frontend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 3
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: guestbook
+  #   tier: frontend
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      # Set terminationGracePeriodSeconds to zero so pods are deleted immediately on a delete
+      # request, since the tests usually check for and expect the pods to be gone right after deletion.
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: php-redis
+        image: gcr.io/google_samples/gb-frontend:v4
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below.
+          # value: env
+        ports:
+        - containerPort: 80