From 748ad74245778a93a68652cd7e804d6a1e13ad9d Mon Sep 17 00:00:00 2001
From: "Sean R. Sullivan"
Date: Fri, 3 Apr 2020 19:50:25 -0700
Subject: [PATCH] Even with build error, kubectl apply should apply all valid resources

---
 ...ti-resource.yaml => multi-resource-1.yaml} |  0
 hack/testdata/multi-resource-2.yaml           | 12 +++++
 hack/testdata/multi-resource-3.yaml           | 30 ++++++++++++
 hack/testdata/multi-resource-4.yaml           | 20 ++++++++
 .../src/k8s.io/kubectl/pkg/cmd/apply/apply.go | 24 ++++------
 test/cmd/apply.sh                             | 47 +++++++++++++++++--
 6 files changed, 114 insertions(+), 19 deletions(-)
 rename hack/testdata/{multi-resource.yaml => multi-resource-1.yaml} (100%)
 create mode 100644 hack/testdata/multi-resource-2.yaml
 create mode 100644 hack/testdata/multi-resource-3.yaml
 create mode 100644 hack/testdata/multi-resource-4.yaml

diff --git a/hack/testdata/multi-resource.yaml b/hack/testdata/multi-resource-1.yaml
similarity index 100%
rename from hack/testdata/multi-resource.yaml
rename to hack/testdata/multi-resource-1.yaml
diff --git a/hack/testdata/multi-resource-2.yaml b/hack/testdata/multi-resource-2.yaml
new file mode 100644
index 00000000000..4919aa811e9
--- /dev/null
+++ b/hack/testdata/multi-resource-2.yaml
@@ -0,0 +1,12 @@
+# Simple test that errors do not block the apply of valid
+# resources. The ConfigMap should successfully apply, while
+# the custom resource fails because the CRD is missing.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: foo
+---
+apiVersion: example.com/v1
+kind: Bogus
+metadata:
+  name: foo
diff --git a/hack/testdata/multi-resource-3.yaml b/hack/testdata/multi-resource-3.yaml
new file mode 100644
index 00000000000..10929c63a82
--- /dev/null
+++ b/hack/testdata/multi-resource-3.yaml
@@ -0,0 +1,30 @@
+# Test that failures do not block the apply of valid resources.
+# pod-a and pod-c should apply, while POD-B is invalid
+# because of its capitalization.
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-a
+spec:
+  containers:
+  - name: kubernetes-pause
+    image: k8s.gcr.io/pause:2.0
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: POD-B
+spec:
+  containers:
+  - name: kubernetes-pause
+    image: k8s.gcr.io/pause:2.0
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-c
+spec:
+  containers:
+  - name: kubernetes-pause
+    image: k8s.gcr.io/pause:2.0
+
diff --git a/hack/testdata/multi-resource-4.yaml b/hack/testdata/multi-resource-4.yaml
new file mode 100644
index 00000000000..1c0521dfb8c
--- /dev/null
+++ b/hack/testdata/multi-resource-4.yaml
@@ -0,0 +1,20 @@
+# Tests that initial failures do not block subsequent applies.
+# Initial apply for Widget fails, since the CRD is not applied yet,
+# but the CRD apply should succeed. A subsequent apply of the
+# Widget custom resource should then succeed.
+apiVersion: example.com/v1
+kind: Widget
+metadata:
+  name: foo
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: widgets.example.com
+spec:
+  group: example.com
+  version: v1
+  scope: Namespaced
+  names:
+    plural: widgets
+    kind: Widget
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go
index 1558243910e..edae087f3f8 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go
@@ -316,13 +316,14 @@ func isIncompatibleServerError(err error) bool {
 	return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType
 }
 
-// GetObjects returns a (possibly cached) version of all the objects to apply
-// as a slice of pointer to resource.Info. The resource.Info contains the object
-// and some other denormalized data. This function should not be called until
-// AFTER the "complete" and "validate" methods have been called to ensure that
-// the ApplyOptions is filled in and valid. Returns an error if the resource
-// builder returns an error retrieving the objects.
+// GetObjects returns a (possibly cached) version of all the valid objects to apply
+// as a slice of pointers to resource.Info, along with an error if one or more occurred.
+// IMPORTANT: This function can return both valid objects AND an error, since
+// "ContinueOnError" is set on the builder. This function should not be called
+// until AFTER the "complete" and "validate" methods have been called to ensure
+// that the ApplyOptions is filled in and valid.
 func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) {
+	var err error
 	if !o.objectsCached {
 		// include the uninitialized objects by default if --prune is true
 		// unless explicitly set --include-uninitialized=false
@@ -335,17 +336,10 @@ func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) {
 			LabelSelectorParam(o.Selector).
 			Flatten().
 			Do()
-		if err := r.Err(); err != nil {
-			return nil, err
-		}
-		infos, err := r.Infos()
-		if err != nil {
-			return nil, err
-		}
-		o.objects = infos
+		o.objects, err = r.Infos()
 		o.objectsCached = true
 	}
-	return o.objects, nil
+	return o.objects, err
 }
 
 // SetObjects stores the set of objects (as resource.Info) to be
diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh
index 57633334cdc..b1e47afaef7 100755
--- a/test/cmd/apply.sh
+++ b/test/cmd/apply.sh
@@ -257,21 +257,60 @@ __EOF__
   # cleanup
   kubectl delete --kustomize hack/testdata/kustomize
 
-  ## kubectl apply multiple resources with initial failure.
+  ## kubectl apply multiple resources with one failure during apply phase.
   # Pre-Condition: namespace does not exist and no POD exists
   output_message=$(! kubectl get namespace multi-resource-ns 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
   kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # First pass, namespace is created, but pod is not (since namespace does not exist yet).
-  output_message=$(! kubectl apply -f hack/testdata/multi-resource.yaml 2>&1 "${kube_flags[@]:?}")
+  output_message=$(! kubectl apply -f hack/testdata/multi-resource-1.yaml 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
   output_message=$(! kubectl get pods test-pod -n multi-resource-ns 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'pods "test-pod" not found'
   # Second pass, pod is created (now that namespace exists).
-  kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
+  kubectl apply -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
   kube::test::get_object_assert 'pods test-pod -n multi-resource-ns' "{{${id_field}}}" 'test-pod'
   # cleanup
-  kubectl delete -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
+  kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
+
+  ## kubectl apply multiple resources with one failure during builder phase.
+  # Pre-Condition: no configmaps exist.
+  kube::test::get_object_assert configmaps "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  # Apply a configmap and a bogus custom resource.
+  output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
+  # Should be error message from bogus custom resource.
+  kube::test::if_has_string "${output_message}" 'no matches for kind "Bogus" in version "example.com/v1"'
+  # ConfigMap should have been created even with custom resource error.
+  kube::test::get_object_assert 'configmaps foo' "{{${id_field}}}" 'foo'
+  # cleanup
+  kubectl delete configmaps foo "${kube_flags[@]:?}"
+
+  ## kubectl apply multiple resources with one failure during builder phase.
+  # Pre-Condition: no pods exist.
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  # Apply three pods; POD-B is invalid, while pod-a and pod-c succeed.
+  output_message=$(! kubectl apply -f hack/testdata/multi-resource-3.yaml 2>&1 "${kube_flags[@]:?}")
+  kube::test::if_has_string "${output_message}" 'The Pod "POD-B" is invalid'
+  kube::test::get_object_assert 'pods pod-a' "{{${id_field}}}" 'pod-a'
+  kube::test::get_object_assert 'pods pod-c' "{{${id_field}}}" 'pod-c'
+  # cleanup
+  kubectl delete pod pod-a pod-c "${kube_flags[@]:?}"
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+
+  ## kubectl apply multiple resources with one failure during apply phase.
+  # Pre-Condition: CRD does not exist, and custom resource does not exist.
+  kube::test::get_object_assert crds "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  # First pass, custom resource fails, but CRD apply succeeds.
+  output_message=$(! kubectl apply -f hack/testdata/multi-resource-4.yaml 2>&1 "${kube_flags[@]:?}")
+  kube::test::if_has_string "${output_message}" 'no matches for kind "Widget" in version "example.com/v1"'
+  output_message=$(! kubectl get widgets foo 2>&1 "${kube_flags[@]:?}")
+  kube::test::if_has_string "${output_message}" 'widgets.example.com "foo" not found'
+  kube::test::get_object_assert 'crds widgets.example.com' "{{${id_field}}}" 'widgets.example.com'
+  # Second pass, custom resource is created (now that CRD exists).
+  kubectl apply -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
+  kube::test::get_object_assert 'widget foo' "{{${id_field}}}" 'foo'
+  # cleanup
+  kubectl delete -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
 
 set +o nounset
 set +o errexit
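
Note on the new GetObjects contract: because "ContinueOnError" is set on the
resource builder, GetObjects can now return a non-empty slice of infos AND a
non-nil error from the same call, so callers must consume the valid objects
before surfacing the error. A minimal caller sketch follows; applyValid and
applyOne are hypothetical names, not part of this patch, and the import paths
are assumed from the staging layout above:

    package main

    import (
    	"k8s.io/cli-runtime/pkg/resource"
    	"k8s.io/kubectl/pkg/cmd/apply"
    )

    // applyValid illustrates the intended call pattern: apply every
    // valid object first, then report the builder error (if any) so
    // one bad manifest no longer blocks the rest.
    func applyValid(o *apply.ApplyOptions, applyOne func(*resource.Info) error) error {
    	// May return both infos and a non-nil error.
    	infos, err := o.GetObjects()
    	for _, info := range infos {
    		if applyErr := applyOne(info); applyErr != nil {
    			return applyErr
    		}
    	}
    	// Surface the builder error only after the valid objects
    	// have been applied.
    	return err
    }

This is why the test cases above assert both an error message in the output
and the successful creation of the valid resources from the same apply.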