mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 11:21:47 +00:00
Merge pull request #89848 from seans3/kubectl-apply-fix
Even with build error, kubectl apply should apply all valid resources
This commit is contained in:
commit
c1f1b1bceb
12
hack/testdata/multi-resource-2.yaml
vendored
Normal file
12
hack/testdata/multi-resource-2.yaml
vendored
Normal file
# Simple test that errors should not block apply of valid
# resources. The ConfigMap should successfully apply, while
# the custom resource fails because the CRD is missing.
apiVersion: v1
kind: ConfigMap
metadata:
  name: foo
---
apiVersion: example.com/v1
kind: Bogus
metadata:
  name: foo
30
hack/testdata/multi-resource-3.yaml
vendored
Normal file
30
hack/testdata/multi-resource-3.yaml
vendored
Normal file
# Test failures do not block the apply for valid resources.
# pod-a and pod-c should apply, while POD-B is invalid
# because of the capitalization.
apiVersion: v1
kind: Pod
metadata:
  name: pod-a
spec:
  containers:
  - name: kubernetes-pause
    image: k8s.gcr.io/pause:2.0
---
apiVersion: v1
kind: Pod
metadata:
  name: POD-B
spec:
  containers:
  - name: kubernetes-pause
    image: k8s.gcr.io/pause:2.0
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-c
spec:
  containers:
  - name: kubernetes-pause
    image: k8s.gcr.io/pause:2.0
20
hack/testdata/multi-resource-4.yaml
vendored
Normal file
20
hack/testdata/multi-resource-4.yaml
vendored
Normal file
# Tests that initial failures do not block subsequent applies.
# Initial apply for Widget fails, since CRD is not applied yet,
# but the CRD apply should succeed. Subsequent custom resource
# apply of Widget should succeed.
apiVersion: example.com/v1
kind: Widget
metadata:
  name: foo
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: widgets.example.com
spec:
  group: example.com
  version: v1
  scope: Namespaced
  names:
    plural: widgets
    kind: Widget
@ -316,13 +316,14 @@ func isIncompatibleServerError(err error) bool {
|
||||
return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType
|
||||
}
|
||||
|
||||
// GetObjects returns a (possibly cached) version of all the objects to apply
|
||||
// as a slice of pointer to resource.Info. The resource.Info contains the object
|
||||
// and some other denormalized data. This function should not be called until
|
||||
// AFTER the "complete" and "validate" methods have been called to ensure that
|
||||
// the ApplyOptions is filled in and valid. Returns an error if the resource
|
||||
// builder returns an error retrieving the objects.
|
||||
// GetObjects returns a (possibly cached) version of all the valid objects to apply
|
||||
// as a slice of pointer to resource.Info and an error if one or more occurred.
|
||||
// IMPORTANT: This function can return both valid objects AND an error, since
|
||||
// "ContinueOnError" is set on the builder. This function should not be called
|
||||
// until AFTER the "complete" and "validate" methods have been called to ensure that
|
||||
// the ApplyOptions is filled in and valid.
|
||||
func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) {
|
||||
var err error = nil
|
||||
if !o.objectsCached {
|
||||
// include the uninitialized objects by default if --prune is true
|
||||
// unless explicitly set --include-uninitialized=false
|
||||
@ -335,17 +336,10 @@ func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) {
|
||||
LabelSelectorParam(o.Selector).
|
||||
Flatten().
|
||||
Do()
|
||||
if err := r.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos, err := r.Infos()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o.objects = infos
|
||||
o.objects, err = r.Infos()
|
||||
o.objectsCached = true
|
||||
}
|
||||
return o.objects, nil
|
||||
return o.objects, err
|
||||
}
|
||||
|
||||
// SetObjects stores the set of objects (as resource.Info) to be
|
||||
|
@ -257,21 +257,60 @@ __EOF__
|
||||
# cleanup
|
||||
kubectl delete --kustomize hack/testdata/kustomize
|
||||
|
||||
## kubectl apply multiple resources with initial failure.
|
||||
## kubectl apply multiple resources with one failure during apply phase.
|
||||
# Pre-Condition: namespace does not exist and no POD exists
|
||||
output_message=$(! kubectl get namespace multi-resource-ns 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# First pass, namespace is created, but pod is not (since namespace does not exist yet).
|
||||
output_message=$(! kubectl apply -f hack/testdata/multi-resource.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
output_message=$(! kubectl apply -f hack/testdata/multi-resource-1.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
|
||||
output_message=$(! kubectl get pods test-pod -n multi-resource-ns 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'pods "test-pod" not found'
|
||||
# Second pass, pod is created (now that namespace exists).
|
||||
kubectl apply -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
|
||||
kubectl apply -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'pods test-pod -n multi-resource-ns' "{{${id_field}}}" 'test-pod'
|
||||
# cleanup
|
||||
kubectl delete -f hack/testdata/multi-resource.yaml "${kube_flags[@]:?}"
|
||||
kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply multiple resources with one failure during builder phase.
|
||||
# Pre-Condition: No configmaps
|
||||
kube::test::get_object_assert configmaps "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Apply a configmap and a bogus custom resource.
|
||||
output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
# Should be error message from bogus custom resource.
|
||||
kube::test::if_has_string "${output_message}" 'no matches for kind "Bogus" in version "example.com/v1"'
|
||||
# ConfigMap should have been created even with custom resource error.
|
||||
kube::test::get_object_assert 'configmaps foo' "{{${id_field}}}" 'foo'
|
||||
# cleanup
|
||||
kubectl delete configmaps foo "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply multiple resources with one failure during builder phase.
|
||||
# Pre-Condition: No pods exist.
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Applies three pods, one of which is invalid (POD-B), two succeed (pod-a, pod-c).
|
||||
output_message=$(! kubectl apply -f hack/testdata/multi-resource-3.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'The Pod "POD-B" is invalid'
|
||||
kube::test::get_object_assert 'pods pod-a' "{{${id_field}}}" 'pod-a'
|
||||
kube::test::get_object_assert 'pods pod-c' "{{${id_field}}}" 'pod-c'
|
||||
# cleanup
|
||||
kubectl delete pod pod-a pod-c "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
## kubectl apply multiple resources with one failure during apply phase.
|
||||
# Pre-Condition: crd does not exist, and custom resource does not exist.
|
||||
kube::test::get_object_assert crds "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# First pass, custom resource fails, but crd apply succeeds.
|
||||
output_message=$(! kubectl apply -f hack/testdata/multi-resource-4.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'no matches for kind "Widget" in version "example.com/v1"'
|
||||
output_message=$(! kubectl get widgets foo 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'widgets.example.com "foo" not found'
|
||||
kube::test::get_object_assert 'crds widgets.example.com' "{{${id_field}}}" 'widgets.example.com'
|
||||
# Second pass, custom resource is created (now that crd exists).
|
||||
kubectl apply -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'widget foo' "{{${id_field}}}" 'foo'
|
||||
# cleanup
|
||||
kubectl delete -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
|
Loading…
Reference in New Issue
Block a user