diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh
index 428268c8ca9..3929e20ee57 100755
--- a/hack/make-rules/test-cmd-util.sh
+++ b/hack/make-rules/test-cmd-util.sh
@@ -1838,7 +1838,7 @@ run_non_native_resource_tests() {
   kubectl "${kube_flags[@]}" delete resources myobj --cascade=true
 
   # Make sure it's gone
-  kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
 
   # Test that we can create a new resource of type Foo
   kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
@@ -1919,7 +1919,7 @@ run_non_native_resource_tests() {
   kubectl "${kube_flags[@]}" delete foos test --cascade=true
 
   # Make sure it's gone
-  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
 
   # Test that we can create a new resource of type Bar
   kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}"
diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go
index 30b1872e3af..043b6bc3a47 100644
--- a/pkg/kubectl/cmd/delete.go
+++ b/pkg/kubectl/cmd/delete.go
@@ -302,12 +302,12 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error {
 		found++
 
 		// if we're here, it means that cascade=false (not the default), so we should orphan as requested
-		orphan := true
 		options := &metav1.DeleteOptions{}
 		if o.GracePeriod >= 0 {
 			options = metav1.NewDeleteOptions(int64(o.GracePeriod))
 		}
-		options.OrphanDependents = &orphan
+		policy := metav1.DeletePropagationOrphan
+		options.PropagationPolicy = &policy
 		return o.deleteResource(info, options)
 	})
 	if err != nil {
@@ -350,8 +350,8 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error {
 }
 
 func (o *DeleteOptions) cascadingDeleteResource(info *resource.Info) error {
-	falseVar := false
-	return o.deleteResource(info, &metav1.DeleteOptions{OrphanDependents: &falseVar})
+	policy := metav1.DeletePropagationForeground
+	return o.deleteResource(info, &metav1.DeleteOptions{PropagationPolicy: &policy})
 }
 
 func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav1.DeleteOptions) error {
diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go
index 38ce65a22c7..55a4d915377 100644
--- a/pkg/kubectl/cmd/delete_test.go
+++ b/pkg/kubectl/cmd/delete_test.go
@@ -104,17 +104,17 @@ func TestDeleteObjectByTuple(t *testing.T) {
 	}
 }
 
-func hasExpectedOrphanDependents(body io.ReadCloser, expectedOrphanDependents *bool) bool {
-	if body == nil || expectedOrphanDependents == nil {
-		return body == nil && expectedOrphanDependents == nil
+func hasExpectedPropagationPolicy(body io.ReadCloser, policy *metav1.DeletionPropagation) bool {
+	if body == nil || policy == nil {
+		return body == nil && policy == nil
 	}
 	var parsedBody metav1.DeleteOptions
 	rawBody, _ := ioutil.ReadAll(body)
 	json.Unmarshal(rawBody, &parsedBody)
-	if parsedBody.OrphanDependents == nil {
+	if parsedBody.PropagationPolicy == nil {
 		return false
 	}
-	return *expectedOrphanDependents == *parsedBody.OrphanDependents
+	return *policy == *parsedBody.PropagationPolicy
 }
 
 // Tests that DeleteOptions.OrphanDependents is appropriately set while deleting objects.
@@ -127,13 +127,13 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) {
 	codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...)
 
-	var expectedOrphanDependents *bool
+	var policy *metav1.DeletionPropagation
 	tf.UnstructuredClient = &fake.RESTClient{
 		NegotiatedSerializer: unstructuredSerializer,
 		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
 			switch p, m, b := req.URL.Path, req.Method, req.Body; {
-			case p == "/namespaces/test/secrets/mysecret" && m == "DELETE" && hasExpectedOrphanDependents(b, expectedOrphanDependents):
+			case p == "/namespaces/test/secrets/mysecret" && m == "DELETE" && hasExpectedPropagationPolicy(b, policy):
 				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil
 			default:
@@ -143,9 +143,9 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) {
 	}
 	tf.Namespace = "test"
 
-	// DeleteOptions.OrphanDependents should be false, when cascade is true (default).
-	falseVar := false
-	expectedOrphanDependents = &falseVar
+	// DeleteOptions.PropagationPolicy should be Foreground, when cascade is true (default).
+	foregroundPolicy := metav1.DeletePropagationForeground
+	policy = &foregroundPolicy
 	streams, _, buf, _ := genericclioptions.NewTestIOStreams()
 	cmd := NewCmdDelete(tf, streams)
 	cmd.Flags().Set("namespace", "test")
@@ -156,8 +156,8 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) {
 	}
 
 	// Test that delete options should be set to orphan when cascade is false.
-	trueVar := true
-	expectedOrphanDependents = &trueVar
+	orphanPolicy := metav1.DeletePropagationOrphan
+	policy = &orphanPolicy
 	streams, _, buf, _ = genericclioptions.NewTestIOStreams()
 	cmd = NewCmdDelete(tf, streams)
 	cmd.Flags().Set("namespace", "test")
diff --git a/pkg/kubectl/delete.go b/pkg/kubectl/delete.go
index 0d1f8a46ea6..adeb829a14a 100644
--- a/pkg/kubectl/delete.go
+++ b/pkg/kubectl/delete.go
@@ -211,8 +211,10 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
 			return err
 		}
 	}
-	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	// Using a background deletion policy because the replication controller
+	// has already been scaled down.
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
 	return rc.Delete(name, deleteOptions)
 }
 
@@ -282,8 +284,10 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati
 		}
 	}
 
-	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	// Using a background deletion policy because the replica set has already
+	// been scaled down.
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
 	return rsc.Delete(name, deleteOptions)
 }
 
@@ -319,8 +323,10 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio
 		return err
 	}
 
-	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	// Using a background deletion policy because the daemon set has already
+	// been scaled down.
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
 	return reaper.client.DaemonSets(namespace).Delete(name, deleteOptions)
 }
 
@@ -347,8 +353,10 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat
 	// TODO: Cleanup volumes? We don't want to accidentally delete volumes from
 	// stop, so just leave this up to the statefulset.
-	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	// Using a background deletion policy because the stateful set has already
+	// been scaled down.
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
 	return statefulsets.Delete(name, deleteOptions)
 }
 
@@ -394,8 +402,8 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra
 		return utilerrors.NewAggregate(errList)
 	}
 	// once we have all the pods removed we can safely remove the job itself.
-	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
 	return jobs.Delete(name, deleteOptions)
 }
 
@@ -415,9 +423,9 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
 		return err
 	}
 	if deployment.Initializers != nil {
-		var falseVar = false
-		nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
-		return deployments.Delete(name, &nonOrphanOption)
+		policy := metav1.DeletePropagationBackground
+		deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
+		return deployments.Delete(name, deleteOptions)
 	}
 
 	// Use observedGeneration to determine if the deployment controller noticed the pause.
@@ -459,9 +467,9 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
 
 	// Delete deployment at the end.
 	// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
-	var falseVar = false
-	nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
-	return deployments.Delete(name, &nonOrphanOption)
+	policy := metav1.DeletePropagationBackground
+	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return deployments.Delete(name, deleteOptions)
 }
 
 type updateDeploymentFunc func(d *extensions.Deployment)
diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go
index 084dce74d49..d0a88598652 100644
--- a/pkg/kubectl/rolling_updater.go
+++ b/pkg/kubectl/rolling_updater.go
@@ -549,8 +549,8 @@ func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationContro
 	rc.Name = newName
 	rc.ResourceVersion = ""
 	// First delete the oldName RC and orphan its pods.
-	trueVar := true
-	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{OrphanDependents: &trueVar})
+	policy := metav1.DeletePropagationOrphan
+	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &policy})
 	if err != nil && !errors.IsNotFound(err) {
 		return err
 	}
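
Illustrative note, not part of the change above: the diff maps the old boolean as follows. kubectl delete with cascade=true now sends a Foreground policy, the reapers send Background (they have already scaled the workload down themselves), and cascade=false sends Orphan. The sketch below contrasts the two DeleteOptions shapes; the package main wrapper, import alias, and printed output are assumptions made only so the example is self-contained, not anything taken from the Kubernetes tree.

// Illustrative sketch only: it contrasts the old OrphanDependents *bool field
// with the PropagationPolicy field that the change above switches to.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Old style: a bare bool pointer; true orphans dependents, false cascades.
	orphan := true
	oldStyle := &metav1.DeleteOptions{OrphanDependents: &orphan}

	// New style: an explicit policy. Orphan leaves dependents behind,
	// Background removes the owner first and lets the garbage collector clean
	// up dependents afterwards, and Foreground deletes dependents before the
	// owner itself is removed.
	policy := metav1.DeletePropagationForeground
	newStyle := &metav1.DeleteOptions{PropagationPolicy: &policy}

	fmt.Println(*oldStyle.OrphanDependents, *newStyle.PropagationPolicy)
}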