From 76f85943787a5901a34a314a935712177edd2db0 Mon Sep 17 00:00:00 2001
From: Mike Danese
Date: Sun, 1 Mar 2020 09:34:30 -0800
Subject: [PATCH] more artisanal fixes

Most of these could have been refactored automatically, but the result
would have been uglier: the unsophisticated tooling left lots of
unnecessary struct -> pointer -> struct transitions.
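As an illustrative sketch of that pattern (simplified, not lifted
verbatim from any one file; `client`, `ctx`, `ns`, and `name` are
stand-ins), the automated rewrite tended to build a struct, take its
address, and then dereference it right back at the call site:

    // Before cleanup: struct -> pointer -> struct.
    propagation := metav1.DeletePropagationForeground
    deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &propagation}
    err := client.CoreV1().Pods(ns).Delete(ctx, name, *deleteOptions)

    // After cleanup: construct and pass the value directly.
    err = client.CoreV1().Pods(ns).Delete(ctx, name,
        metav1.DeleteOptions{PropagationPolicy: &propagation})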
---
 cmd/kubeadm/app/phases/upgrade/health.go      |  5 +---
 cmd/kubeadm/app/util/apiclient/idempotency.go | 10 ++------
 pkg/controller/bootstrap/tokencleaner.go      |  4 +--
 .../serviceaccount/tokens_controller.go       | 10 ++++----
 pkg/controller/testutil/test_utils.go         | 14 +++++------
 .../ttlafterfinished_controller.go            |  2 +-
 pkg/kubelet/status/status_manager.go          |  9 ++++---
 .../autoregister/autoregister_controller.go   |  2 +-
 staging/src/k8s.io/kubectl/pkg/drain/drain.go |  9 ++++---
 test/e2e/apimachinery/garbage_collector.go    | 20 ++++++++-------
 test/e2e/apps/deployment.go                   |  2 +-
 test/e2e/auth/audit.go                        |  2 +-
 test/e2e/auth/audit_dynamic.go                |  2 +-
 test/e2e/common/volumes.go                    |  1 -
 test/e2e/framework/pods.go                    |  2 +-
 test/e2e/framework/util.go                    |  4 +--
 test/e2e/network/service.go                   |  2 +-
 test/e2e/storage/pd.go                        | 10 ++++----
 test/e2e_node/cpu_manager_test.go             |  2 +-
 test/e2e_node/critical_pod_test.go            |  8 +++---
 test/e2e_node/device_plugin_test.go           |  4 +--
 test/e2e_node/dockershim_checkpoint_test.go   |  2 +-
 test/e2e_node/eviction_test.go                |  2 +-
 test/e2e_node/garbage_collector_test.go       |  2 +-
 test/e2e_node/gpu_device_plugin_test.go       |  4 +--
 test/e2e_node/node_perf_test.go               |  2 +-
 test/e2e_node/resource_metrics_test.go        |  4 +--
 .../admissionwebhook/admission_test.go        |  4 +--
 test/integration/daemonset/daemonset_test.go  |  2 +-
 test/integration/evictions/evictions_test.go  |  8 +++---
 .../garbage_collector_test.go                 | 25 ++++++++-----------
 .../volumescheduling/volume_binding_test.go   |  8 +++---
 test/utils/delete_resources.go                |  4 +--
 test/utils/runners.go                         |  4 +--
 34 files changed, 94 insertions(+), 101 deletions(-)

diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go
index 64e57045409..a0cb49350a4 100644
--- a/cmd/kubeadm/app/phases/upgrade/health.go
+++ b/cmd/kubeadm/app/phases/upgrade/health.go
@@ -200,10 +200,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error {
     klog.V(2).Infof("Deleting Job %q in the namespace %q", jobName, ns)
     propagation := metav1.DeletePropagationForeground
-    deleteOptions := &metav1.DeleteOptions{
-        PropagationPolicy: &propagation,
-    }
-    if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil {
+    if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil {
         return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns)
     }
     return nil
 }
diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go
index fa15260e69a..e8ffee8c06c 100644
--- a/cmd/kubeadm/app/util/apiclient/idempotency.go
+++ b/cmd/kubeadm/app/util/apiclient/idempotency.go
@@ -194,19 +194,13 @@ func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) err
 // DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error {
     foregroundDelete := metav1.DeletePropagationForeground
-    deleteOptions := &metav1.DeleteOptions{
-        PropagationPolicy: &foregroundDelete,
-    }
-    return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions)
+    return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }
 
 // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
     foregroundDelete := metav1.DeletePropagationForeground
-    deleteOptions := &metav1.DeleteOptions{
-        PropagationPolicy: &foregroundDelete,
-    }
-    return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions)
+    return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }
 
 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go
index 659a4e243ee..35a06a33868 100644
--- a/pkg/controller/bootstrap/tokencleaner.go
+++ b/pkg/controller/bootstrap/tokencleaner.go
@@ -192,9 +192,9 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
     ttl, alreadyExpired := bootstrapsecretutil.GetExpiration(secret, time.Now())
     if alreadyExpired {
         klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
-        var options *metav1.DeleteOptions
+        var options metav1.DeleteOptions
         if len(secret.UID) > 0 {
-            options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
+            options.Preconditions = &metav1.Preconditions{UID: &secret.UID}
         }
         err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options)
         // NotFound isn't a real error (it's already been deleted)
diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go
index 896ab4b1660..77559c5cae3 100644
--- a/pkg/controller/serviceaccount/tokens_controller.go
+++ b/pkg/controller/serviceaccount/tokens_controller.go
@@ -342,9 +342,9 @@ func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*r
 }
 
 func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
-    var opts *metav1.DeleteOptions
+    var opts metav1.DeleteOptions
     if len(uid) > 0 {
-        opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
+        opts.Preconditions = &metav1.Preconditions{UID: &uid}
     }
     err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts)
     // NotFound doesn't need a retry (it's already been deleted)
@@ -460,9 +460,9 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
     if !addedReference {
         // we weren't able to use the token, try to clean it up.
klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) - deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} - if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil { - klog.Error(deleteErr) // if we fail, just log it + deleteOpts := metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} + if err := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); err != nil { + klog.Error(err) // if we fail, just log it } } diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go index 3f7b8ab0ea8..4e1361b477a 100644 --- a/pkg/controller/testutil/test_utils.go +++ b/pkg/controller/testutil/test_utils.go @@ -26,28 +26,26 @@ import ( "testing" "time" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" - - "k8s.io/apimachinery/pkg/util/clock" - ref "k8s.io/client-go/tools/reference" - - v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + ref "k8s.io/client-go/tools/reference" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" utilnode "k8s.io/kubernetes/pkg/util/node" jsonpatch "github.com/evanphx/json-patch" - "k8s.io/klog" ) var ( @@ -183,7 +181,7 @@ func (m *FakeNodeHandler) List(_ context.Context, opts metav1.ListOptions) (*v1. } // Delete deletes a Node from the fake store. -func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.DeleteOptions) error { +func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt metav1.DeleteOptions) error { m.lock.Lock() defer func() { m.RequestCount++ @@ -197,7 +195,7 @@ func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.Delet } // DeleteCollection deletes a collection of Nodes from the fake store. -func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error { +func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt metav1.DeleteOptions, listOpts metav1.ListOptions) error { return nil } diff --git a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go index c9370a43cdb..be2866192b7 100644 --- a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go +++ b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go @@ -230,7 +230,7 @@ func (tc *Controller) processJob(key string) error { } // Cascade deletes the Jobs if TTL truly expires. 
     policy := metav1.DeletePropagationForeground
-    options := &metav1.DeleteOptions{
+    options := metav1.DeleteOptions{
         PropagationPolicy: &policy,
         Preconditions:     &metav1.Preconditions{UID: &fresh.UID},
     }
diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go
index 92d00d495df..9ca4a49ebcf 100644
--- a/pkg/kubelet/status/status_manager.go
+++ b/pkg/kubelet/status/status_manager.go
@@ -583,9 +583,12 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 
     // We don't handle graceful deletion of mirror pods.
     if m.canBeDeleted(pod, status.status) {
-        deleteOptions := metav1.NewDeleteOptions(0)
-        // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
-        deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
+        deleteOptions := metav1.DeleteOptions{
+            GracePeriodSeconds: new(int64),
+            // Use the pod UID as the precondition for deletion to prevent deleting a
+            // newly created pod with the same name and namespace.
+            Preconditions: metav1.NewUIDPreconditions(string(pod.UID)),
+        }
         err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions)
         if err != nil {
             klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
index a72a4f0b2ff..48dcd209c64 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
@@ -262,7 +262,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) {
 
     // we have a spurious APIService that we're managing, delete it (5A,6A)
     case desired == nil:
-        opts := &metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
+        opts := metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
         err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts)
         if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
             // deleted or changed in the meantime, we'll get called again
diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
index 1d3819f5765..225c0fcedac 100644
--- a/staging/src/k8s.io/kubectl/pkg/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
@@ -121,8 +121,8 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
     return "", nil
 }
 
-func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions {
-    deleteOptions := &metav1.DeleteOptions{}
+func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
+    deleteOptions := metav1.DeleteOptions{}
     if d.GracePeriodSeconds >= 0 {
         gracePeriodSeconds := int64(d.GracePeriodSeconds)
         deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
@@ -150,6 +150,8 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
             return err
         }
     }
+
+    delOpts := d.makeDeleteOptions()
     eviction := &policyv1beta1.Eviction{
         TypeMeta: metav1.TypeMeta{
             APIVersion: policyGroupVersion,
@@ -159,8 +161,9 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
             Name:      pod.Name,
             Namespace: pod.Namespace,
         },
-        DeleteOptions: d.makeDeleteOptions(),
+        DeleteOptions: &delOpts,
     }
+    // Remember to change the URL manipulation func when Eviction's version changes
     return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
 }
diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go
index edc25da2e17..7c7ffbee063 100644
--- a/test/e2e/apimachinery/garbage_collector.go
+++ b/test/e2e/apimachinery/garbage_collector.go
@@ -79,19 +79,19 @@ func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
     return availablePods
 }
 
-func getForegroundOptions() *metav1.DeleteOptions {
+func getForegroundOptions() metav1.DeleteOptions {
     policy := metav1.DeletePropagationForeground
-    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+    return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getBackgroundOptions() *metav1.DeleteOptions {
+func getBackgroundOptions() metav1.DeleteOptions {
     policy := metav1.DeletePropagationBackground
-    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+    return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getOrphanOptions() *metav1.DeleteOptions {
+func getOrphanOptions() metav1.DeleteOptions {
     policy := metav1.DeletePropagationOrphan
-    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+    return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
 var (
@@ -473,8 +473,9 @@ var _ = SIGDescribe("Garbage collector", func() {
             framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
         }
         ginkgo.By("delete the rc")
-        deleteOptions := &metav1.DeleteOptions{}
-        deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
+        deleteOptions := metav1.DeleteOptions{
+            Preconditions: metav1.NewUIDPreconditions(string(rc.UID)),
+        }
         if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
             framework.Failf("failed to delete the rc: %v", err)
         }
@@ -1101,7 +1102,8 @@ var _ = SIGDescribe("Garbage collector", func() {
         framework.Logf("created dependent resource %q", dependentName)
 
         // Delete the owner and orphan the dependent.
-        err = resourceClient.Delete(ownerName, getOrphanOptions())
+        delOpts := getOrphanOptions()
+        err = resourceClient.Delete(ownerName, &delOpts)
         if err != nil {
             framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
         }
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index b4f3d028637..8b7027052d3 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -854,7 +854,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
 
 func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
     trueVar := true
-    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
+    deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
     deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
     return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
 }
diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go
index 2fa83866601..3fb88cee86d 100644
--- a/test/e2e/auth/audit.go
+++ b/test/e2e/auth/audit.go
@@ -99,7 +99,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
                 _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
                 framework.ExpectNoError(err, "failed to patch pod")
 
-                f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+                f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
                 expectEvents(f, []utils.AuditEvent{
                     {
diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go
index 45bc30fa8ea..2fc99b61eac 100644
--- a/test/e2e/auth/audit_dynamic.go
+++ b/test/e2e/auth/audit_dynamic.go
@@ -211,7 +211,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
                 _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
                 framework.ExpectNoError(err, "failed to patch pod")
 
-                f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+                f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
             },
             []utils.AuditEvent{
                 {
diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go
index 092bad691de..192e5874a18 100644
--- a/test/e2e/common/volumes.go
+++ b/test/e2e/common/volumes.go
@@ -45,7 +45,6 @@ package common
 import (
     "context"
 
-    "k8s.io/api/core/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go
index f3b4c2e37c2..2a25bc70212 100644
--- a/test/e2e/framework/pods.go
+++ b/test/e2e/framework/pods.go
@@ -136,7 +136,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 
 // DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
-func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
+func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
     namespace := c.f.Namespace.Name
     err := c.Delete(context.TODO(), name, options)
     if err != nil && !apierrors.IsNotFound(err) {
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 34417d09d98..2ba9a595269 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -814,7 +814,7 @@ func (f *Framework) MatchContainerOutput(
     createdPod := podClient.Create(pod)
     defer func() {
         ginkgo.By("delete the pod")
-        podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
+        podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
     }()
 
     // Wait for client pod to complete.
@@ -1181,7 +1181,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
     defer ps.Stop()
 
     falseVar := false
-    deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    deleteOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
     startTime := time.Now()
     if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
         return err
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index 64f3363efa2..ff9fd5fc114 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -3341,7 +3341,7 @@ func proxyMode(f *framework.Framework) (string, error) {
         },
     }
     f.PodClient().CreateSync(pod)
-    defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+    defer f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
     cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
     stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd)
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index 04d90e11d3d..b6638fd5808 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -99,9 +99,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
             false: "RW",
         }
         type testT struct {
-            descr     string                // It description
-            readOnly  bool                  // true means pd is read-only
-            deleteOpt *metav1.DeleteOptions // pod delete option
+            descr     string               // It description
+            readOnly  bool                 // true means pd is read-only
+            deleteOpt metav1.DeleteOptions // pod delete option
         }
         tests := []testT{
             {
@@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
             {
                 descr:     podDefaultGrace,
                 readOnly:  false,
-                deleteOpt: &metav1.DeleteOptions{},
+                deleteOpt: metav1.DeleteOptions{},
             },
             {
                 descr:     podImmediateGrace,
@@ -122,7 +122,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
             {
                 descr:     podDefaultGrace,
                 readOnly:  true,
-                deleteOpt: &metav1.DeleteOptions{},
+                deleteOpt: metav1.DeleteOptions{},
             },
         }
diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go
index 9351de61b54..eec1c3a8983 100644
--- a/test/e2e_node/cpu_manager_test.go
+++ b/test/e2e_node/cpu_manager_test.go
@@ -87,7 +87,7 @@ func deletePods(f *framework.Framework, podNames []string) {
         delOpts := metav1.DeleteOptions{
             GracePeriodSeconds: &gp,
         }
-        f.PodClient().DeleteSync(podName, &delOpts, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
     }
 }
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index 7f3500c8f57..c23708688e8 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -99,10 +99,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
     })
     ginkgo.AfterEach(func() {
         // Delete Pods
-        f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-        f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-        f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-        f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
         // Log Events
         logPodEvents(f)
         logNodeEvents(f)
diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 185985e3971..91e749f42c3 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -281,8 +281,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
             }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
 
             // Cleanup
-            f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-            f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+            f.PodClient().DeleteSync(pod1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+            f.PodClient().DeleteSync(pod2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
         })
     })
 }
diff --git a/test/e2e_node/dockershim_checkpoint_test.go b/test/e2e_node/dockershim_checkpoint_test.go
index e8046fd93d0..559ac5d98c9 100644
--- a/test/e2e_node/dockershim_checkpoint_test.go
+++ b/test/e2e_node/dockershim_checkpoint_test.go
@@ -169,7 +169,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
     twist()
 
     ginkgo.By("Remove test pod")
-    f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+    f.PodClient().DeleteSync(podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
     ginkgo.By("Waiting for checkpoint to be removed")
     if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index a4ea7ec5d9f..9b54e50c4a2 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -535,7 +535,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
             ginkgo.By("deleting pods")
             for _, spec := range testSpecs {
                 ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-                f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
+                f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
             }
 
             // In case a test fails before verifying that NodeCondition no longer exist on the node,
diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go
index b9833500a5d..0bc8baab2e1 100644
--- a/test/e2e_node/garbage_collector_test.go
+++ b/test/e2e_node/garbage_collector_test.go
@@ -245,7 +245,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
     ginkgo.AfterEach(func() {
         for _, pod := range test.testPods {
             ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-            f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+            f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
         }
 
         ginkgo.By("Making sure all containers get cleaned up")
diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go
index c1becd18de0..64c6f985d63 100644
--- a/test/e2e_node/gpu_device_plugin_test.go
+++ b/test/e2e_node/gpu_device_plugin_test.go
@@ -162,8 +162,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
         logDevicePluginMetrics()
 
         // Cleanup
-        f.PodClient().DeleteSync(p1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-        f.PodClient().DeleteSync(p2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(p1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(p2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
     })
 })
 })
diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go
index 9b7b48c8a48..e0b2089e795 100644
--- a/test/e2e_node/node_perf_test.go
+++ b/test/e2e_node/node_perf_test.go
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
         delOpts := metav1.DeleteOptions{
             GracePeriodSeconds: &gp,
         }
-        f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+        f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout)
         ginkgo.By("running the post test exec from the workload")
         err := wl.PostTestExec()
         framework.ExpectNoError(err)
diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go
index 40676a84419..5c65d3fc715 100644
--- a/test/e2e_node/resource_metrics_test.go
+++ b/test/e2e_node/resource_metrics_test.go
@@ -96,8 +96,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
         })
         ginkgo.AfterEach(func() {
             ginkgo.By("Deleting test pods")
-            f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
-            f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
+            f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{}, 10*time.Minute)
+            f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{}, 10*time.Minute)
             if !ginkgo.CurrentGinkgoTestDescription().Failed {
                 return
             }
diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go
index a4afe1e4ef3..94071f8dfca 100644
--- a/test/integration/apiserver/admissionwebhook/admission_test.go
+++ b/test/integration/apiserver/admissionwebhook/admission_test.go
@@ -1046,7 +1046,7 @@ func testPodBindingEviction(c *testContext) {
     background := metav1.DeletePropagationBackground
     zero := int64(0)
-    forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
+    forceDelete := metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
     defer func() {
         err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(context.TODO(), pod.GetName(), forceDelete)
         if err != nil && !apierrors.IsNotFound(err) {
@@ -1073,7 +1073,7 @@ func testPodBindingEviction(c *testContext) {
     case gvr("", "v1", "pods/eviction"):
         err = c.clientset.CoreV1().RESTClient().Post().Namespace(pod.GetNamespace()).Resource("pods").Name(pod.GetName()).SubResource("eviction").Body(&policyv1beta1.Eviction{
             ObjectMeta:    metav1.ObjectMeta{Name: pod.GetName()},
-            DeleteOptions: forceDelete,
+            DeleteOptions: &forceDelete,
         }).Do(context.TODO()).Error()
 
     default:
diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go
index fd87bcbe2a7..3ea7c879648 100644
--- a/test/integration/daemonset/daemonset_test.go
+++ b/test/integration/daemonset/daemonset_test.go
@@ -174,7 +174,7 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet)
     }
 
     falseVar := false
-    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    deleteOptions := metav1.DeleteOptions{OrphanDependents: &falseVar}
     if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil {
         t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
     }
diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go
index 42290d069d1..d3e631fe40f 100644
--- a/test/integration/evictions/evictions_test.go
+++ b/test/integration/evictions/evictions_test.go
@@ -73,7 +73,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
     }
 
     var gracePeriodSeconds int64 = 30
-    deleteOption := &metav1.DeleteOptions{
+    deleteOption := metav1.DeleteOptions{
         GracePeriodSeconds: &gracePeriodSeconds,
     }
 
@@ -192,7 +192,7 @@ func TestTerminalPodEviction(t *testing.T) {
     }
 
     var gracePeriodSeconds int64 = 30
-    deleteOption := &metav1.DeleteOptions{
+    deleteOption := metav1.DeleteOptions{
         GracePeriodSeconds: &gracePeriodSeconds,
     }
     pod := newPod("test-terminal-pod1")
@@ -309,7 +309,7 @@ func newPDB() *v1beta1.PodDisruptionBudget {
     }
 }
 
-func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction {
+func newEviction(ns, evictionName string, deleteOption metav1.DeleteOptions) *v1beta1.Eviction {
     return &v1beta1.Eviction{
         TypeMeta: metav1.TypeMeta{
             APIVersion: "Policy/v1beta1",
@@ -319,7 +319,7 @@ func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v
             Name:      evictionName,
             Namespace: ns,
         },
-        DeleteOptions: deleteOption,
+        DeleteOptions: &deleteOption,
     }
 }
diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go
index bc19783aebf..772b365a560 100644
--- a/test/integration/garbagecollector/garbage_collector_test.go
+++ b/test/integration/garbagecollector/garbage_collector_test.go
@@ -52,24 +52,24 @@ import (
     "k8s.io/kubernetes/test/integration/framework"
 )
 
-func getForegroundOptions() *metav1.DeleteOptions {
+func getForegroundOptions() metav1.DeleteOptions {
     policy := metav1.DeletePropagationForeground
-    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+    return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getOrphanOptions() *metav1.DeleteOptions {
+func getOrphanOptions() metav1.DeleteOptions {
     var trueVar = true
-    return &metav1.DeleteOptions{OrphanDependents: &trueVar}
+    return metav1.DeleteOptions{OrphanDependents: &trueVar}
 }
 
-func getPropagateOrphanOptions() *metav1.DeleteOptions {
+func getPropagateOrphanOptions() metav1.DeleteOptions {
     policy := metav1.DeletePropagationOrphan
-    return &metav1.DeleteOptions{PropagationPolicy: &policy}
+    return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getNonOrphanOptions() *metav1.DeleteOptions {
+func getNonOrphanOptions() metav1.DeleteOptions {
     var falseVar = false
-    return &metav1.DeleteOptions{OrphanDependents: &falseVar}
+    return metav1.DeleteOptions{OrphanDependents: &falseVar}
 }
 
 const garbageCollectedPodName = "test.pod.1"
@@ -435,7 +435,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
     }
 }
 
-func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
+func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
     defer wg.Done()
     rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
     podClient := clientSet.CoreV1().Pods(namespace)
@@ -461,9 +461,6 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
     }
     orphan := false
     switch {
-    case options == nil:
-        // if there are no deletion options, the default policy for replication controllers is orphan
-        orphan = true
     case options.OrphanDependents != nil:
         // if the deletion options explicitly specify whether to orphan, that controls
         orphan = *options.OrphanDependents
@@ -537,9 +534,9 @@ func TestStressingCascadingDeletion(t *testing.T) {
     rcUIDs := make(chan types.UID, collections*5)
     for i := 0; i < collections; i++ {
         // rc is created with empty finalizers, deleted with nil delete options, pods will remain.
-        go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
+        go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs)
         // rc is created with the orphan finalizer, deleted with nil options, pods will remain.
-        go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, nil, &wg, rcUIDs)
+        go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs)
         // rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
         go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
         // rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index a58314067c2..25b4a0b1508 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -56,7 +56,7 @@ type testConfig struct {
 var (
     // Delete API objects immediately
     deletePeriod = int64(0)
-    deleteOption = &metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}
+    deleteOption = metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}
     modeWait      = storagev1.VolumeBindingWaitForFirstConsumer
     modeImmediate = storagev1.VolumeBindingImmediate
 
@@ -847,7 +847,7 @@ func TestRescheduleProvisioning(t *testing.T) {
     defer func() {
         close(controllerCh)
-        deleteTestObjects(clientset, ns, nil)
+        deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
         testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
         testCtx.closeFn()
     }()
@@ -931,7 +931,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
         stop: textCtx.ctx.Done(),
         teardown: func() {
             klog.Infof("test cluster %q start to tear down", ns)
-            deleteTestObjects(clientset, ns, nil)
+            deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
             cleanupTest(t, textCtx)
         },
     }
@@ -983,7 +983,7 @@ func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds
     return ctrl, informerFactory, nil
 }
 
-func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) {
+func deleteTestObjects(client clientset.Interface, ns string, option metav1.DeleteOptions) {
     client.CoreV1().Pods(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
     client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
     client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
diff --git a/test/utils/delete_resources.go b/test/utils/delete_resources.go
index 60d1ba44e8c..01c21842286 100644
--- a/test/utils/delete_resources.go
+++ b/test/utils/delete_resources.go
@@ -32,7 +32,7 @@ import (
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 )
 
-func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
     switch kind {
     case api.Kind("Pod"):
         return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
@@ -57,7 +57,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
     }
 }
 
-func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
     deleteFunc := func() (bool, error) {
         err := deleteResource(c, kind, namespace, name, options)
         if err == nil || apierrors.IsNotFound(err) {
diff --git a/test/utils/runners.go b/test/utils/runners.go
index 3d0498cfd21..aaac8bd37d5 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -1565,7 +1565,7 @@ func (config *SecretConfig) Run() error {
 }
 
 func (config *SecretConfig) Stop() error {
-    if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+    if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
         return fmt.Errorf("Error deleting secret: %v", err)
     }
     config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
@@ -1623,7 +1623,7 @@ func (config *ConfigMapConfig) Run() error {
 }
 
 func (config *ConfigMapConfig) Stop() error {
-    if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+    if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
         return fmt.Errorf("Error deleting configmap: %v", err)
     }
     config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)