From aaf855c1e699133377ce94c1aac3692c2f0df8ac Mon Sep 17 00:00:00 2001
From: Mike Danese
Date: Sun, 1 Mar 2020 10:19:56 -0800
Subject: [PATCH] deref all calls to metav1.NewDeleteOptions that are passed
 to clients.

This is gross, but because NewDeleteOptions is used by various parts of
storage that still pass around pointers, the return type can't be changed
without significant refactoring within the apiserver. I think this would be
good to clean up, but I want to minimize apiserver-side changes as much as
possible in the client signature refactor.
---
 pkg/controller/podgc/gc_controller.go         |  2 +-
 test/e2e/apimachinery/etcd_failure.go         |  2 +-
 test/e2e/apimachinery/generated_clientset.go  |  2 +-
 test/e2e/apimachinery/resource_quota.go       | 36 +++++++++----------
 test/e2e/apps/statefulset.go                  |  2 +-
 .../autoscaling/cluster_size_autoscaling.go   |  2 +-
 test/e2e/common/container.go                  |  2 +-
 test/e2e/common/container_probe.go            |  2 +-
 test/e2e/common/lifecycle_hook.go             |  2 +-
 test/e2e/common/pods.go                       |  2 +-
 test/e2e/framework/network/utils.go           |  2 +-
 test/e2e/network/dns.go                       |  6 ++--
 test/e2e/network/dns_common.go                | 10 +++---
 test/e2e/network/dual_stack.go                |  2 +-
 test/e2e/node/pre_stop.go                     |  2 +-
 test/e2e/scheduling/limit_range.go            |  2 +-
 test/e2e/scheduling/predicates.go             |  2 +-
 test/e2e/scheduling/preemption.go             |  8 ++---
 test/e2e/storage/drivers/in_tree.go           |  2 +-
 test/e2e/storage/empty_dir_wrapper.go         |  4 +--
 test/e2e/storage/pd.go                        | 16 ++++-----
 test/e2e/storage/pv_protection.go             |  6 ++--
 test/e2e/storage/pvc_protection.go            |  6 ++--
 test/e2e/storage/utils/utils.go               |  4 +--
 test/e2e/windows/density.go                   |  2 +-
 test/e2e/windows/dns.go                       |  2 +-
 test/e2e_node/mirror_pod_test.go              |  4 +--
 test/e2e_node/node_problem_detector_linux.go  |  4 +--
 test/e2e_node/resource_collector.go           |  2 +-
 .../master/transformation_testcase.go         |  2 +-
 test/integration/scheduler/predicates_test.go |  4 +--
 test/integration/scheduler/util.go            |  2 +-
 test/integration/util/util.go                 |  4 +--
 33 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go
index 996ea88fab2..e7a6150eebc 100644
--- a/pkg/controller/podgc/gc_controller.go
+++ b/pkg/controller/podgc/gc_controller.go
@@ -76,7 +76,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
 		nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
 		deletePod: func(namespace, name string) error {
 			klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
-			return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
+			return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
 		},
 	}
 
diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go
index 5d2875038dc..c53e38db19f 100644
--- a/test/e2e/apimachinery/etcd_failure.go
+++ b/test/e2e/apimachinery/etcd_failure.go
@@ -125,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 			return false, nil
 		}
 		for _, pod := range pods.Items {
-			err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+			err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 			framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 		}
 		framework.Logf("apiserver has recovered")
diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go
index 65476988412..be0d4a4d2a0 100644
--- a/test/e2e/apimachinery/generated_clientset.go
+++ b/test/e2e/apimachinery/generated_clientset.go
@@ -151,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() {
 
 		ginkgo.By("deleting the pod gracefully")
 		gracePeriod := int64(31)
-		if err := podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
+		if err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil {
 			framework.Failf("Failed to delete pod: %v", err)
 		}
 
diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go
index 422f4a5ea54..b4826f3e017 100644
--- a/test/e2e/apimachinery/resource_quota.go
+++ b/test/e2e/apimachinery/resource_quota.go
@@ -274,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -712,7 +712,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -751,7 +751,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -808,7 +808,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -838,7 +838,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -938,7 +938,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -968,7 +968,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1028,7 +1028,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1067,7 +1067,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1114,7 +1114,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting the pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1159,7 +1159,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
 		framework.ExpectError(err)
 
 		ginkgo.By("Deleting first pod")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1209,9 +1209,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting both pods")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
 	})
@@ -1258,9 +1258,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Deleting both pods")
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
-		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
+		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1299,7 +1299,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }) @@ -1333,7 +1333,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1391,7 +1391,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 560c8bcbd71..d92d9db191c 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -763,7 +763,7 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 67ad3dff68e..f8c2ffa53d6 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -1451,7 +1451,7 @@ func drainNode(f *framework.Framework, node *v1.Node) { pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) framework.ExpectNoError(err) for _, pod := range pods.Items { - err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) } } diff --git a/test/e2e/common/container.go b/test/e2e/common/container.go index a1ad000f5ee..cd5a2be9e90 100644 --- a/test/e2e/common/container.go +++ b/test/e2e/common/container.go @@ -70,7 +70,7 @@ func (cc *ConformanceContainer) Create() { } func (cc *ConformanceContainer) Delete() error { - return cc.PodClient.Delete(context.TODO(), cc.podName, metav1.NewDeleteOptions(0)) + return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0)) } func (cc *ConformanceContainer) IsReady() (bool, error) { diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 3af84b05b27..62c36116cc2 100644 --- 
--- a/test/e2e/common/container_probe.go
+++ b/test/e2e/common/container_probe.go
@@ -415,7 +415,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
 	// At the end of the test, clean up by removing the pod.
 	defer func() {
 		ginkgo.By("deleting the pod")
-		podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 	}()
 	ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
 	podClient.Create(pod)
diff --git a/test/e2e/common/lifecycle_hook.go b/test/e2e/common/lifecycle_hook.go
index a5f226545bd..3ae5511caba 100644
--- a/test/e2e/common/lifecycle_hook.go
+++ b/test/e2e/common/lifecycle_hook.go
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 			}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
 		}
 		ginkgo.By("delete the pod with lifecycle hook")
-		podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
 		if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 			ginkgo.By("check prestop hook")
 			gomega.Eventually(func() error {
diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index d0b7e9d80d3..0dc11b19665 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -296,7 +296,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		framework.ExpectNoError(err, "failed to GET scheduled pod")
 
 		ginkgo.By("deleting the pod gracefully")
-		err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
+		err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
 		framework.ExpectNoError(err, "failed to delete pod")
 
 		ginkgo.By("verifying the kubelet observed the termination notice")
diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go
index c71c183f92e..b3ba73fabe3 100644
--- a/test/e2e/framework/network/utils.go
+++ b/test/e2e/framework/network/utils.go
@@ -678,7 +678,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
 // DeleteNetProxyPod deletes the first endpoint pod and waits for it being removed.
 func (config *NetworkingTestConfig) DeleteNetProxyPod() {
 	pod := config.EndpointPods[0]
-	config.getPodClient().Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+	config.getPodClient().Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 	config.EndpointPods = config.EndpointPods[1:]
 	// wait for pod being deleted.
 	err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go
index 7ff48f6de28..3c9163fbaef 100644
--- a/test/e2e/network/dns.go
+++ b/test/e2e/network/dns.go
@@ -419,7 +419,7 @@ var _ = SIGDescribe("DNS", func() {
 		framework.Logf("Created pod %v", testAgnhostPod)
 		defer func() {
 			framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
-			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
 			}
 		}()
@@ -478,7 +478,7 @@ var _ = SIGDescribe("DNS", func() {
 		framework.Logf("Created pod %v", testServerPod)
 		defer func() {
 			framework.Logf("Deleting pod %s...", testServerPod.Name)
-			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
 			}
 		}()
@@ -510,7 +510,7 @@ var _ = SIGDescribe("DNS", func() {
 		framework.Logf("Created pod %v", testUtilsPod)
 		defer func() {
 			framework.Logf("Deleting pod %s...", testUtilsPod.Name)
-			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
 			}
 		}()
diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go
index ff0424dc322..70e78610cbd 100644
--- a/test/e2e/network/dns_common.go
+++ b/test/e2e/network/dns_common.go
@@ -256,7 +256,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
 
 func (t *dnsTestCommon) deleteUtilPod() {
 	podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
-	if err := podClient.Delete(context.TODO(), t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+	if err := podClient.Delete(context.TODO(), t.utilPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 		framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.utilPod.Name, err)
 	}
 }
@@ -273,7 +273,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
 	podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)
 
 	for _, pod := range pods.Items {
-		err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err, "failed to delete pod: %s", pod.Name)
 	}
 }
@@ -382,7 +382,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(namespace string, isIPv6 bo
 
 func (t *dnsTestCommon) deleteDNSServerPod() {
 	podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
-	if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+	if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 		framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.dnsServerPod.Name, err)
 	}
 }
@@ -577,7 +577,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
 	defer func() {
 		ginkgo.By("deleting the pod")
 		defer ginkgo.GinkgoRecover()
-		podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 	}()
 	if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
 		framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
@@ -605,7 +605,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
 	defer func() {
 		ginkgo.By("deleting the pod")
 		defer ginkgo.GinkgoRecover()
-		podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+		podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 	}()
 	if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
 		framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go
index 59c3c8c7e83..5bf70d13268 100644
--- a/test/e2e/network/dual_stack.go
+++ b/test/e2e/network/dual_stack.go
@@ -115,7 +115,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
 		framework.ExpectEqual(isIPv4(p.Status.PodIPs[0].IP) != isIPv4(p.Status.PodIPs[1].IP), true)
 
 		ginkgo.By("deleting the pod")
-		err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
+		err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
 		framework.ExpectNoError(err, "failed to delete pod")
 	})
 
diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go
index 2f71205278e..66f9b70740b 100644
--- a/test/e2e/node/pre_stop.go
+++ b/test/e2e/node/pre_stop.go
@@ -198,7 +198,7 @@ var _ = SIGDescribe("PreStop", func() {
 		framework.ExpectNoError(err, "failed to GET scheduled pod")
 
 		ginkgo.By("deleting the pod gracefully")
-		err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
+		err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
 		framework.ExpectNoError(err, "failed to delete pod")
 
 		//wait up to graceful termination period seconds
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 19c3991111a..975750b547d 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -201,7 +201,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectError(err)
 
 		ginkgo.By("Deleting a LimitRange")
-		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, metav1.NewDeleteOptions(30))
+		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, *metav1.NewDeleteOptions(30))
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Verifying the LimitRange was deleted")
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 9fbbf9297c4..2e11e22fafa 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -879,7 +879,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
 	pod := runPausePod(f, conf)
 
 	ginkgo.By("Explicitly delete pod here to free the resource it takes.")
-	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 	framework.ExpectNoError(err)
 
 	return pod.Spec.NodeName
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 9afcd8adde4..50f3a26f1b3 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -73,7 +73,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 
 	ginkgo.AfterEach(func() {
 		for _, pair := range priorityPairs {
-			cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0))
+			cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
 		}
 	})
 
@@ -239,7 +239,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		defer func() {
 			// Clean-up the critical pod
 			// Always run cleanup to make sure the pod is properly cleaned up.
-			err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0))
+			err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
 			if err != nil && !apierrors.IsNotFound(err) {
 				framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
 			}
@@ -256,7 +256,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 
 		defer func() {
 			// Clean-up the critical pod
-			err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0))
+			err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
 			framework.ExpectNoError(err)
 		}()
 		// Make sure that the lowest priority pod is deleted.
@@ -460,7 +460,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 			framework.ExpectNoError(err)
 		}
 		for _, pair := range priorityPairs {
-			cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0))
+			cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
 		}
 	})
 
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index 9d721e8fa76..d66a20f8951 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -183,7 +183,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
 		}, func() {
 			framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod))
 			clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
-			cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, metav1.NewDeleteOptions(0))
+			cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, *metav1.NewDeleteOptions(0))
 		}
 }
 
diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go
index 4368e5bba0b..f8e4b58a538 100644
--- a/test/e2e/storage/empty_dir_wrapper.go
+++ b/test/e2e/storage/empty_dir_wrapper.go
@@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
 				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
 			}
 			ginkgo.By("Cleaning up the pod")
-			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)); err != nil {
+			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
 			}
 		}()
@@ -260,7 +260,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
 	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
 		ginkgo.By("Cleaning up the git server pod")
-		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
 		}
 		ginkgo.By("Cleaning up the git server svc")
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index b3a04a297d6..04d90e11d3d 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			{
 				descr:     podImmediateGrace,
 				readOnly:  false,
-				deleteOpt: metav1.NewDeleteOptions(0),
+				deleteOpt: *metav1.NewDeleteOptions(0),
 			},
 			{
 				descr:    podDefaultGrace,
@@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			{
 				descr:     podImmediateGrace,
 				readOnly:  true,
-				deleteOpt: metav1.NewDeleteOptions(0),
+				deleteOpt: *metav1.NewDeleteOptions(0),
 			},
 			{
 				descr:    podDefaultGrace,
@@ -151,7 +151,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name))
 
 			ginkgo.By("deleting the fmtPod")
-			framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
+			framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
 			framework.Logf("deleted fmtPod %q", fmtPod.Name)
 			ginkgo.By("waiting for PD to detach")
 			framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
@@ -269,7 +269,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				ginkgo.By("defer: cleaning up PD-RW test environment")
 				framework.Logf("defer cleanup errors can usually be ignored")
 				if host0Pod != nil {
-					podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0))
+					podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
 				}
 				for _, diskName := range diskNames {
 					detachAndDeletePDs(diskName, []types.NodeName{host0Name})
@@ -305,7 +305,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 					verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify)
 
 					ginkgo.By("deleting host0Pod")
-					framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
+					framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
 				}
 				ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
 				for _, diskName := range diskNames {
@@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 				ginkgo.By("defer: cleaning up PD-RW test env")
 				framework.Logf("defer cleanup errors can usually be ignored")
 				ginkgo.By("defer: delete host0Pod")
-				podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0))
+				podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
 				ginkgo.By("defer: detach and delete PDs")
 				detachAndDeletePDs(diskName, []types.NodeName{host0Name})
 				if disruptOp == deleteNode || disruptOp == deleteNodeObj {
@@ -417,9 +417,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 
 			} else if disruptOp == deleteNodeObj {
 				ginkgo.By("deleting host0's node api object")
"Unable to delete host0's node object") + framework.ExpectNoError(nodeClient.Delete(context.TODO(), string(host0Name), *metav1.NewDeleteOptions(0)), "Unable to delete host0's node object") ginkgo.By("deleting host0Pod") - framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") } else if disruptOp == evictPod { evictTarget := &policyv1beta1.Eviction{ diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index f742c77d920..05f67513b72 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { ginkgo.By("Deleting the PV") - err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) }) @@ -114,7 +114,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") - err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") ginkgo.By("Checking that the PV status is Terminating") @@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Deleting the PVC that is bound to the PV") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC") diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 91d81a52b21..b2ab547bd05 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -115,7 +115,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Deleting the PVC") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false @@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by 
a pod")
-		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
+		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err, "Error deleting PVC")
 
 		ginkgo.By("Checking that the PVC status is Terminating")
@@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 
 	ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
 		ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
-		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
+		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
 		framework.ExpectNoError(err, "Error deleting PVC")
 
 		ginkgo.By("Checking that the PVC status is Terminating")
diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go
index c0caf76d4a1..b6e956e8347 100644
--- a/test/e2e/storage/utils/utils.go
+++ b/test/e2e/storage/utils/utils.go
@@ -299,7 +299,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 
 	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
 	if forceDelete {
-		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
+		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
 	} else {
 		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
 	}
@@ -385,7 +385,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
 
 	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
 	if forceDelete {
-		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
+		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
 	} else {
 		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
 	}
diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go
index 47a0e7df008..11eb75d26b0 100644
--- a/test/e2e/windows/density.go
+++ b/test/e2e/windows/density.go
@@ -265,7 +265,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
-			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
+			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
 			framework.ExpectNoError(err)
 
 			err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go
index a05309e32d6..e04cbf525f6 100644
--- a/test/e2e/windows/dns.go
+++ b/test/e2e/windows/dns.go
@@ -55,7 +55,7 @@ var _ = SIGDescribe("DNS", func() {
 		framework.Logf("Created pod %v", testUtilsPod)
 		defer func() {
 			framework.Logf("Deleting pod %s...", testUtilsPod.Name)
-			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
+			if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
 				framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
 			}
 		}()
diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go
index 52a261b0a4b..78eb02a1b46 100644
--- a/test/e2e_node/mirror_pod_test.go
+++ b/test/e2e_node/mirror_pod_test.go
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 			uid := pod.UID
 
 			ginkgo.By("delete the mirror pod with grace period 30s")
-			err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(30))
+			err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(30))
 			framework.ExpectNoError(err)
 
 			ginkgo.By("wait for the mirror pod to be recreated")
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 			uid := pod.UID
 
 			ginkgo.By("delete the mirror pod with grace period 0s")
-			err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(0))
+			err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(0))
 			framework.ExpectNoError(err)
 
 			ginkgo.By("wait for the mirror pod to be recreated")
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go
index c26ce838221..1075529a97c 100644
--- a/test/e2e_node/node_problem_detector_linux.go
+++ b/test/e2e_node/node_problem_detector_linux.go
@@ -375,13 +375,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
 			framework.Logf("Node Problem Detector logs:\n %s", log)
 		}
 		ginkgo.By("Delete the node problem detector")
-		f.PodClient().Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
+		f.PodClient().Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
 		ginkgo.By("Wait for the node problem detector to disappear")
 		gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
 		ginkgo.By("Delete the config map")
 		c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})
 		ginkgo.By("Clean up the events")
-		gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
+		gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
 		ginkgo.By("Clean up the node condition")
 		patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
 		c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO())
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index 6ff1d4008de..6145ce6a7e4 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -376,7 +376,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
-			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
+			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
 			framework.ExpectNoError(err)
 
 			gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
diff --git a/test/integration/master/transformation_testcase.go b/test/integration/master/transformation_testcase.go
index 9e3325307dd..29ee0f346ec 100644
--- a/test/integration/master/transformation_testcase.go
+++ b/test/integration/master/transformation_testcase.go
@@ -98,7 +98,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
 
 func (e *transformTest) cleanUp() {
 	os.RemoveAll(e.configDir)
-	e.restClient.CoreV1().Namespaces().Delete(context.TODO(), e.ns.Name, metav1.NewDeleteOptions(0))
+	e.restClient.CoreV1().Namespaces().Delete(context.TODO(), e.ns.Name, *metav1.NewDeleteOptions(0))
 	e.kubeAPIServer.TearDownFn()
 }
 
diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go
index 50d29db7902..c52be01a733 100644
--- a/test/integration/scheduler/predicates_test.go
+++ b/test/integration/scheduler/predicates_test.go
@@ -850,7 +850,7 @@ func TestInterPodAffinity(t *testing.T) {
 				t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
 			}
 
-			err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, metav1.NewDeleteOptions(0))
+			err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0))
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 			}
@@ -865,7 +865,7 @@ func TestInterPodAffinity(t *testing.T) {
 			} else {
 				nsName = testCtx.NS.Name
 			}
-			err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
+			err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 			}
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go
index ababa4221f6..afcd019d6c6 100644
--- a/test/integration/scheduler/util.go
+++ b/test/integration/scheduler/util.go
@@ -447,7 +447,7 @@ func waitCachedPodsStable(testCtx *testutils.TestContext, pods []*v1.Pod) error
 
 // deletePod deletes the given pod in the given namespace.
 func deletePod(cs clientset.Interface, podName string, nsName string) error {
-	return cs.CoreV1().Pods(nsName).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
+	return cs.CoreV1().Pods(nsName).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
 }
 
 func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Pod, error) {
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 172f0970345..4e740f9e40b 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -161,7 +161,7 @@ type TestContext struct {
 
 // CleanupNodes cleans all nodes which were created during integration test
 func CleanupNodes(cs clientset.Interface, t *testing.T) {
-	err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), metav1.ListOptions{})
+	err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("error while deleting all nodes: %v", err)
 	}
@@ -194,7 +194,7 @@ func CleanupTest(t *testing.T, testCtx *TestContext) {
 
 // CleanupPods deletes the given pods and waits for them to be actually deleted.
 func CleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 	for _, p := range pods {
-		err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.NewDeleteOptions(0))
+		err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, *metav1.NewDeleteOptions(0))
 		if err != nil && !apierrors.IsNotFound(err) {
 			t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
 		}
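
Note: for reference, below is a minimal sketch (not part of the patch) of the
call-site pattern this change applies everywhere above. It assumes the
post-refactor client-go signature, where Delete takes a context.Context and a
metav1.DeleteOptions value; the forceDeletePod helper name is hypothetical.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// forceDeletePod force-deletes a pod (grace period 0), mirroring the
// call sites touched by this patch.
func forceDeletePod(client kubernetes.Interface, namespace, name string) error {
	// Before the refactor, Delete accepted *metav1.DeleteOptions, so callers
	// passed metav1.NewDeleteOptions(0) directly. NewDeleteOptions still
	// returns a pointer, so with options now passed by value the result is
	// dereferenced at each call site rather than changing its return type.
	return client.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
}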