deref the result of all metav1.NewDeleteOptions calls that are passed to clients.

This is gross, but NewDeleteOptions is used by various parts of
storage that still pass pointers around, so its return type can't be
changed without significant refactoring within the apiserver. That
would be a worthwhile cleanup, but I want to minimize apiserver-side
changes as much as possible in the client signature refactor.
Mike Danese 2020-03-01 10:19:56 -08:00
parent c58e69ec79
commit aaf855c1e6
33 changed files with 76 additions and 76 deletions
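
Every hunk below applies the same mechanical change: metav1.NewDeleteOptions still returns a *metav1.DeleteOptions, but the refactored client methods now take metav1.DeleteOptions by value, so each call site dereferences the helper's result. A minimal standalone sketch of the shape — the types and signatures here paraphrase metav1 and client-go for illustration and are not the real API surface:

package main

import "fmt"

// DeleteOptions paraphrases metav1.DeleteOptions for illustration.
type DeleteOptions struct {
	GracePeriodSeconds *int64
}

// NewDeleteOptions keeps returning a pointer, mirroring the real helper,
// because apiserver storage code still passes *DeleteOptions around.
func NewDeleteOptions(grace int64) *DeleteOptions {
	return &DeleteOptions{GracePeriodSeconds: &grace}
}

// Delete models the refactored client method, which now takes the
// options by value instead of by pointer.
func Delete(name string, opts DeleteOptions) error {
	fmt.Printf("deleting %s with grace period %ds\n", name, *opts.GracePeriodSeconds)
	return nil
}

func main() {
	// Before the refactor a call site passed the pointer straight through:
	//   Delete("my-pod", NewDeleteOptions(0))
	// After it, the pointer result is dereferenced at the call site:
	_ = Delete("my-pod", *NewDeleteOptions(0))
}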

View File

@@ -76,7 +76,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
deletePod: func(namespace, name string) error {
klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
},
}

View File

@@ -125,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
framework.Logf("apiserver has recovered")

View File

@@ -151,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
if err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}

View File

@@ -274,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -712,7 +712,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -751,7 +751,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -808,7 +808,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -838,7 +838,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -938,7 +938,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -968,7 +968,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1028,7 +1028,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1067,7 +1067,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1114,7 +1114,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1159,7 +1159,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectError(err)
ginkgo.By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1209,9 +1209,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
@@ -1258,9 +1258,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1299,7 +1299,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
@@ -1333,7 +1333,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1391,7 +1391,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")

View File

@@ -763,7 +763,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")

View File

@@ -1451,7 +1451,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}

View File

@@ -70,7 +70,7 @@ func (cc *ConformanceContainer) Create() {
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(context.TODO(), cc.podName, metav1.NewDeleteOptions(0))
return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {

View File

@@ -415,7 +415,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.By("deleting the pod")
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)

View File

@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
}
ginkgo.By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
ginkgo.By("check prestop hook")
gomega.Eventually(func() error {

View File

@@ -296,7 +296,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying the kubelet observed the termination notice")

View File

@@ -678,7 +678,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
// DeleteNetProxyPod deletes the first endpoint pod and waits for it being removed.
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
config.getPodClient().Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted.
err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)

View File

@@ -419,7 +419,7 @@ var _ = SIGDescribe("DNS", func() {
framework.Logf("Created pod %v", testAgnhostPod)
defer func() {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
}
}()
@@ -478,7 +478,7 @@ var _ = SIGDescribe("DNS", func() {
framework.Logf("Created pod %v", testServerPod)
defer func() {
framework.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
}
}()
@@ -510,7 +510,7 @@ var _ = SIGDescribe("DNS", func() {
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()

View File

@@ -256,7 +256,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
func (t *dnsTestCommon) deleteUtilPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(context.TODO(), t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := podClient.Delete(context.TODO(), t.utilPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}
@@ -273,7 +273,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)
for _, pod := range pods.Items {
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "failed to delete pod: %s", pod.Name)
}
}
@@ -382,7 +382,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(namespace string, isIPv6 bo
func (t *dnsTestCommon) deleteDNSServerPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.dnsServerPod.Name, err)
}
@@ -577,7 +577,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
defer func() {
ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover()
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
@@ -605,7 +605,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
defer func() {
ginkgo.By("deleting the pod")
defer ginkgo.GinkgoRecover()
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)

View File

@@ -115,7 +115,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
framework.ExpectEqual(isIPv4(p.Status.PodIPs[0].IP) != isIPv4(p.Status.PodIPs[1].IP), true)
ginkgo.By("deleting the pod")
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err, "failed to delete pod")
})

View File

@@ -198,7 +198,7 @@ var _ = SIGDescribe("PreStop", func() {
framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
framework.ExpectNoError(err, "failed to delete pod")
//wait up to graceful termination period seconds

View File

@@ -201,7 +201,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectError(err)
ginkgo.By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, metav1.NewDeleteOptions(30))
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
ginkgo.By("Verifying the LimitRange was deleted")

View File

@@ -879,7 +879,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf)
ginkgo.By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName

View File

@@ -73,7 +73,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
ginkgo.AfterEach(func() {
for _, pair := range priorityPairs {
cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0))
cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
}
})
@@ -239,7 +239,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
defer func() {
// Clean-up the critical pod
// Always run cleanup to make sure the pod is properly cleaned up.
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0))
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
}
@@ -256,7 +256,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
defer func() {
// Clean-up the critical pod
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0))
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}()
// Make sure that the lowest priority pod is deleted.
@@ -460,7 +460,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
framework.ExpectNoError(err)
}
for _, pair := range priorityPairs {
cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0))
cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, *metav1.NewDeleteOptions(0))
}
})

View File

@@ -183,7 +183,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
}, func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod))
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, metav1.NewDeleteOptions(0))
cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, *metav1.NewDeleteOptions(0))
}
}

View File

@@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
}
ginkgo.By("Cleaning up the pod")
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete pod %v: %v", pod.Name, err)
}
}()
@@ -260,7 +260,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
ginkgo.By("Cleaning up the git server pod")
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
ginkgo.By("Cleaning up the git server svc")

View File

@@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
{
descr: podImmediateGrace,
readOnly: false,
deleteOpt: metav1.NewDeleteOptions(0),
deleteOpt: *metav1.NewDeleteOptions(0),
},
{
descr: podDefaultGrace,
@@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
{
descr: podImmediateGrace,
readOnly: true,
deleteOpt: metav1.NewDeleteOptions(0),
deleteOpt: *metav1.NewDeleteOptions(0),
},
{
descr: podDefaultGrace,
@@ -151,7 +151,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name))
ginkgo.By("deleting the fmtPod")
framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
framework.Logf("deleted fmtPod %q", fmtPod.Name)
ginkgo.By("waiting for PD to detach")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
@@ -269,7 +269,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("defer: cleaning up PD-RW test environment")
framework.Logf("defer cleanup errors can usually be ignored")
if host0Pod != nil {
podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
}
for _, diskName := range diskNames {
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
@@ -305,7 +305,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify)
ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
for _, diskName := range diskNames {
@@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("defer: cleaning up PD-RW test env")
framework.Logf("defer cleanup errors can usually be ignored")
ginkgo.By("defer: delete host0Pod")
podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0))
ginkgo.By("defer: detach and delete PDs")
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
if disruptOp == deleteNode || disruptOp == deleteNodeObj {
@@ -417,9 +417,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
} else if disruptOp == deleteNodeObj {
ginkgo.By("deleting host0's node api object")
framework.ExpectNoError(nodeClient.Delete(context.TODO(), string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
framework.ExpectNoError(nodeClient.Delete(context.TODO(), string(host0Name), *metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
ginkgo.By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, *metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
} else if disruptOp == evictPod {
evictTarget := &policyv1beta1.Eviction{

View File

@@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
})
@@ -114,7 +114,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
ginkgo.By("Checking that the PV status is Terminating")
@@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil)
ginkgo.By("Deleting the PVC that is bound to the PV")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")

View File

@@ -115,7 +115,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "Error terminating and deleting pod")
ginkgo.By("Deleting the PVC")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout)
pvcCreatedAndNotDeleted = false
@@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() {
ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PVC status is Terminating")
@@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0))
err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PVC status is Terminating")

View File

@@ -299,7 +299,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}
@@ -385,7 +385,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}

View File

@@ -265,7 +265,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),

View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("DNS", func() {
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()

View File

@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
ginkgo.By("delete the mirror pod with grace period 30s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(30))
err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be recreated")
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
ginkgo.By("delete the mirror pod with grace period 0s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be recreated")

View File

@@ -375,13 +375,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
framework.Logf("Node Problem Detector logs:\n %s", log)
}
ginkgo.By("Delete the node problem detector")
f.PodClient().Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
f.PodClient().Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
ginkgo.By("Wait for the node problem detector to disappear")
gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
ginkgo.By("Delete the config map")
c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})
ginkgo.By("Clean up the events")
gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
ginkgo.By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO())

View File

@@ -376,7 +376,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer ginkgo.GinkgoRecover()
defer wg.Done()
err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
framework.ExpectNoError(err)
gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),

View File

@@ -98,7 +98,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
func (e *transformTest) cleanUp() {
os.RemoveAll(e.configDir)
e.restClient.CoreV1().Namespaces().Delete(context.TODO(), e.ns.Name, metav1.NewDeleteOptions(0))
e.restClient.CoreV1().Namespaces().Delete(context.TODO(), e.ns.Name, *metav1.NewDeleteOptions(0))
e.kubeAPIServer.TearDownFn()
}

View File

@@ -850,7 +850,7 @@ func TestInterPodAffinity(t *testing.T) {
t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
}
err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, metav1.NewDeleteOptions(0))
err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
}
@@ -865,7 +865,7 @@ func TestInterPodAffinity(t *testing.T) {
} else {
nsName = testCtx.NS.Name
}
err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
}

View File

@@ -447,7 +447,7 @@ func waitCachedPodsStable(testCtx *testutils.TestContext, pods []*v1.Pod) error
// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
return cs.CoreV1().Pods(nsName).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
return cs.CoreV1().Pods(nsName).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
}
func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Pod, error) {

View File

@@ -161,7 +161,7 @@ type TestContext struct {
// CleanupNodes cleans all nodes which were created during integration test
func CleanupNodes(cs clientset.Interface, t *testing.T) {
err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), metav1.ListOptions{})
err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), metav1.ListOptions{})
if err != nil {
t.Errorf("error while deleting all nodes: %v", err)
}
@@ -194,7 +194,7 @@ func CleanupTest(t *testing.T, testCtx *TestContext) {
// CleanupPods deletes the given pods and waits for them to be actually deleted.
func CleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
for _, p := range pods {
err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.NewDeleteOptions(0))
err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, *metav1.NewDeleteOptions(0))
if err != nil && !apierrors.IsNotFound(err) {
t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
}