Merge pull request #88708 from mikedanese/deleteopts

Migrate clientset metav1.DeleteOpts to pass-by-value
Authored by Kubernetes Prow Robot, committed by GitHub on 2020-03-05 23:09:23 -08:00
430 changed files with 1854 additions and 1855 deletions
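
The migration is mechanical at each call site: the generated clientset methods now accept metav1.DeleteOptions by value instead of *metav1.DeleteOptions, so callers drop the address-of operator. A minimal sketch of the new call shape, assuming a typed clientset; the helper name and variables below are illustrative, not taken from this diff:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodWithGrace is a hypothetical helper showing the post-migration
// signature: DeleteOptions is passed by value, so no &opts is needed.
func deletePodWithGrace(cs kubernetes.Interface, ns, name string) error {
	gp := int64(30)
	opts := metav1.DeleteOptions{GracePeriodSeconds: &gp}

	// Old signature: cs.CoreV1().Pods(ns).Delete(context.TODO(), name, &opts)
	// New signature:
	return cs.CoreV1().Pods(ns).Delete(context.TODO(), name, opts)
}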

@@ -87,7 +87,7 @@ func deletePods(f *framework.Framework, podNames []string) {
 delOpts := metav1.DeleteOptions{
 GracePeriodSeconds: &gp,
 }
-f.PodClient().DeleteSync(podName, &delOpts, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
 }
 }

@@ -99,10 +99,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 })
 ginkgo.AfterEach(func() {
 // Delete Pods
-f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 // Log Events
 logPodEvents(f)
 logNodeEvents(f)

@@ -205,7 +205,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 deleteOptions := metav1.DeleteOptions{
 GracePeriodSeconds: &gp,
 }
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 _, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions)
@@ -237,7 +237,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 ginkgo.By("By deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
@@ -269,7 +269,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
 ginkgo.By("by deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
@@ -281,8 +281,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
 // Cleanup
-f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(pod1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(pod2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 })
 })
 }

@@ -169,7 +169,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
 twist()
 ginkgo.By("Remove test pod")
-f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 ginkgo.By("Waiting for checkpoint to be removed")
 if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {

@@ -929,7 +929,7 @@ func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error
 // deleteConfigMapFunc simply deletes tc.configMap
 func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error {
-return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, &metav1.DeleteOptions{})
+return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, metav1.DeleteOptions{})
 }
 // createConfigMapFunc creates tc.configMap and updates the UID and ResourceVersion on tc.configMap

@@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -364,7 +364,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -417,7 +417,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -535,7 +535,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 ginkgo.By("deleting pods")
 for _, spec := range testSpecs {
 ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
+f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
 }
 // In case a test fails before verifying that NodeCondition no longer exist on the node,

@@ -245,7 +245,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 ginkgo.AfterEach(func() {
 for _, pod := range test.testPods {
 ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 }
 ginkgo.By("Making sure all containers get cleaned up")

@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 continue
 }
-f.PodClient().Delete(context.TODO(), p.Name, &metav1.DeleteOptions{})
+f.PodClient().Delete(context.TODO(), p.Name, metav1.DeleteOptions{})
 }
 })
@@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 framework.ExpectEqual(devID1, devID2)
 ginkgo.By("Deleting device plugin.")
-f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, &metav1.DeleteOptions{})
+f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, metav1.DeleteOptions{})
 ginkgo.By("Waiting for GPUs to become unavailable on the local node")
 gomega.Eventually(func() bool {
 node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
@@ -162,8 +162,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 logDevicePluginMetrics()
 // Cleanup
-f.PodClient().DeleteSync(p1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-f.PodClient().DeleteSync(p2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(p1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(p2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 })
 })
 })

@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 uid := pod.UID
 ginkgo.By("delete the mirror pod with grace period 30s")
-err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(30))
+err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(30))
 framework.ExpectNoError(err)
 ginkgo.By("wait for the mirror pod to be recreated")
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 uid := pod.UID
 ginkgo.By("delete the mirror pod with grace period 0s")
-err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(0))
+err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, *metav1.NewDeleteOptions(0))
 framework.ExpectNoError(err)
 ginkgo.By("wait for the mirror pod to be recreated")

@@ -80,7 +80,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
 delOpts := metav1.DeleteOptions{
 GracePeriodSeconds: &gp,
 }
-f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout)
 ginkgo.By("running the post test exec from the workload")
 err := wl.PostTestExec()
 framework.ExpectNoError(err)

@@ -375,13 +375,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
 framework.Logf("Node Problem Detector logs:\n %s", log)
 }
 ginkgo.By("Delete the node problem detector")
-f.PodClient().Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
+f.PodClient().Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
 ginkgo.By("Wait for the node problem detector to disappear")
 gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
 ginkgo.By("Delete the config map")
-c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, nil)
+c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})
 ginkgo.By("Clean up the events")
-gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
+gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), *metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
 ginkgo.By("Clean up the node condition")
 patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
 c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO())
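
Two less mechanical cases appear in the hunk above: metav1.NewDeleteOptions still returns *metav1.DeleteOptions, so its result is now dereferenced at the value-typed call sites, and callers that previously passed nil options now pass the zero value. A hedged sketch under the same assumptions as the earlier example (the helpers and client variables are illustrative, not part of this diff):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// deleteImmediately shows the NewDeleteOptions pattern: the helper returns a
// pointer, so it is dereferenced before being passed by value.
func deleteImmediately(pods corev1client.PodInterface, name string) error {
	return pods.Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
}

// deleteWithDefaults shows the former nil-options pattern: an empty
// metav1.DeleteOptions{} now stands in for nil and requests default behavior.
func deleteWithDefaults(cms corev1client.ConfigMapInterface, name string) error {
	return cms.Delete(context.TODO(), name, metav1.DeleteOptions{})
}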

@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("pod" + podUID)
 f.PodClient().Create(pod)
@@ -248,7 +248,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
 f.PodClient().Create(pod)
@@ -293,7 +293,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
 f.PodClient().Create(pod)

@@ -376,7 +376,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 defer ginkgo.GinkgoRecover()
 defer wg.Done()
-err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
+err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
 framework.ExpectNoError(err)
 gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),

@@ -96,8 +96,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 })
 ginkgo.AfterEach(func() {
 ginkgo.By("Deleting test pods")
-f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
-f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
+f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{}, 10*time.Minute)
+f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{}, 10*time.Minute)
 if !ginkgo.CurrentGinkgoTestDescription().Failed {
 return
 }

@@ -496,16 +496,16 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
 }
 ginkgo.By("Delete SRIOV device plugin pod %s/%s")
-err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(sd.pod.Spec.Containers[0].Name, sd.pod.Name, sd.pod.Namespace)
 ginkgo.By(fmt.Sprintf("Deleting configMap %v/%v", metav1.NamespaceSystem, sd.configMap.Name))
-err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, deleteOptions)
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Deleting serviceAccount %v/%v", metav1.NamespaceSystem, sd.serviceAccount.Name))
-err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, deleteOptions)
 framework.ExpectNoError(err)
 }

@@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 })
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 gp := int64(1)
-f.PodClient().Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+f.PodClient().Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 if err == nil {
 break
 }