Mirror of https://github.com/k3s-io/kubernetes.git
automated refactor
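The change applied throughout this commit is the client-go v0.18-style signature update: the generated Delete methods take a context.Context and a metav1.DeleteOptions value rather than a *metav1.DeleteOptions, so call sites drop the leading & on an options literal and can no longer pass nil. A minimal sketch of the new call shape, assuming nothing beyond client-go itself (the helper name and parameters are illustrative, not taken from this commit):

	// Illustrative sketch only; this helper is not part of the commit.
	package example

	import (
		"context"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// deletePodWithGrace shows the value-typed options now expected by Delete.
	func deletePodWithGrace(cs kubernetes.Interface, ns, name string, grace int64) error {
		opts := metav1.DeleteOptions{GracePeriodSeconds: &grace}
		// client-go v0.18+: Delete(ctx, name, metav1.DeleteOptions) takes the struct by value.
		return cs.CoreV1().Pods(ns).Delete(context.TODO(), name, opts)
	}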
@@ -205,7 +205,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 deleteOptions := metav1.DeleteOptions{
 GracePeriodSeconds: &gp,
 }
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 _, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions)
@@ -237,7 +237,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 
 ginkgo.By("By deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
@@ -269,7 +269,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
 
 ginkgo.By("by deleting the pods and waiting for container removal")
-err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
@@ -929,7 +929,7 @@ func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error
 
 // deleteConfigMapFunc simply deletes tc.configMap
 func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error {
-return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, &metav1.DeleteOptions{})
+return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, metav1.DeleteOptions{})
 }
 
 // createConfigMapFunc creates tc.configMap and updates the UID and ResourceVersion on tc.configMap
@@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -364,7 +364,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -417,7 +417,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
-err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{})
+err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{})
 framework.ExpectNoError(err)
 })
 specs := []podEvictSpec{
@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 continue
 }
 
-f.PodClient().Delete(context.TODO(), p.Name, &metav1.DeleteOptions{})
+f.PodClient().Delete(context.TODO(), p.Name, metav1.DeleteOptions{})
 }
 })
 
@@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 framework.ExpectEqual(devID1, devID2)
 
 ginkgo.By("Deleting device plugin.")
-f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, &metav1.DeleteOptions{})
+f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, metav1.DeleteOptions{})
 ginkgo.By("Waiting for GPUs to become unavailable on the local node")
 gomega.Eventually(func() bool {
 node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
@@ -379,7 +379,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
 ginkgo.By("Wait for the node problem detector to disappear")
 gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
 ginkgo.By("Delete the config map")
-c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, nil)
+c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})
 ginkgo.By("Clean up the events")
 gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed())
 ginkgo.By("Clean up the node condition")
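With the options parameter now a value type, the nil that the config map deletion above previously passed no longer compiles; the zero value metav1.DeleteOptions{} is the equivalent "no options" form. A hedged one-liner in the shape of the new call, where c, ns, and configName stand in for the test's variables and the imports match the sketch near the top of this commit:

	// Zero value replaces nil: default delete semantics, no grace period or preconditions.
	_ = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{})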
@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("pod" + podUID)
 f.PodClient().Create(pod)
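Only the options struct itself moves from pointer to value in these hunks; GracePeriodSeconds remains a *int64 field inside metav1.DeleteOptions, so the &gp inside the literal is unchanged. A short hedged sketch of that shape (the pod name is illustrative, and f is the test framework as in the surrounding code):

	gp := int64(1)
	// The struct is passed by value, but the grace period stays a pointer so "unset" differs from 0.
	_ = f.PodClient().Delete(context.TODO(), "example-pod", metav1.DeleteOptions{GracePeriodSeconds: &gp})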
@@ -248,7 +248,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
 f.PodClient().Create(pod)
@@ -293,7 +293,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 })
 ginkgo.By("Checking if the pod cgroup was deleted", func() {
 gp := int64(1)
-err := f.PodClient().Delete(context.TODO(), burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+err := f.PodClient().Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 framework.ExpectNoError(err)
 pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
 f.PodClient().Create(pod)
@@ -489,16 +489,16 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
 }
 
 ginkgo.By("Delete SRIOV device plugin pod %s/%s")
-err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, deleteOptions)
 framework.ExpectNoError(err)
 waitForContainerRemoval(sd.pod.Spec.Containers[0].Name, sd.pod.Name, sd.pod.Namespace)
 
 ginkgo.By(fmt.Sprintf("Deleting configMap %v/%v", metav1.NamespaceSystem, sd.configMap.Name))
-err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, deleteOptions)
 framework.ExpectNoError(err)
 
 ginkgo.By(fmt.Sprintf("Deleting serviceAccount %v/%v", metav1.NamespaceSystem, sd.serviceAccount.Name))
-err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, &deleteOptions)
+err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, deleteOptions)
 framework.ExpectNoError(err)
 }
@@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
 })
 err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 gp := int64(1)
-f.PodClient().Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+f.PodClient().Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
 if err == nil {
 break
 }