automated refactor

Mike Danese
2020-03-01 09:24:42 -08:00
parent 86bd06c882
commit c58e69ec79
174 changed files with 404 additions and 398 deletions
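
The change applied across all 174 files is mechanical: every Delete and DeleteCollection call that previously took a possibly-nil *metav1.DeleteOptions now takes a metav1.DeleteOptions value, so call sites that passed nil pass an empty struct instead. Below is a minimal before/after sketch of the call shapes, assuming a client-go release whose typed clients already accept a context and value-typed options; the cleanupExample helper and its arguments are illustrative, not code from this commit.

```go
// Package cleanup sketches the call-site pattern rewritten by this commit.
package cleanup

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// cleanupExample is a hypothetical helper showing the new-style calls.
func cleanupExample(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
	// Old form (pre-refactor), options passed as a pointer:
	//   cs.CoreV1().Pods(ns).Delete(ctx, podName, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod})
	// New form: DeleteOptions is passed by value; pointer-typed fields such as
	// GracePeriodSeconds are unchanged.
	gracePeriod := int64(0)
	if err := cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
		return err
	}

	// Old form:
	//   cs.CoreV1().Nodes().DeleteCollection(ctx, nil, metav1.ListOptions{})
	// New form: a zero metav1.DeleteOptions{} stands in wherever nil was passed.
	return cs.CoreV1().Nodes().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{})
}
```

Passing the options by value removes the nil-versus-empty ambiguity at call sites, which is what makes a purely automated rewrite like this one possible.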

@@ -359,7 +359,7 @@ func TestSchedulerExtender(t *testing.T) {
func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
-defer cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+defer cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
@@ -418,7 +418,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
t.Fatalf("Failed to schedule using extender, expected machine2, got %v", myPod.Spec.NodeName)
}
var gracePeriod int64
-if err := cs.CoreV1().Pods(ns.Name).Delete(context.TODO(), myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
+if err := cs.CoreV1().Pods(ns.Name).Delete(context.TODO(), myPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil {
t.Fatalf("Failed to delete pod: %v", err)
}
_, err = cs.CoreV1().Pods(ns.Name).Get(context.TODO(), myPod.Name, metav1.GetOptions{})

@@ -1220,7 +1220,7 @@ func TestPDBInPreemption(t *testing.T) {
// Cleanup
pods = append(pods, preemptor)
testutils.CleanupPods(cs, t, pods)
-cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
-cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
}
}

@@ -62,7 +62,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
for i, test := range []struct {
@@ -304,7 +304,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
@@ -341,7 +341,7 @@ func TestUnschedulableNodes(t *testing.T) {
nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
-defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
@@ -453,7 +453,7 @@ func TestUnschedulableNodes(t *testing.T) {
if err := deletePod(testCtx.ClientSet, myPod.Name, myPod.Namespace); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
-err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, nil)
+err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, metav1.DeleteOptions{})
if err != nil {
t.Errorf("Failed to delete node: %v", err)
}
@@ -826,7 +826,7 @@ func TestSchedulerInformers(t *testing.T) {
// Cleanup
pods = append(pods, unschedulable)
testutils.CleanupPods(cs, t, pods)
-cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
-cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
+cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
}
}

@@ -469,7 +469,7 @@ func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.Conditio
// cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to
// be actually deleted.
func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
-if err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}); err != nil {
+if err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
t.Errorf("error while listing pod in namespace %v: %v", ns, err)
return
}