From 243c7ff11b2899d6773086e17b18eec03c999346 Mon Sep 17 00:00:00 2001
From: Wei Huang
Date: Tue, 17 Jul 2018 14:05:48 -0700
Subject: [PATCH] test(scheduler): add more integration tests for
 TaintNodesByCondition

- refactor test cases to table-driven testing style
- cover more scenarios for TaintNodesByCondition
- update Bazel build files
---
 test/integration/scheduler/BUILD         |   1 +
 test/integration/scheduler/taint_test.go | 722 +++++++++++++++++------
 test/integration/scheduler/util.go       |  39 ++
 3 files changed, 579 insertions(+), 183 deletions(-)

diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD
index 08c91bcf734..7b80b65e614 100644
--- a/test/integration/scheduler/BUILD
+++ b/test/integration/scheduler/BUILD
@@ -96,6 +96,7 @@ go_library(
         "//pkg/scheduler/algorithmprovider:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
+        "//pkg/util/taints:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go
index dd4153c5449..6dc9c202875 100644
--- a/test/integration/scheduler/taint_test.go
+++ b/test/integration/scheduler/taint_test.go
@@ -19,7 +19,7 @@ package scheduler
 // This file tests the Taint feature.
 
 import (
-    "reflect"
+    "fmt"
     "testing"
     "time"
 
@@ -29,7 +29,6 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
     "k8s.io/kubernetes/pkg/controller/nodelifecycle"
@@ -40,10 +39,28 @@ import (
     pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
 )
 
-// TestTaintNodeByCondition verifies:
-// 1. MemoryPressure Toleration is added to non-BestEffort Pod by PodTolerationRestriction
-// 2. NodeController taints nodes by node condition
-// 3. Scheduler allows pod to tolerate node condition taints, e.g. network unavailable
+func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: nsName,
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  "busybox",
+                    Image: "busybox",
+                    Resources: v1.ResourceRequirements{
+                        Requests: req,
+                        Limits:   limit,
+                    },
+                },
+            },
+        },
+    }
+}
+
+// TestTaintNodeByCondition tests cases related to the TaintNodesByCondition feature.
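+// It verifies that the node lifecycle controller taints nodes for their
+// conditions, and that the scheduler places pods onto those nodes only when
+// the pods carry the matching tolerations.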
 func TestTaintNodeByCondition(t *testing.T) {
     enabled := utilfeature.DefaultFeatureGate.Enabled("TaintNodesByCondition")
     defer func() {
@@ -76,7 +93,7 @@ func TestTaintNodeByCondition(t *testing.T) {
     algorithmprovider.ApplyFeatureGates()
 
     context = initTestScheduler(t, context, controllerCh, false, nil)
-    clientset := context.clientSet
+    cs := context.clientSet
     informers := context.informerFactory
     nsName := context.ns.Name
 
@@ -86,7 +103,7 @@ func TestTaintNodeByCondition(t *testing.T) {
         informers.Core().V1().Nodes(),
         informers.Extensions().V1beta1().DaemonSets(),
         nil, // CloudProvider
-        clientset,
+        cs,
         time.Second, // Node monitor grace period
         time.Second, // Node startup grace period
         time.Second, // Node monitor period
@@ -112,83 +129,369 @@ func TestTaintNodeByCondition(t *testing.T) {
     // -------------------------------------------
     // Test TaintNodeByCondition feature.
     // -------------------------------------------
+    nodeRes := v1.ResourceList{
+        v1.ResourceCPU:    resource.MustParse("4000m"),
+        v1.ResourceMemory: resource.MustParse("16Gi"),
+        v1.ResourcePods:   resource.MustParse("110"),
+    }
+
+    podRes := v1.ResourceList{
+        v1.ResourceCPU:    resource.MustParse("100m"),
+        v1.ResourceMemory: resource.MustParse("100Mi"),
+    }
+
+    notReadyToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeNotReady,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    unreachableToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeUnreachable,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    unschedulableToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeUnschedulable,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    outOfDiskToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeOutOfDisk,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
     memoryPressureToleration := v1.Toleration{
         Key:      algorithm.TaintNodeMemoryPressure,
         Operator: v1.TolerationOpExists,
         Effect:   v1.TaintEffectNoSchedule,
     }
 
-    // Case 1: Add MememoryPressure Toleration for non-BestEffort pod.
-    burstablePod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "burstable-pod",
-            Namespace: nsName,
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
+    diskPressureToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeDiskPressure,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    networkUnavailableToleration := v1.Toleration{
+        Key:      algorithm.TaintNodeNetworkUnavailable,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    pidPressureToleration := v1.Toleration{
+        Key:      algorithm.TaintNodePIDPressure,
+        Operator: v1.TolerationOpExists,
+        Effect:   v1.TaintEffectNoSchedule,
+    }
+
+    bestEffortPod := newPod(nsName, "besteffort-pod", nil, nil)
+    burstablePod := newPod(nsName, "burstable-pod", podRes, nil)
+    guaranteePod := newPod(nsName, "guarantee-pod", podRes, podRes)
+
+    type podCase struct {
+        pod         *v1.Pod
+        tolerations []v1.Toleration
+        fits        bool
+    }
+
+    // Table-driven test cases: one entry per node condition under test.
+    tests := []struct {
+        name           string
+        existingTaints []v1.Taint
+        nodeConditions []v1.NodeCondition
+        unschedulable  bool
+        expectedTaints []v1.Taint
+        pods           []podCase
+    }{
+        {
+            name: "not-ready node",
+            nodeConditions: []v1.NodeCondition{
                 {
-                    Name:  "busybox",
-                    Image: "busybox",
-                    Resources: v1.ResourceRequirements{
-                        Requests: v1.ResourceList{
-                            v1.ResourceCPU: resource.MustParse("100m"),
-                        },
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionFalse,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeNotReady,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{notReadyToleration},
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "unreachable node",
+            existingTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeUnreachable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionUnknown, // node status is "Unknown"
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeUnreachable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{unreachableToleration},
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "unschedulable node",
+            unschedulable: true, // node.spec.unschedulable = true
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeUnschedulable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{unschedulableToleration},
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "out of disk node",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeOutOfDisk,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeOutOfDisk,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            // In OutOfDisk condition, only pods with the matching toleration can be scheduled.
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{outOfDiskToleration},
+                    fits: true,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{diskPressureToleration},
+                    fits: false,
+                },
+            },
+        },
+        {
+            name: "memory pressure node",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeMemoryPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeMemoryPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            // In MemoryPressure condition, both Burstable and Guaranteed pods are scheduled;
+            // a BestEffort pod is scheduled only with the matching toleration.
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{memoryPressureToleration},
+                    fits: true,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{diskPressureToleration},
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: true,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "disk pressure node",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeDiskPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeDiskPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            // In DiskPressure condition, only pods with the matching toleration can be scheduled.
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{diskPressureToleration},
+                    fits: true,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{memoryPressureToleration},
+                    fits: false,
+                },
+            },
+        },
+        {
+            name: "network unavailable and node is ready",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodeNetworkUnavailable,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeNetworkUnavailable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    tolerations: []v1.Toleration{
+                        networkUnavailableToleration,
                     },
+                    fits: true,
                 },
             },
         },
-    }
-
-    burstablePodInServ, err := clientset.CoreV1().Pods(nsName).Create(burstablePod)
-    if err != nil {
-        t.Errorf("Case 1: Failed to create pod: %v", err)
-    } else if !reflect.DeepEqual(burstablePodInServ.Spec.Tolerations, []v1.Toleration{memoryPressureToleration}) {
-        t.Errorf("Case 1: Unexpected toleration of non-BestEffort pod, expected: %+v, got: %v",
-            []v1.Toleration{memoryPressureToleration},
-            burstablePodInServ.Spec.Tolerations)
-    }
-
-    // Case 2: No MemoryPressure Toleration for BestEffort pod.
-    besteffortPod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "best-effort-pod",
-            Namespace: nsName,
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:  "busybox",
-                    Image: "busybox",
-                },
-            },
-        },
-    }
-
-    besteffortPodInServ, err := clientset.CoreV1().Pods(nsName).Create(besteffortPod)
-    if err != nil {
-        t.Errorf("Case 2: Failed to create pod: %v", err)
-    } else if len(besteffortPodInServ.Spec.Tolerations) != 0 {
-        t.Errorf("Case 2: Unexpected toleration # of BestEffort pod, expected: 0, got: %v",
-            len(besteffortPodInServ.Spec.Tolerations))
-    }
-
-    // Case 3: Taint Node by NetworkUnavailable condition.
-    networkUnavailableNode := &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "node-1",
-        },
-        Status: v1.NodeStatus{
-            Capacity: v1.ResourceList{
-                v1.ResourceCPU:    resource.MustParse("4000m"),
-                v1.ResourceMemory: resource.MustParse("16Gi"),
-                v1.ResourcePods:   resource.MustParse("110"),
-            },
-            Allocatable: v1.ResourceList{
-                v1.ResourceCPU:    resource.MustParse("4000m"),
-                v1.ResourceMemory: resource.MustParse("16Gi"),
-                v1.ResourcePods:   resource.MustParse("110"),
-            },
-            Conditions: []v1.NodeCondition{
+        {
+            name: "network unavailable and node is not ready",
+            nodeConditions: []v1.NodeCondition{
                 {
                     Type: v1.NodeNetworkUnavailable,
                     Status: v1.ConditionTrue,
@@ -198,121 +501,174 @@ func TestTaintNodeByCondition(t *testing.T) {
                     Status: v1.ConditionFalse,
                 },
             },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeNetworkUnavailable,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+                {
+                    Key: algorithm.TaintNodeNotReady,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    tolerations: []v1.Toleration{
+                        networkUnavailableToleration,
+                    },
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    tolerations: []v1.Toleration{
+                        networkUnavailableToleration,
+                        notReadyToleration,
+                    },
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "pid pressure node",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodePIDPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodePIDPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
+            pods: []podCase{
+                {
+                    pod: bestEffortPod,
+                    fits: false,
+                },
+                {
+                    pod: burstablePod,
+                    fits: false,
+                },
+                {
+                    pod: guaranteePod,
+                    fits: false,
+                },
+                {
+                    pod: bestEffortPod,
+                    tolerations: []v1.Toleration{pidPressureToleration},
+                    fits: true,
+                },
+            },
+        },
+        {
+            name: "multi taints on node",
+            nodeConditions: []v1.NodeCondition{
+                {
+                    Type: v1.NodePIDPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeMemoryPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeDiskPressure,
+                    Status: v1.ConditionTrue,
+                },
+                {
+                    Type: v1.NodeReady,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            expectedTaints: []v1.Taint{
+                {
+                    Key: algorithm.TaintNodeDiskPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+                {
+                    Key: algorithm.TaintNodeMemoryPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+                {
+                    Key: algorithm.TaintNodePIDPressure,
+                    Effect: v1.TaintEffectNoSchedule,
+                },
+            },
         },
     }
 
-    nodeInformerCh := make(chan bool)
-    nodeInformer := informers.Core().V1().Nodes().Informer()
-    nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-        UpdateFunc: func(old, cur interface{}) {
-            curNode := cur.(*v1.Node)
-            if curNode.Name != "node-1" {
-                return
+    for _, test := range tests {
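+        // Each case: create a node with the given spec and conditions, wait for
+        // the node controller to apply the expected taints, then verify that
+        // each pod case is scheduled or kept pending according to its fits flag.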
+        t.Run(test.name, func(t *testing.T) {
+            node := &v1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "node-1",
+                },
+                Spec: v1.NodeSpec{
+                    Unschedulable: test.unschedulable,
+                    Taints: test.existingTaints,
+                },
+                Status: v1.NodeStatus{
+                    Capacity: nodeRes,
+                    Allocatable: nodeRes,
+                    Conditions: test.nodeConditions,
+                },
+            }
-            for _, taint := range curNode.Spec.Taints {
-                if taint.Key == algorithm.TaintNodeNetworkUnavailable &&
-                    taint.Effect == v1.TaintEffectNoSchedule {
-                    nodeInformerCh <- true
-                    break
+
+            if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
+                t.Errorf("Failed to create node, err: %v", err)
+            }
+            if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
+                t.Errorf("Failed to taint node, err: %v", err)
+            }
+
+            var pods []*v1.Pod
+            for i, p := range test.pods {
+                pod := p.pod.DeepCopy()
+                pod.Name = fmt.Sprintf("%s-%d", pod.Name, i)
+                pod.Spec.Tolerations = p.tolerations
+
+                createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
+                if err != nil {
+                    t.Fatalf("Failed to create pod %s/%s, error: %v",
+                        pod.Namespace, pod.Name, err)
+                }
+
+                pods = append(pods, createdPod)
+
+                if p.fits {
+                    if err := waitForPodToSchedule(cs, createdPod); err != nil {
+                        t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
+                            pod.Namespace, pod.Name, err)
+                    }
+                } else {
+                    if err := waitForPodUnschedulable(cs, createdPod); err != nil {
+                        t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
+                            pod.Namespace, pod.Name, err)
+                    }
                 }
             }
-        },
-    })
-    if _, err := clientset.CoreV1().Nodes().Create(networkUnavailableNode); err != nil {
-        t.Errorf("Case 3: Failed to create node: %v", err)
-    } else {
-        select {
-        case <-time.After(60 * time.Second):
-            t.Errorf("Case 3: Failed to taint node after 60s.")
-        case <-nodeInformerCh:
-        }
-    }
-
-    // Case 4: Schedule Pod with NetworkUnavailable and NotReady toleration.
-    networkDaemonPod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      "network-daemon-pod",
-            Namespace: nsName,
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:  "busybox",
-                    Image: "busybox",
-                },
-            },
-            Tolerations: []v1.Toleration{
-                {
-                    Key:      algorithm.TaintNodeNetworkUnavailable,
-                    Operator: v1.TolerationOpExists,
-                    Effect:   v1.TaintEffectNoSchedule,
-                },
-                {
-                    Key:      algorithm.TaintNodeNotReady,
-                    Operator: v1.TolerationOpExists,
-                    Effect:   v1.TaintEffectNoSchedule,
-                },
-            },
-        },
-    }
-
-    if _, err := clientset.CoreV1().Pods(nsName).Create(networkDaemonPod); err != nil {
-        t.Errorf("Case 4: Failed to create pod for network daemon: %v", err)
-    } else {
-        if err := waitForPodToScheduleWithTimeout(clientset, networkDaemonPod, time.Second*60); err != nil {
-            t.Errorf("Case 4: Failed to schedule network daemon pod in 60s.")
-        }
-    }
-
-    // Case 5: Taint node by unschedulable condition
-    unschedulableNode := &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "node-2",
-        },
-        Spec: v1.NodeSpec{
-            Unschedulable: true,
-        },
-        Status: v1.NodeStatus{
-            Capacity: v1.ResourceList{
-                v1.ResourceCPU:    resource.MustParse("4000m"),
-                v1.ResourceMemory: resource.MustParse("16Gi"),
-                v1.ResourcePods:   resource.MustParse("110"),
-            },
-            Allocatable: v1.ResourceList{
-                v1.ResourceCPU:    resource.MustParse("4000m"),
-                v1.ResourceMemory: resource.MustParse("16Gi"),
-                v1.ResourcePods:   resource.MustParse("110"),
-            },
-        },
-    }
-
-    nodeInformerCh2 := make(chan bool)
-    nodeInformer2 := informers.Core().V1().Nodes().Informer()
-    nodeInformer2.AddEventHandler(cache.ResourceEventHandlerFuncs{
-        UpdateFunc: func(old, cur interface{}) {
-            curNode := cur.(*v1.Node)
-            if curNode.Name != "node-2" {
-                return
-            }
-
-            for _, taint := range curNode.Spec.Taints {
-                if taint.Key == algorithm.TaintNodeUnschedulable &&
-                    taint.Effect == v1.TaintEffectNoSchedule {
-                    nodeInformerCh2 <- true
-                    break
-                }
-            }
-        },
-    })
-
-    if _, err := clientset.CoreV1().Nodes().Create(unschedulableNode); err != nil {
-        t.Errorf("Case 5: Failed to create node: %v", err)
-    } else {
-        select {
-        case <-time.After(60 * time.Second):
-            t.Errorf("Case 5: Failed to taint node after 60s.")
-        case <-nodeInformerCh2:
-        }
+            cleanupPods(cs, t, pods)
+            cleanupNodes(cs, t)
+        })
     }
 }
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go
index b7035855471..e40994b5d5c 100644
--- a/test/integration/scheduler/util.go
+++ b/test/integration/scheduler/util.go
@@ -51,6 +51,7 @@ import (
     _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     "k8s.io/kubernetes/pkg/scheduler/factory"
+    taintutils "k8s.io/kubernetes/pkg/util/taints"
     "k8s.io/kubernetes/test/integration/framework"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -363,6 +364,44 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
     return nodes[:], nil
 }
 
+// nodeTainted returns a condition function that returns true if the given node contains
+// the given taints.
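+// It is intended to be polled via wait.Poll (see waitForNodeTaints below):
+// an API error aborts the wait, while a non-matching taint set keeps polling.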
+func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
+    return func() (bool, error) {
+        node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+
+        if len(taints) != len(node.Spec.Taints) {
+            return false, nil
+        }
+
+        for _, taint := range taints {
+            if !taintutils.TaintExists(node.Spec.Taints, &taint) {
+                return false, nil
+            }
+        }
+
+        return true, nil
+    }
+}
+
+// waitForNodeTaints waits for a node to have the target taints and returns
+// an error if the taints are not observed within the given timeout.
+func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
+    return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
+}
+
+// cleanupNodes deletes all nodes.
+func cleanupNodes(cs clientset.Interface, t *testing.T) {
+    err := cs.CoreV1().Nodes().DeleteCollection(
+        metav1.NewDeleteOptions(0), metav1.ListOptions{})
+    if err != nil {
+        t.Errorf("error while deleting all nodes: %v", err)
+    }
+}
+
 type pausePodConfig struct {
     Name      string
     Namespace string
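
Note: nodeTainted/waitForNodeTaints follow the standard wait.ConditionFunc polling
pattern from k8s.io/apimachinery/pkg/util/wait. As a minimal sketch of the same
pattern (illustrative only: nodeHasTaintKey and this usage are hypothetical, not
part of the patch), a test could poll for a single taint key like this:

    // nodeHasTaintKey reports whether the named node currently has a taint with
    // the given key, wrapped as a wait.ConditionFunc so it can be polled.
    func nodeHasTaintKey(cs clientset.Interface, nodeName, taintKey string) wait.ConditionFunc {
        return func() (bool, error) {
            node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
            if err != nil {
                return false, err // surface API errors immediately
            }
            for _, taint := range node.Spec.Taints {
                if taint.Key == taintKey {
                    return true, nil
                }
            }
            return false, nil // not tainted yet; poll again
        }
    }

    // Example: wait up to 30s, checking every 100ms.
    err := wait.Poll(100*time.Millisecond, 30*time.Second,
        nodeHasTaintKey(cs, "node-1", algorithm.TaintNodeNotReady))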