Merge pull request #90662 from nilo19/cleanup/decouple-cloud-testutils

Remove the dependency on pkg/controller/testutil in pkg/controller/cloud.
This commit is contained in:
Kubernetes Prow Robot 2020-05-09 02:17:51 -07:00 committed by GitHub
commit ba3bf32300
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 303 additions and 273 deletions

View File

@ -39,8 +39,8 @@ go_test(
], ],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/controller/testutil:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",

View File

@ -24,6 +24,7 @@ import (
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
@ -33,21 +34,19 @@ import (
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
fakecloud "k8s.io/cloud-provider/fake" fakecloud "k8s.io/cloud-provider/fake"
"k8s.io/klog" "k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/testutil"
) )
func Test_NodesDeleted(t *testing.T) { func Test_NodesDeleted(t *testing.T) {
testcases := []struct { testcases := []struct {
name string name string
fnh *testutil.FakeNodeHandler
fakeCloud *fakecloud.Cloud fakeCloud *fakecloud.Cloud
deleteNodes []*v1.Node existingNode *v1.Node
expectedNode *v1.Node
expectedDeleted bool
}{ }{
{ {
name: "node is not ready and does not exist", name: "node is not ready and does not exist",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -63,22 +62,14 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
}, expectedDeleted: true,
DeletedNodes: []*v1.Node{},
Clientset: fake.NewSimpleClientset(),
},
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
ExistsByProviderID: false, ExistsByProviderID: false,
}, },
deleteNodes: []*v1.Node{
testutil.NewNode("node0"),
},
}, },
{ {
name: "node is not ready and provider returns err", name: "node is not ready and provider returns err",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -97,21 +88,34 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
}, },
DeletedNodes: []*v1.Node{}, Spec: v1.NodeSpec{
Clientset: fake.NewSimpleClientset(), ProviderID: "node0",
}, },
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
ExistsByProviderID: false, ExistsByProviderID: false,
ErrByProviderID: errors.New("err!"), ErrByProviderID: errors.New("err!"),
}, },
deleteNodes: []*v1.Node{},
}, },
{ {
name: "node is not ready but still exists", name: "node is not ready but still exists",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -130,20 +134,33 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
}, },
DeletedNodes: []*v1.Node{}, Spec: v1.NodeSpec{
Clientset: fake.NewSimpleClientset(), ProviderID: "node0",
}, },
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
ExistsByProviderID: true, ExistsByProviderID: true,
}, },
deleteNodes: []*v1.Node{},
}, },
{ {
name: "node ready condition is unknown, node doesn't exist", name: "node ready condition is unknown, node doesn't exist",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -159,22 +176,14 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
}, expectedDeleted: true,
DeletedNodes: []*v1.Node{},
Clientset: fake.NewSimpleClientset(),
},
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
ExistsByProviderID: false, ExistsByProviderID: false,
}, },
deleteNodes: []*v1.Node{
testutil.NewNode("node0"),
},
}, },
{ {
name: "node ready condition is unknown, node exists", name: "node ready condition is unknown, node exists",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -190,10 +199,23 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
}, },
DeletedNodes: []*v1.Node{}, Status: v1.NodeStatus{
Clientset: fake.NewSimpleClientset(), Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
}, },
},
},
},
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
NodeShutdown: false, NodeShutdown: false,
ExistsByProviderID: true, ExistsByProviderID: true,
@ -201,13 +223,10 @@ func Test_NodesDeleted(t *testing.T) {
types.NodeName("node0"): "foo://12345", types.NodeName("node0"): "foo://12345",
}, },
}, },
deleteNodes: []*v1.Node{},
}, },
{ {
name: "node is ready, but provider said it is deleted (maybe a bug in provider)", name: "node is ready, but provider said it is deleted (maybe a bug in provider)",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
@ -226,30 +245,46 @@ func Test_NodesDeleted(t *testing.T) {
}, },
}, },
}, },
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
}, },
DeletedNodes: []*v1.Node{}, Spec: v1.NodeSpec{
Clientset: fake.NewSimpleClientset(), ProviderID: "node0",
}, },
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
ExistsByProviderID: false, ExistsByProviderID: false,
}, },
deleteNodes: []*v1.Node{},
}, },
} }
for _, testcase := range testcases { for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) { t.Run(testcase.name, func(t *testing.T) {
informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second) clientset := fake.NewSimpleClientset(testcase.existingNode)
informer := informers.NewSharedInformerFactory(clientset, time.Second)
nodeInformer := informer.Core().V1().Nodes() nodeInformer := informer.Core().V1().Nodes()
if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil { if err := syncNodeStore(nodeInformer, clientset); err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
cloudNodeLifecycleController := &CloudNodeLifecycleController{ cloudNodeLifecycleController := &CloudNodeLifecycleController{
nodeLister: nodeInformer.Lister(), nodeLister: nodeInformer.Lister(),
kubeClient: testcase.fnh, kubeClient: clientset,
cloud: testcase.fakeCloud, cloud: testcase.fakeCloud,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}), recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
nodeMonitorPeriod: 1 * time.Second, nodeMonitorPeriod: 1 * time.Second,
@ -258,10 +293,14 @@ func Test_NodesDeleted(t *testing.T) {
eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartLogging(klog.Infof)
cloudNodeLifecycleController.MonitorNodes() cloudNodeLifecycleController.MonitorNodes()
if !reflect.DeepEqual(testcase.fnh.DeletedNodes, testcase.deleteNodes) { updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), testcase.existingNode.Name, metav1.GetOptions{})
t.Logf("actual nodes: %v", testcase.fnh.DeletedNodes) if testcase.expectedDeleted != apierrors.IsNotFound(err) {
t.Logf("expected nodes: %v", testcase.deleteNodes) t.Fatalf("unexpected error happens when getting the node: %v", err)
t.Error("unexpected deleted nodes") }
if !reflect.DeepEqual(updatedNode, testcase.expectedNode) {
t.Logf("actual nodes: %v", updatedNode)
t.Logf("expected nodes: %v", testcase.expectedNode)
t.Error("unexpected updated nodes")
} }
}) })
} }
@ -270,15 +309,14 @@ func Test_NodesDeleted(t *testing.T) {
func Test_NodesShutdown(t *testing.T) { func Test_NodesShutdown(t *testing.T) {
testcases := []struct { testcases := []struct {
name string name string
fnh *testutil.FakeNodeHandler
fakeCloud *fakecloud.Cloud fakeCloud *fakecloud.Cloud
updatedNodes []*v1.Node existingNode *v1.Node
expectedNode *v1.Node
expectedDeleted bool
}{ }{
{ {
name: "node is not ready and was shutdown, but exists", name: "node is not ready and was shutdown, but exists",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -297,17 +335,7 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
}, expectedNode: &v1.Node{
UpdatedNodes: []*v1.Node{},
Clientset: fake.NewSimpleClientset(),
},
fakeCloud: &fakecloud.Cloud{
NodeShutdown: true,
ExistsByProviderID: true,
ErrShutdownByProviderID: nil,
},
updatedNodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -329,13 +357,16 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{
NodeShutdown: true,
ExistsByProviderID: true,
ErrShutdownByProviderID: nil,
}, },
}, },
{ {
name: "node is not ready, but there is error checking if node is shutdown", name: "node is not ready, but there is error checking if node is shutdown",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -351,21 +382,15 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
}, expectedDeleted: true,
UpdatedNodes: []*v1.Node{},
Clientset: fake.NewSimpleClientset(),
},
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
NodeShutdown: false, NodeShutdown: false,
ErrShutdownByProviderID: errors.New("err!"), ErrShutdownByProviderID: errors.New("err!"),
}, },
updatedNodes: []*v1.Node{},
}, },
{ {
name: "node is not ready and is not shutdown", name: "node is not ready and is not shutdown",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -381,21 +406,15 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
}, expectedDeleted: true,
UpdatedNodes: []*v1.Node{},
Clientset: fake.NewSimpleClientset(),
},
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
NodeShutdown: false, NodeShutdown: false,
ErrShutdownByProviderID: nil, ErrShutdownByProviderID: nil,
}, },
updatedNodes: []*v1.Node{},
}, },
{ {
name: "node is ready but provider says it's shutdown (maybe a bug by provider)", name: "node is ready but provider says it's shutdown (maybe a bug by provider)",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -411,21 +430,31 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
}, },
UpdatedNodes: []*v1.Node{}, Status: v1.NodeStatus{
Clientset: fake.NewSimpleClientset(), Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
}, },
},
},
},
expectedDeleted: false,
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
NodeShutdown: true, NodeShutdown: true,
ErrShutdownByProviderID: nil, ErrShutdownByProviderID: nil,
}, },
updatedNodes: []*v1.Node{},
}, },
{ {
name: "node is shutdown but provider says it does not exist", name: "node is shutdown but provider says it does not exist",
fnh: &testutil.FakeNodeHandler{ existingNode: &v1.Node{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "node0", Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
@ -441,32 +470,29 @@ func Test_NodesShutdown(t *testing.T) {
}, },
}, },
}, },
}, expectedDeleted: true,
Clientset: fake.NewSimpleClientset(),
UpdatedNodes: []*v1.Node{},
},
fakeCloud: &fakecloud.Cloud{ fakeCloud: &fakecloud.Cloud{
NodeShutdown: true, NodeShutdown: true,
ExistsByProviderID: false, ExistsByProviderID: false,
ErrShutdownByProviderID: nil, ErrShutdownByProviderID: nil,
}, },
updatedNodes: []*v1.Node{}, // should be empty because node does not exist
}, },
} }
for _, testcase := range testcases { for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) { t.Run(testcase.name, func(t *testing.T) {
informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second) clientset := fake.NewSimpleClientset(testcase.existingNode)
informer := informers.NewSharedInformerFactory(clientset, time.Second)
nodeInformer := informer.Core().V1().Nodes() nodeInformer := informer.Core().V1().Nodes()
if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil { if err := syncNodeStore(nodeInformer, clientset); err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
cloudNodeLifecycleController := &CloudNodeLifecycleController{ cloudNodeLifecycleController := &CloudNodeLifecycleController{
nodeLister: nodeInformer.Lister(), nodeLister: nodeInformer.Lister(),
kubeClient: testcase.fnh, kubeClient: clientset,
cloud: testcase.fakeCloud, cloud: testcase.fakeCloud,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}), recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
nodeMonitorPeriod: 1 * time.Second, nodeMonitorPeriod: 1 * time.Second,
@ -475,17 +501,21 @@ func Test_NodesShutdown(t *testing.T) {
eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartLogging(klog.Infof)
cloudNodeLifecycleController.MonitorNodes() cloudNodeLifecycleController.MonitorNodes()
if !reflect.DeepEqual(testcase.fnh.UpdatedNodes, testcase.updatedNodes) { updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), testcase.existingNode.Name, metav1.GetOptions{})
t.Logf("actual nodes: %v", testcase.fnh.UpdatedNodes) if testcase.expectedDeleted != apierrors.IsNotFound(err) {
t.Logf("expected nodes: %v", testcase.updatedNodes) t.Fatalf("unexpected error happens when getting the node: %v", err)
}
if !reflect.DeepEqual(updatedNode, testcase.expectedNode) {
t.Logf("actual nodes: %v", updatedNode)
t.Logf("expected nodes: %v", testcase.expectedNode)
t.Error("unexpected updated nodes") t.Error("unexpected updated nodes")
} }
}) })
} }
} }
func syncNodeStore(nodeinformer coreinformers.NodeInformer, f *testutil.FakeNodeHandler) error { func syncNodeStore(nodeinformer coreinformers.NodeInformer, f *fake.Clientset) error {
nodes, err := f.List(context.TODO(), metav1.ListOptions{}) nodes, err := f.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil { if err != nil {
return err return err
} }