From d1e865ee341ba37469efed8c935e22e2b483eec2 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 22 Feb 2019 10:27:46 -0500 Subject: [PATCH] Update client callers to use explicit versions --- .../app/util/apiclient/idempotency_test.go | 2 +- .../certificate_controller_test.go | 2 +- .../namespaced_resources_deleter_test.go | 8 +- .../podautoscaler/horizontal_test.go | 4 +- .../podautoscaler/legacy_horizontal_test.go | 4 +- pkg/controller/replicaset/replica_set_test.go | 2 +- pkg/controller/testutil/test_utils.go | 2 +- .../attach_detach_controller_test.go | 4 +- pkg/kubectl/cmd/cp/cp.go | 2 +- pkg/kubectl/cmd/scale/scale.go | 2 +- pkg/kubectl/cmd/scale/scalejob_test.go | 8 +- pkg/kubectl/cmd/top/top_node.go | 2 +- pkg/kubectl/cmd/top/top_pod.go | 4 +- pkg/kubectl/describe/versioned/describe.go | 120 +++++++++--------- pkg/kubectl/drain/cordon.go | 2 +- .../polymorphichelpers/helpers_test.go | 2 +- pkg/kubectl/rolling_updater_test.go | 20 +-- pkg/master/client_ca_hook_test.go | 2 +- pkg/master/controller_test.go | 10 +- pkg/master/master_test.go | 4 +- .../ipallocator/controller/repair_test.go | 8 +- .../portallocator/controller/repair_test.go | 8 +- .../nodeinfomanager/nodeinfomanager_test.go | 4 +- pkg/volume/glusterfs/glusterfs.go | 2 +- pkg/volume/rbd/rbd.go | 2 +- pkg/volume/storageos/storageos_test.go | 2 +- .../namespace/autoprovision/admission.go | 2 +- .../admission/namespace/exists/admission.go | 2 +- .../admission/podnodeselector/admission.go | 2 +- .../resourcequota/resource_access.go | 2 +- .../pkg/admission/serviceaccount/admission.go | 2 +- .../test/integration/fixtures/resources.go | 2 +- .../examples/fake-client/main_test.go | 2 +- .../tools/watch/informerwatcher_test.go | 4 +- .../cmd/client-gen/types/helpers.go | 2 +- test/e2e/framework/podlogs/podlogs.go | 11 +- .../defaulttolerationseconds_test.go | 2 +- test/integration/evictions/evictions_test.go | 20 +-- test/integration/metrics/metrics_test.go | 2 +- 
.../integration/objectmeta/objectmeta_test.go | 4 +- test/integration/pods/pods_test.go | 6 +- test/integration/quota/quota_test.go | 18 +-- .../serviceaccount/service_account_test.go | 50 ++++---- .../statefulset/statefulset_test.go | 4 +- test/integration/statefulset/util.go | 4 +- .../ttlcontroller/ttlcontroller_test.go | 4 +- test/integration/volume/attach_detach_test.go | 30 ++--- .../volume/persistent_volumes_test.go | 22 ++-- 48 files changed, 215 insertions(+), 214 deletions(-) diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go index b4ac6a43631..d306f622682 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go @@ -62,7 +62,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { client := fake.NewSimpleClientset() - _, err := client.Core().Nodes().Create(&tc.node) + _, err := client.CoreV1().Nodes().Create(&tc.node) if err != nil { t.Fatalf("failed to create node to fake client: %v", err) } diff --git a/pkg/controller/certificates/certificate_controller_test.go b/pkg/controller/certificates/certificate_controller_test.go index 8805fbfcef5..daf4689a42e 100644 --- a/pkg/controller/certificates/certificate_controller_test.go +++ b/pkg/controller/certificates/certificate_controller_test.go @@ -47,7 +47,7 @@ func TestCertificateController(t *testing.T) { Reason: "test reason", Message: "test message", }) - _, err := client.Certificates().CertificateSigningRequests().UpdateApproval(csr) + _, err := client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr) if err != nil { return err } diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go index ae9770cad07..e7ca633351b 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go 
+++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter_test.go @@ -66,7 +66,7 @@ func TestFinalizeNamespaceFunc(t *testing.T) { }, } d := namespacedResourcesDeleter{ - nsClient: mockClient.Core().Namespaces(), + nsClient: mockClient.CoreV1().Namespaces(), finalizerToken: v1.FinalizerKubernetes, } d.finalizeNamespace(testNamespace) @@ -180,7 +180,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *metav1.APIVersio fn := func() ([]*metav1.APIResourceList, error) { return resources, nil } - d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), dynamicClient, mockClient.Core(), fn, v1.FinalizerKubernetes, true) + d := NewNamespacedResourcesDeleter(mockClient.CoreV1().Namespaces(), dynamicClient, mockClient.CoreV1(), fn, v1.FinalizerKubernetes, true) if err := d.Delete(testInput.testNamespace.Name); err != nil { t.Errorf("scenario %s - Unexpected error when synching namespace %v", scenario, err) } @@ -219,7 +219,7 @@ func TestRetryOnConflictError(t *testing.T) { } namespace := &v1.Namespace{} d := namespacedResourcesDeleter{ - nsClient: mockClient.Core().Namespaces(), + nsClient: mockClient.CoreV1().Namespaces(), } _, err := d.retryOnConflictError(namespace, retryOnce) if err != nil { @@ -255,7 +255,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { fn := func() ([]*metav1.APIResourceList, error) { return testResources(), nil } - d := NewNamespacedResourcesDeleter(mockClient.Core().Namespaces(), nil, mockClient.Core(), + d := NewNamespacedResourcesDeleter(mockClient.CoreV1().Namespaces(), nil, mockClient.CoreV1(), fn, v1.FinalizerKubernetes, true) err := d.Delete(testNamespace.Name) if err != nil { diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 406c77ffff9..524f6e7c8ae 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -665,9 +665,9 @@ func (tc *testCase) setupController(t *testing.T) 
(*HorizontalController, inform defaultDownscalestabilizationWindow := 5 * time.Minute hpaController := NewHorizontalController( - eventClient.Core(), + eventClient.CoreV1(), testScaleClient, - testClient.Autoscaling(), + testClient.AutoscalingV1(), testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), metricsClient, informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go index 4f97232a522..dfcda2e294d 100644 --- a/pkg/controller/podautoscaler/legacy_horizontal_test.go +++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go @@ -494,9 +494,9 @@ func (tc *legacyTestCase) runTest(t *testing.T) { defaultDownscaleStabilisationWindow := 5 * time.Minute hpaController := NewHorizontalController( - eventClient.Core(), + eventClient.CoreV1(), testScaleClient, - testClient.Autoscaling(), + testClient.AutoscalingV1(), testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), metricsClient, informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index 278601d0efa..4c98ef4d88b 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -672,7 +672,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error") }) - fakeRSClient := fakeClient.Apps().ReplicaSets("default") + fakeRSClient := fakeClient.AppsV1().ReplicaSets("default") numReplicas := int32(10) newStatus := apps.ReplicaSetStatus{Replicas: numReplicas} updateReplicaSetStatus(fakeRSClient, rs, newStatus) diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go index 069c2d61a86..be13e4637a9 100644 --- 
a/pkg/controller/testutil/test_utils.go +++ b/pkg/controller/testutil/test_utils.go @@ -96,7 +96,7 @@ func (m *FakeNodeHandler) GetUpdatedNodesCopy() []*v1.Node { // Core returns fake CoreInterface. func (m *FakeNodeHandler) Core() v1core.CoreV1Interface { - return &FakeLegacyHandler{m.Clientset.Core(), m} + return &FakeLegacyHandler{m.Clientset.CoreV1(), m} } // CoreV1 returns fake CoreV1Interface diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index ec66fab1f8e..c197ae2dd8e 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -156,7 +156,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 stopCh := make(chan struct{}) - pods, err := fakeKubeClient.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) + pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } @@ -166,7 +166,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 podInformer.GetIndexer().Add(&podToAdd) podsNum++ } - nodes, err := fakeKubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := fakeKubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. 
Expected: Actual: %v", err) } diff --git a/pkg/kubectl/cmd/cp/cp.go b/pkg/kubectl/cmd/cp/cp.go index 4f6c93e9a1c..0c7f19d903e 100644 --- a/pkg/kubectl/cmd/cp/cp.go +++ b/pkg/kubectl/cmd/cp/cp.go @@ -499,7 +499,7 @@ func (o *CopyOptions) execute(options *exec.ExecOptions) error { } options.Config = o.ClientConfig - options.PodClient = o.Clientset.Core() + options.PodClient = o.Clientset.CoreV1() if err := options.Validate(); err != nil { return err diff --git a/pkg/kubectl/cmd/scale/scale.go b/pkg/kubectl/cmd/scale/scale.go index 997d37cefbb..ef2d32a43db 100644 --- a/pkg/kubectl/cmd/scale/scale.go +++ b/pkg/kubectl/cmd/scale/scale.go @@ -228,7 +228,7 @@ func (o *ScaleOptions) RunScale() error { // go down the legacy jobs path. This can be removed in 3.14 For now, contain it. fmt.Fprintf(o.ErrOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", o.parent) - if err := ScaleJob(info, o.clientSet.Batch(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil { + if err := ScaleJob(info, o.clientSet.BatchV1(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil { return err } diff --git a/pkg/kubectl/cmd/scale/scalejob_test.go b/pkg/kubectl/cmd/scale/scalejob_test.go index ec1734cafa4..4ed570aca96 100644 --- a/pkg/kubectl/cmd/scale/scalejob_test.go +++ b/pkg/kubectl/cmd/scale/scalejob_test.go @@ -69,7 +69,7 @@ func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface { } func TestJobScaleRetry(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true} + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), conflict: true} scaler := &JobPsuedoScaler{JobsClient: fake} preconditions := ScalePrecondition{-1, ""} count := uint(3) @@ -103,7 +103,7 @@ func job() *batch.Job { func TestJobScale(t *testing.T) { fakeClientset := fake.NewSimpleClientset(job()) - scaler := &JobPsuedoScaler{JobsClient: fakeClientset.Batch()} + scaler := 
&JobPsuedoScaler{JobsClient: fakeClientset.BatchV1()} preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" @@ -122,7 +122,7 @@ func TestJobScale(t *testing.T) { } func TestJobScaleInvalid(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true} + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().BatchV1(), invalid: true} scaler := &JobPsuedoScaler{JobsClient: fake} preconditions := ScalePrecondition{-1, ""} count := uint(3) @@ -150,7 +150,7 @@ func TestJobScaleFailsPreconditions(t *testing.T) { Parallelism: &ten, }, }) - scaler := &JobPsuedoScaler{JobsClient: fake.Batch()} + scaler := &JobPsuedoScaler{JobsClient: fake.BatchV1()} preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" diff --git a/pkg/kubectl/cmd/top/top_node.go b/pkg/kubectl/cmd/top/top_node.go index 48e7f48e0c7..073b2ccdc0c 100644 --- a/pkg/kubectl/cmd/top/top_node.go +++ b/pkg/kubectl/cmd/top/top_node.go @@ -219,7 +219,7 @@ func (o TopNodeOptions) RunTopNode() error { func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error) { var err error versionedMetrics := &metricsV1beta1api.NodeMetricsList{} - mc := metricsClient.Metrics() + mc := metricsClient.MetricsV1beta1() nm := mc.NodeMetricses() if resourceName != "" { m, err := nm.Get(resourceName, metav1.GetOptions{}) diff --git a/pkg/kubectl/cmd/top/top_pod.go b/pkg/kubectl/cmd/top/top_pod.go index 1ba4c722166..63938cab376 100644 --- a/pkg/kubectl/cmd/top/top_pod.go +++ b/pkg/kubectl/cmd/top/top_pod.go @@ -205,13 +205,13 @@ func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespac } versionedMetrics := &metricsv1beta1api.PodMetricsList{} if resourceName != "" { - m, err := metricsClient.Metrics().PodMetricses(ns).Get(resourceName, metav1.GetOptions{}) + m, err := metricsClient.MetricsV1beta1().PodMetricses(ns).Get(resourceName, 
metav1.GetOptions{}) if err != nil { return nil, err } versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m} } else { - versionedMetrics, err = metricsClient.Metrics().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) + versionedMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } diff --git a/pkg/kubectl/describe/versioned/describe.go b/pkg/kubectl/describe/versioned/describe.go index f3ca513183a..ab69bdf6997 100644 --- a/pkg/kubectl/describe/versioned/describe.go +++ b/pkg/kubectl/describe/versioned/describe.go @@ -222,7 +222,7 @@ func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) ( if err != nil { return nil, false } - eventsClient := clientSet.Core() + eventsClient := clientSet.CoreV1() return &genericDescriber{mapping, dynamicClient, eventsClient}, true } @@ -349,11 +349,11 @@ type NamespaceDescriber struct { } func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{}) + ns, err := d.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return "", err } - resourceQuotaList, err := d.Core().ResourceQuotas(name).List(metav1.ListOptions{}) + resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support resource quotas. @@ -363,7 +363,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return "", err } } - limitRangeList, err := d.Core().LimitRanges(name).List(metav1.ListOptions{}) + limitRangeList, err := d.CoreV1().LimitRanges(name).List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support limit ranges. 
@@ -522,7 +522,7 @@ type LimitRangeDescriber struct { } func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - lr := d.Core().LimitRanges(namespace) + lr := d.CoreV1().LimitRanges(namespace) limitRange, err := lr.Get(name, metav1.GetOptions{}) if err != nil { @@ -549,7 +549,7 @@ type ResourceQuotaDescriber struct { } func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rq := d.Core().ResourceQuotas(namespace) + rq := d.CoreV1().ResourceQuotas(namespace) resourceQuota, err := rq.Get(name, metav1.GetOptions{}) if err != nil { @@ -619,10 +619,10 @@ type PodDescriber struct { } func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := d.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { if describerSettings.ShowEvents { - eventsInterface := d.Core().Events(namespace) + eventsInterface := d.CoreV1().Events(namespace) selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) options := metav1.ListOptions{FieldSelector: selector.String()} events, err2 := eventsInterface.List(options) @@ -647,7 +647,7 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings descri if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod { ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey]) } - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ref) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref) } } @@ -1263,7 +1263,7 @@ type PersistentVolumeDescriber struct { } func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().PersistentVolumes() + c := d.CoreV1().PersistentVolumes() pv, 
err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -1272,7 +1272,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, pv) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv) } return describePersistentVolume(pv, events) @@ -1411,21 +1411,21 @@ type PersistentVolumeClaimDescriber struct { } func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().PersistentVolumeClaims(namespace) + c := d.CoreV1().PersistentVolumeClaims(namespace) pvc, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - pc := d.Core().Pods(namespace) + pc := d.CoreV1().Pods(namespace) mountPods, err := getMountPods(pc, pvc.Name) if err != nil { return "", err } - events, _ := d.Core().Events(namespace).Search(scheme.Scheme, pvc) + events, _ := d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc) return describePersistentVolumeClaim(pvc, events, mountPods) } @@ -1872,8 +1872,8 @@ type ReplicationControllerDescriber struct { } func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rc := d.Core().ReplicationControllers(namespace) - pc := d.Core().Pods(namespace) + rc := d.CoreV1().ReplicationControllers(namespace) + pc := d.CoreV1().Pods(namespace) controller, err := rc.Get(name, metav1.GetOptions{}) if err != nil { @@ -1887,7 +1887,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, controller) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller) } return describeReplicationController(controller, events, running, waiting, succeeded, 
failed) @@ -1944,8 +1944,8 @@ type ReplicaSetDescriber struct { } func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rsc := d.Apps().ReplicaSets(namespace) - pc := d.Core().Pods(namespace) + rsc := d.AppsV1().ReplicaSets(namespace) + pc := d.CoreV1().Pods(namespace) rs, err := rsc.Get(name, metav1.GetOptions{}) if err != nil { @@ -1961,7 +1961,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, rs) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs) } return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) @@ -2006,14 +2006,14 @@ type JobDescriber struct { } func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{}) + job, err := d.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, job) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job) } return describeJob(job, events) @@ -2160,8 +2160,8 @@ type DaemonSetDescriber struct { } func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - dc := d.Apps().DaemonSets(namespace) - pc := d.Core().Pods(namespace) + dc := d.AppsV1().DaemonSets(namespace) + pc := d.CoreV1().Pods(namespace) daemon, err := dc.Get(name, metav1.GetOptions{}) if err != nil { @@ -2179,7 +2179,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = 
d.Core().Events(namespace).Search(scheme.Scheme, daemon) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon) } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) @@ -2218,7 +2218,7 @@ type SecretDescriber struct { } func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Secrets(namespace) + c := d.CoreV1().Secrets(namespace) secret, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2258,7 +2258,7 @@ type IngressDescriber struct { } func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := i.Extensions().Ingresses(namespace) + c := i.ExtensionsV1beta1().Ingresses(namespace) ing, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2267,8 +2267,8 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings de } func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1.IngressBackend) string { - endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) - service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) + endpoints, _ := i.CoreV1().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) + service, _ := i.CoreV1().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) spName := "" for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] @@ -2330,7 +2330,7 @@ func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, descr describeIngressAnnotations(w, ing.Annotations) if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(scheme.Scheme, ing) + events, _ := i.CoreV1().Events(ing.Namespace).Search(scheme.Scheme, ing) if events != nil { DescribeEvents(events, w) } @@ -2366,17 +2366,17 @@ type ServiceDescriber struct { } func (d *ServiceDescriber) Describe(namespace, name 
string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Services(namespace) + c := d.CoreV1().Services(namespace) service, err := c.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{}) + endpoints, _ := d.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, service) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service) } return describeService(service, endpoints, events) } @@ -2464,7 +2464,7 @@ type EndpointsDescriber struct { } func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().Endpoints(namespace) + c := d.CoreV1().Endpoints(namespace) ep, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2473,7 +2473,7 @@ func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ep) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep) } return describeEndpoints(ep, events) @@ -2539,7 +2539,7 @@ type ServiceAccountDescriber struct { } func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().ServiceAccounts(namespace) + c := d.CoreV1().ServiceAccounts(namespace) serviceAccount, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2551,7 +2551,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett // missingSecrets is the set of all secrets present in the // serviceAccount but not present in the set of existing secrets. 
missingSecrets := sets.NewString() - secrets, err := d.Core().Secrets(namespace).List(metav1.ListOptions{}) + secrets, err := d.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) // errors are tolerated here in order to describe the serviceAccount with all // of the secrets that it references, even if those secrets cannot be fetched. @@ -2585,7 +2585,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(scheme.Scheme, serviceAccount) + events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount) } return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) @@ -2656,7 +2656,7 @@ type RoleDescriber struct { } func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.Rbac().Roles(namespace).Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().Roles(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2695,7 +2695,7 @@ type ClusterRoleDescriber struct { } func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.Rbac().ClusterRoles().Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().ClusterRoles().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2751,7 +2751,7 @@ type RoleBindingDescriber struct { } func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.Rbac().RoleBindings(namespace).Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().RoleBindings(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2783,7 +2783,7 @@ type ClusterRoleBindingDescriber struct { } func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, 
describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.Rbac().ClusterRoleBindings().Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().ClusterRoleBindings().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2815,7 +2815,7 @@ type NodeDescriber struct { } func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - mc := d.Core().Nodes() + mc := d.CoreV1().Nodes() node, err := mc.Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2828,7 +2828,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings descr // in a policy aware setting, users may have access to a node, but not all pods // in that case, we note that the user does not have access to the pods canViewPods := true - nodeNonTerminatedPodsList, err := d.Core().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) + nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { if !errors.IsForbidden(err) { return "", err @@ -2843,7 +2843,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings descr } else { // TODO: We haven't decided the namespace for Node object yet. 
ref.UID = types.UID(ref.Name) - events, _ = d.Core().Events("").Search(scheme.Scheme, ref) + events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref) } } @@ -2939,11 +2939,11 @@ type StatefulSetDescriber struct { } func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + ps, err := p.client.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - pc := p.client.Core().Pods(namespace) + pc := p.client.CoreV1().Pods(namespace) selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector) if err != nil { @@ -2957,7 +2957,7 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, ps) + events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, ps) } return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) @@ -2997,7 +2997,7 @@ type CertificateSigningRequestDescriber struct { } func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{}) + csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3013,7 +3013,7 @@ func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, csr) + events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr) } return describeCertificateSigningRequest(csr, cr, status, events) @@ -3081,7 +3081,7 @@ func (d 
*HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.client.Core().Events(namespace).Search(scheme.Scheme, hpa) + events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpa) } return describeHorizontalPodAutoscaler(hpa, events, d) @@ -3411,7 +3411,7 @@ type ConfigMapDescriber struct { } func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Core().ConfigMaps(namespace) + c := d.CoreV1().ConfigMaps(namespace) configMap, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -3431,7 +3431,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings w.Write(LEVEL_0, "%s\n", string(v)) } if describerSettings.ShowEvents { - events, err := d.Core().Events(namespace).Search(scheme.Scheme, configMap) + events, err := d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap) if err != nil { return err } @@ -3449,7 +3449,7 @@ type NetworkPolicyDescriber struct { } func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.Networking().NetworkPolicies(namespace) + c := d.NetworkingV1().NetworkPolicies(namespace) networkPolicy, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -3580,14 +3580,14 @@ type StorageClassDescriber struct { } func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{}) + sc, err := s.StorageV1().StorageClasses().Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(scheme.Scheme, sc) + events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, sc) } return describeStorageClass(sc, 
events) @@ -3664,14 +3664,14 @@ type PodDisruptionBudgetDescriber struct { } func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) + pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.Core().Events(namespace).Search(scheme.Scheme, pdb) + events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdb) } return describePodDisruptionBudget(pdb, events) @@ -3720,7 +3720,7 @@ func (s *PriorityClassDescriber) Describe(namespace, name string, describerSetti var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(scheme.Scheme, pc) + events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, pc) } return describePriorityClass(pc, events) @@ -3749,7 +3749,7 @@ type PodSecurityPolicyDescriber struct { } func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - psp, err := d.Policy().PodSecurityPolicies().Get(name, metav1.GetOptions{}) + psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/pkg/kubectl/drain/cordon.go b/pkg/kubectl/drain/cordon.go index e3eb77fdab8..fc33975266e 100644 --- a/pkg/kubectl/drain/cordon.go +++ b/pkg/kubectl/drain/cordon.go @@ -73,7 +73,7 @@ func (c *CordonHelper) UpdateIfRequired(desired bool) bool { // JSON, or if either patch or update calls fail; it will also return a second error // whenever creating a patch has failed func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, error) { - client := clientset.Core().Nodes() + client := clientset.CoreV1().Nodes() oldData, err := 
json.Marshal(c.node) if err != nil { diff --git a/pkg/kubectl/polymorphichelpers/helpers_test.go b/pkg/kubectl/polymorphichelpers/helpers_test.go index 2aba02e6db8..7afcd90c0fc 100644 --- a/pkg/kubectl/polymorphichelpers/helpers_test.go +++ b/pkg/kubectl/polymorphichelpers/helpers_test.go @@ -174,7 +174,7 @@ func TestGetFirstPod(t *testing.T) { } selector := labels.Set(labelSet).AsSelector() - pod, numPods, err := GetFirstPod(fake.Core(), metav1.NamespaceDefault, selector.String(), 1*time.Minute, test.sortBy) + pod, numPods, err := GetFirstPod(fake.CoreV1(), metav1.NamespaceDefault, selector.String(), 1*time.Minute, test.sortBy) pod.Spec.SecurityContext = nil if !test.expectedErr && err != nil { t.Errorf("%s: unexpected error: %v", test.name, err) diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 824f2ac82c1..ac37bef85c1 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -903,8 +903,8 @@ func TestUpdate_assignOriginalAnnotation(t *testing.T) { newRc := newRc(1, 1) fake := fake.NewSimpleClientset(oldRc) updater := &RollingUpdater{ - rcClient: fake.Core(), - podClient: fake.Core(), + rcClient: fake.CoreV1(), + podClient: fake.CoreV1(), ns: "default", scaleAndWait: func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) { return rc, nil @@ -1101,7 +1101,7 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) { Container: tt.container, DeploymentKey: tt.deploymentKey, } - updatedRc, err := CreateNewControllerFromCurrentController(fake.Core(), codec, config) + updatedRc, err := CreateNewControllerFromCurrentController(fake.CoreV1(), codec, config) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1177,8 +1177,8 @@ func TestRollingUpdater_cleanupWithClients(t *testing.T) { fake := fake.NewSimpleClientset(objs...) 
updater := &RollingUpdater{ ns: "default", - rcClient: fake.Core(), - podClient: fake.Core(), + rcClient: fake.CoreV1(), + podClient: fake.CoreV1(), } config := &RollingUpdaterConfig{ Out: ioutil.Discard, @@ -1227,7 +1227,7 @@ func TestRollingUpdater_cleanupWithClients_Rename(t *testing.T) { return false, nil, nil }) - err := Rename(fake.Core(), rcExisting, rc.Name) + err := Rename(fake.CoreV1(), rcExisting, rc.Name) if err != nil { t.Fatal(err) } @@ -1315,7 +1315,7 @@ func TestFindSourceController(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fakeClient := fake.NewSimpleClientset(tt.list) - ctrl, err := FindSourceController(fakeClient.Core(), "default", tt.name) + ctrl, err := FindSourceController(fakeClient.CoreV1(), "default", tt.name) if tt.expectError && err == nil { t.Errorf("unexpected non-error") } @@ -1425,7 +1425,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { t.Run(tt.name, func(t *testing.T) { buffer := &bytes.Buffer{} fakeClient := fake.NewSimpleClientset(tt.expectedRc) - rc, err := UpdateExistingReplicationController(fakeClient.Core(), fakeClient.Core(), tt.rc, "default", tt.name, tt.deploymentKey, tt.deploymentValue, buffer) + rc, err := UpdateExistingReplicationController(fakeClient.CoreV1(), fakeClient.CoreV1(), tt.rc, "default", tt.name, tt.deploymentKey, tt.deploymentValue, buffer) if !reflect.DeepEqual(rc, tt.expectedRc) { t.Errorf("expected:\n%#v\ngot:\n%#v\n", tt.expectedRc, rc) } @@ -1832,8 +1832,8 @@ func TestRollingUpdater_readyPods(t *testing.T) { updater := &RollingUpdater{ ns: "default", - rcClient: client.Core(), - podClient: client.Core(), + rcClient: client.CoreV1(), + podClient: client.CoreV1(), nowFn: tt.nowFn, } oldReady, newReady, err := updater.readyPods(tt.oldRc, tt.newRc, tt.minReadySeconds) diff --git a/pkg/master/client_ca_hook_test.go b/pkg/master/client_ca_hook_test.go index 1d88dbe0c2c..69baf28266e 100644 --- a/pkg/master/client_ca_hook_test.go +++ 
b/pkg/master/client_ca_hook_test.go @@ -215,7 +215,7 @@ func TestWriteClientCAs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { client := fake.NewSimpleClientset(test.preexistingObjs...) - test.hook.tryToWriteClientCAs(client.Core()) + test.hook.tryToWriteClientCAs(client.CoreV1()) actualConfigMaps, updated := getFinalConfigMaps(client) if !reflect.DeepEqual(test.expectedConfigMaps, actualConfigMaps) { diff --git a/pkg/master/controller_test.go b/pkg/master/controller_test.go index cfab7074ca2..682baa00792 100644 --- a/pkg/master/controller_test.go +++ b/pkg/master/controller_test.go @@ -392,7 +392,7 @@ func TestReconcileEndpoints(t *testing.T) { if test.endpoints != nil { fakeClient = fake.NewSimpleClientset(test.endpoints) } - reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core()) + reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.CoreV1()) err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -510,7 +510,7 @@ func TestReconcileEndpoints(t *testing.T) { if test.endpoints != nil { fakeClient = fake.NewSimpleClientset(test.endpoints) } - reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.Core()) + reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, fakeClient.CoreV1()) err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -593,7 +593,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range create_tests { master := Controller{} fakeClient := fake.NewSimpleClientset() - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() 
master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) creates := []core.CreateAction{} for _, action := range fakeClient.Actions() { @@ -875,7 +875,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range reconcile_tests { master := Controller{} fakeClient := fake.NewSimpleClientset(test.service) - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) @@ -934,7 +934,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) { for _, test := range non_reconcile_tests { master := Controller{} fakeClient := fake.NewSimpleClientset(test.service) - master.ServiceClient = fakeClient.Core() + master.ServiceClient = fakeClient.CoreV1() err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 63c9a9ea19f..efac9d45f09 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -237,7 +237,7 @@ func makeNodeList(nodes []string, nodeResources apiv1.NodeResources) *apiv1.Node func TestGetNodeAddresses(t *testing.T) { assert := assert.New(t) - fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes() + fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).CoreV1().Nodes() addressProvider := nodeAddressProvider{fakeNodeClient} // Fail case (no addresses associated with nodes) @@ -261,7 +261,7 @@ func TestGetNodeAddresses(t *testing.T) { func TestGetNodeAddressesWithOnlySomeExternalIP(t 
*testing.T) { assert := assert.New(t) - fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2", "node3"}, apiv1.NodeResources{})).Core().Nodes() + fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2", "node3"}, apiv1.NodeResources{})).CoreV1().Nodes() addressProvider := nodeAddressProvider{fakeNodeClient} // Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP. diff --git a/pkg/registry/core/service/ipallocator/controller/repair_test.go b/pkg/registry/core/service/ipallocator/controller/repair_test.go index af2e59b22f4..1410850e684 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair_test.go +++ b/pkg/registry/core/service/ipallocator/controller/repair_test.go @@ -56,7 +56,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, } _, cidr, _ := net.ParseCIDR(ipregistry.item.Range) - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); err != nil { t.Fatal(err) @@ -69,7 +69,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "192.168.1.0/24"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r = NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -97,7 +97,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) // Run through the "leak detection holdoff" loops. 
for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -170,7 +170,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), cidr, ipregistry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry) if err := r.RunOnce(); err != nil { t.Fatal(err) } diff --git a/pkg/registry/core/service/portallocator/controller/repair_test.go b/pkg/registry/core/service/portallocator/controller/repair_test.go index 0df94f898d7..5043685e339 100644 --- a/pkg/registry/core/service/portallocator/controller/repair_test.go +++ b/pkg/registry/core/service/portallocator/controller/repair_test.go @@ -56,7 +56,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, } pr, _ := net.ParsePortRange(registry.item.Range) - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) @@ -69,7 +69,7 @@ func TestRepair(t *testing.T) { item: &api.RangeAllocation{Range: "100-200"}, updateErr: fmt.Errorf("test error"), } - r = NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r = NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } @@ -97,7 +97,7 @@ func TestRepairLeak(t *testing.T) { }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) // Run through the "leak detection holdoff" loops. 
for i := 0; i < (numRepairsBeforeLeakCleanup - 1); i++ { if err := r.RunOnce(); err != nil { @@ -182,7 +182,7 @@ func TestRepairWithExisting(t *testing.T) { Data: dst.Data, }, } - r := NewRepair(0, fakeClient.Core(), fakeClient.Core(), *pr, registry) + r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), *pr, registry) if err := r.RunOnce(); err != nil { t.Fatal(err) } diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index 14f0fc67f6d..d900acb6e61 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -605,7 +605,7 @@ func TestInstallCSIDriverExistingAnnotation(t *testing.T) { } // Assert - nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := csiClient.CsiV1alpha1().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) if err != nil { t.Errorf("error getting CSINodeInfo: %v", err) continue @@ -1018,7 +1018,7 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t } /* CSINodeInfo validation */ - nodeInfo, err := csiClient.Csi().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := csiClient.CsiV1alpha1().CSINodeInfos().Get(nodeName, metav1.GetOptions{}) if err != nil { t.Errorf("error getting CSINodeInfo: %v", err) continue diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 47a10cf6f90..d357d0aec92 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -153,7 +153,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.Core().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) if err != nil { 
klog.Errorf("failed to get endpoint %s: %v", epName, err) diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 3678ebd9c11..d62dcbc1740 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -472,7 +472,7 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/storageos/storageos_test.go b/pkg/volume/storageos/storageos_test.go index 2b448bc9167..9acd857b803 100644 --- a/pkg/volume/storageos/storageos_test.go +++ b/pkg/volume/storageos/storageos_test.go @@ -171,7 +171,7 @@ func TestPlugin(t *testing.T) { client := fake.NewSimpleClientset() - client.Core().Secrets("default").Create(&v1.Secret{ + client.CoreV1().Secrets("default").Create(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: "default", diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go index 2c95f49e895..54e021df77d 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -89,7 +89,7 @@ func (p *Provision) Admit(a admission.Attributes, o admission.ObjectInterfaces) Status: corev1.NamespaceStatus{}, } - _, err = p.client.Core().Namespaces().Create(namespace) + _, err = p.client.CoreV1().Namespaces().Create(namespace) if err != nil && !errors.IsAlreadyExists(err) { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go index 703081747d1..e14ead89c61 100644 --- 
a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -75,7 +75,7 @@ func (e *Exists) Validate(a admission.Attributes, o admission.ObjectInterfaces) } // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - _, err = e.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + _, err = e.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return err diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index 94fe90ec7d0..103397f7576 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -216,7 +216,7 @@ func (p *podNodeSelector) ValidateInitialization() error { } func (p *podNodeSelector) defaultGetNamespace(name string) (*corev1.Namespace, error) { - namespace, err := p.client.Core().Namespaces().Get(name, metav1.GetOptions{}) + namespace, err := p.client.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("namespace %s does not exist", name) } diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index f703d478b33..dfdde7e753e 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -125,7 +125,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. 
- liveList, err := e.client.Core().ResourceQuotas(namespace).List(metav1.ListOptions{}) + liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index eb9418805b1..17496491b5f 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -304,7 +304,7 @@ func (s *serviceAccount) getServiceAccount(namespace string, name string) (*core if i != 0 { time.Sleep(retryInterval) } - serviceAccount, err := s.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + serviceAccount, err := s.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) if err == nil { return serviceAccount, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go index 7230725d4db..dcd5277e7a9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go @@ -355,7 +355,7 @@ func isWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition, dyna // DeleteCustomResourceDefinition deletes a CRD and waits until it disappears from discovery. 
func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.Apiextensions().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { return err } for _, version := range servedVersions(crd) { diff --git a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go index d77d7ecbc4d..6f83c3e8d5c 100644 --- a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go +++ b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go @@ -61,7 +61,7 @@ func TestFakeClient(t *testing.T) { // Inject an event into the fake client. p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}} - _, err := client.Core().Pods("test-ns").Create(p) + _, err := client.CoreV1().Pods("test-ns").Create(p) if err != nil { t.Errorf("error injecting pod add: %v", err) } diff --git a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go index b5a09f0c321..051898654f1 100644 --- a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go +++ b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go @@ -182,10 +182,10 @@ func TestNewInformerWatcher(t *testing.T) { lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fake.Core().Secrets("").List(options) + return fake.CoreV1().Secrets("").List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fake.Core().Secrets("").Watch(options) + return fake.CoreV1().Secrets("").Watch(options) }, } _, _, w, done := NewIndexerInformerWatcher(lw, &corev1.Secret{}) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go 
b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 366275ef54d..59f2fd4449b 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -73,7 +73,7 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { } // Determine the default version among versions. If a user calls a group client -// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the +// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the // default version will be returned. func defaultVersion(versions []PackageVersion) Version { var versionStrings []string diff --git a/test/e2e/framework/podlogs/podlogs.go b/test/e2e/framework/podlogs/podlogs.go index cf24571d963..90911e885fd 100644 --- a/test/e2e/framework/podlogs/podlogs.go +++ b/test/e2e/framework/podlogs/podlogs.go @@ -27,7 +27,6 @@ import ( "bytes" "context" "fmt" - "github.com/pkg/errors" "io" "os" "path" @@ -35,6 +34,8 @@ import ( "strings" "sync" + "github.com/pkg/errors" + "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -46,7 +47,7 @@ import ( // rpc error: code = Unknown desc = Error: No such container: 41a... // when the pod gets deleted while streaming. func LogsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) { - req := cs.Core().Pods(ns).GetLogs(pod, opts) + req := cs.CoreV1().Pods(ns).GetLogs(pod, opts) return req.Context(ctx).Stream() } @@ -78,7 +79,7 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w // running pods, but that then would have the disadvantage that // already deleted pods aren't covered. 
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error { - watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } @@ -90,7 +91,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO m.Lock() defer m.Unlock() - pods, err := cs.Core().Pods(ns).List(meta.ListOptions{}) + pods, err := cs.CoreV1().Pods(ns).List(meta.ListOptions{}) if err != nil { if to.StatusWriter != nil { fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err) @@ -213,7 +214,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO // WatchPods prints pod status events for a certain namespace or all namespaces // when namespace name is empty. func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error { - watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 0836af197bd..d723cb5416c 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -57,7 +57,7 @@ func TestAdmission(t *testing.T) { }, } - updatedPod, err := client.Core().Pods(pod.Namespace).Create(&pod) + updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(&pod) if err != nil { t.Fatalf("error creating pod: %v", err) } diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index 640e285da52..b667f451019 100644 --- a/test/integration/evictions/evictions_test.go +++ 
b/test/integration/evictions/evictions_test.go @@ -19,6 +19,7 @@ package evictions import ( "fmt" "net/http/httptest" + "reflect" "sync" "sync/atomic" "testing" @@ -37,7 +38,6 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller/disruption" "k8s.io/kubernetes/test/integration/framework" - "reflect" ) const ( @@ -89,7 +89,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { waitToObservePods(t, informers.Core().V1().Pods().Informer(), numOfEvictions, v1.PodRunning) pdb := newPDB() - if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { t.Errorf("Failed to create PodDisruptionBudget: %v", err) } @@ -107,7 +107,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { eviction := newEviction(ns.Name, podName, deleteOption) err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { - e := clientSet.Policy().Evictions(ns.Name).Evict(eviction) + e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { case errors.IsTooManyRequests(e): return false, nil @@ -151,7 +151,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { close(errCh) var errList []error - if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { + if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { errList = append(errList, fmt.Errorf("Failed to delete PodDisruptionBudget: %v", err)) } for err := range errCh { @@ -202,20 +202,20 @@ func TestTerminalPodEviction(t *testing.T) { waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1, v1.PodSucceeded) pdb := newPDB() - if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { + if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { t.Errorf("Failed to create PodDisruptionBudget: 
%v", err) } waitPDBStable(t, clientSet, 1, ns.Name, pdb.Name) - pdbList, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + pdbList, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Error while listing pod disruption budget") } oldPdb := pdbList.Items[0] eviction := newEviction(ns.Name, pod.Name, deleteOption) err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { - e := clientSet.Policy().Evictions(ns.Name).Evict(eviction) + e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { case errors.IsTooManyRequests(e): return false, nil @@ -230,7 +230,7 @@ func TestTerminalPodEviction(t *testing.T) { if err != nil { t.Fatalf("Eviction of pod failed %v", err) } - pdbList, err = clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + pdbList, err = clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Error while listing pod disruption budget") } @@ -240,7 +240,7 @@ func TestTerminalPodEviction(t *testing.T) { t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) } - if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { + if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { t.Fatalf("Failed to delete pod disruption budget") } } @@ -364,7 +364,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) { if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) { - pdb, err := clientSet.Policy().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{}) + pdb, err := 
clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index 969dbe60884..34ed806c911 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -111,7 +111,7 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - if _, err := client.Core().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { + if _, err := client.CoreV1().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/objectmeta/objectmeta_test.go b/test/integration/objectmeta/objectmeta_test.go index 1da1ddaf5e3..4e4ee32be39 100644 --- a/test/integration/objectmeta/objectmeta_test.go +++ b/test/integration/objectmeta/objectmeta_test.go @@ -41,12 +41,12 @@ func TestIgnoreClusterName(t *testing.T) { ClusterName: "cluster-name-to-ignore", }, } - nsNew, err := client.Core().Namespaces().Create(&ns) + nsNew, err := client.CoreV1().Namespaces().Create(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) - nsNew, err = client.Core().Namespaces().Update(&ns) + nsNew, err = client.CoreV1().Namespaces().Update(&ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) diff --git a/test/integration/pods/pods_test.go b/test/integration/pods/pods_test.go index 8504fbf008f..21c4bdc0d34 100644 --- a/test/integration/pods/pods_test.go +++ b/test/integration/pods/pods_test.go @@ -129,13 +129,13 @@ func 
TestPodUpdateActiveDeadlineSeconds(t *testing.T) { pod.Spec.ActiveDeadlineSeconds = tc.original pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i) - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } pod.Spec.ActiveDeadlineSeconds = tc.update - _, err := client.Core().Pods(ns.Name).Update(pod) + _, err := client.CoreV1().Pods(ns.Name).Update(pod) if tc.valid && err != nil { t.Errorf("%v: failed to update pod: %v", tc.name, err) } else if !tc.valid && err == nil { @@ -173,7 +173,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { }, } - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 9f6a9a418bf..891b9dc9d49 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -103,7 +103,7 @@ func TestQuota(t *testing.T) { qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource) informersStarted := make(chan struct{}) resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: clientset.Core(), + QuotaClient: clientset.CoreV1(), ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(), ResyncPeriod: controller.NoResyncPeriodFunc, InformerFactory: informers, @@ -151,12 +151,12 @@ func TestQuota(t *testing.T) { } func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) { - w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) + w, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) if err != nil { t.Fatalf("unexpected error: %v", 
err) } - if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil { + if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(quota); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -210,12 +210,12 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { }, } - w, err := clientset.Core().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) + w, err := clientset.CoreV1().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil { + if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(rc); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -239,7 +239,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { return false, nil }) if err != nil { - pods, _ := clientset.Core().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) + pods, _ := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items)) } } @@ -301,7 +301,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { qc := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource) informersStarted := make(chan struct{}) resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: clientset.Core(), + QuotaClient: clientset.CoreV1(), ResourceQuotaInformer: informers.Core().V1().ResourceQuotas(), ResyncPeriod: controller.NoResyncPeriodFunc, InformerFactory: informers, @@ -339,7 +339,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { }, }, } - if _, err := 
clientset.Core().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { t.Fatalf("expected error for insufficient quota") } @@ -362,7 +362,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // attempt to create a new pod once the quota is propagated err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { // retry until we succeed (to allow time for all changes to propagate) - if _, err := clientset.Core().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { return true, nil } return false, nil diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index fdf8cbdfa32..0baf1a7a89f 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -72,7 +72,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -84,7 +84,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.Core().ServiceAccounts(ns).Delete(defaultUser.Name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(defaultUser.Name, nil) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -110,13 +110,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { name := "my-service-account" // Create namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create 
namespace: %v", err) } // Create service account - serviceAccount, err := c.Core().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) + serviceAccount, err := c.CoreV1().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -128,7 +128,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete token - err = c.Core().Secrets(ns).Delete(token1Name, nil) + err = c.CoreV1().Secrets(ns).Delete(token1Name, nil) if err != nil { t.Fatalf("Could not delete token: %v", err) } @@ -146,12 +146,12 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Trigger creation of a new referenced token - serviceAccount, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + serviceAccount, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } serviceAccount.Secrets = []v1.ObjectReference{} - _, err = c.Core().ServiceAccounts(ns).Update(serviceAccount) + _, err = c.CoreV1().ServiceAccounts(ns).Update(serviceAccount) if err != nil { t.Fatal(err) } @@ -169,7 +169,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete service account - err = c.Core().ServiceAccounts(ns).Delete(name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(name, nil) if err != nil { t.Fatal(err) } @@ -178,7 +178,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name) err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { // Get all secrets in the namespace - secrets, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) + secrets, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) // Retrieval errors should fail if err != nil { return false, err @@ -207,7 +207,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = 
c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -261,7 +261,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts - createdPod, err := c.Core().Pods(ns).Create(&protoPod) + createdPod, err := c.CoreV1().Pods(ns).Create(&protoPod) if err != nil { t.Fatal(err) } @@ -290,19 +290,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) + _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) if err != nil && !errors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - _, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -315,13 +315,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = 
c.Core().Secrets(myns).Delete(roTokenName, nil) + err = c.CoreV1().Secrets(myns).Delete(roTokenName, nil) if err != nil { t.Fatalf("could not delete token: %v", err) } // wait for delete to be observed and reacted to via watch wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - sa, err := c.Core().ServiceAccounts(myns).Get(readOnlyServiceAccountName, metav1.GetOptions{}) + sa, err := c.CoreV1().ServiceAccounts(myns).Get(readOnlyServiceAccountName, metav1.GetOptions{}) if err != nil { return false, err } @@ -335,7 +335,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - _, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -489,13 +489,13 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) { if !shouldWait { - return c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + return c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) } var user *v1.ServiceAccount var err error err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { - user, err = c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } @@ -512,7 +512,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st token := "" findToken := func() (bool, error) { - user, err := c.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err := 
c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } @@ -521,7 +521,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st } for _, ref := range user.Secrets { - secret, err := c.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) + secret, err := c.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) if errors.IsNotFound(err) { continue } @@ -571,17 +571,17 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string readOps := []testOperation{ func() error { - _, err := c.Core().Secrets(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) return err }, func() error { - _, err := c.Core().Pods(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) return err }, } writeOps := []testOperation{ - func() error { _, err := c.Core().Secrets(ns).Create(testSecret); return err }, - func() error { return c.Core().Secrets(ns).Delete(testSecret.Name, nil) }, + func() error { _, err := c.CoreV1().Secrets(ns).Create(testSecret); return err }, + func() error { return c.CoreV1().Secrets(ns).Delete(testSecret.Name, nil) }, } for _, op := range readOps { diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index 191d258a3e3..427ac5a4009 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -85,7 +85,7 @@ func TestDeletingAndFailedPods(t *testing.T) { waitSTSStable(t, c, sts) // Verify STS creates 2 pods - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap) if len(pods.Items) != 2 { t.Fatalf("len(pods) = %d, want 2", len(pods.Items)) @@ -97,7 +97,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = 
[]string{"fake.example.com/blockDeletion"} }) - if err := c.Core().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting pod %s: %v", deletingPod.Name, err) } diff --git a/test/integration/statefulset/util.go b/test/integration/statefulset/util.go index 447ee759c66..0accec21302 100644 --- a/test/integration/statefulset/util.go +++ b/test/integration/statefulset/util.go @@ -199,7 +199,7 @@ func runControllerAndInformers(sc *statefulset.StatefulSetController, informers } func createHeadlessService(t *testing.T, clientSet clientset.Interface, headlessService *v1.Service) { - _, err := clientSet.Core().Services(headlessService.Namespace).Create(headlessService) + _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(headlessService) if err != nil { t.Fatalf("failed creating headless service: %v", err) } @@ -216,7 +216,7 @@ func createSTSsPods(t *testing.T, clientSet clientset.Interface, stss []*appsv1. 
createdSTSs = append(createdSTSs, createdSTS) } for _, pod := range pods { - createdPod, err := clientSet.Core().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) if err != nil { t.Fatalf("failed to create pod %s: %v", pod.Name, err) } diff --git a/test/integration/ttlcontroller/ttlcontroller_test.go b/test/integration/ttlcontroller/ttlcontroller_test.go index c0c2c27d6ab..b011a438238 100644 --- a/test/integration/ttlcontroller/ttlcontroller_test.go +++ b/test/integration/ttlcontroller/ttlcontroller_test.go @@ -59,7 +59,7 @@ func createNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex Name: fmt.Sprintf("node-%d", idx), }, } - if _, err := client.Core().Nodes().Create(node); err != nil { + if _, err := client.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to create node: %v", err) } }(i) @@ -74,7 +74,7 @@ func deleteNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex go func(idx int) { defer wg.Done() name := fmt.Sprintf("node-%d", idx) - if err := client.Core().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete node: %v", err) } }(i) diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index c737c87c6fc..9b908ae21be 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -162,13 +162,13 @@ func TestPodDeletionWithDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := 
testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -229,13 +229,13 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -264,7 +264,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.Core().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -297,13 +297,13 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -332,7 +332,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.Core().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -474,13 +474,13 @@ func TestPodAddedByDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) 
podStopCh := make(chan struct{}) - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -549,7 +549,7 @@ func TestPVCBoundWithADC(t *testing.T) { }, }, } - if _, err := testClient.Core().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { t.Fatalf("Failed to created node : %v", err) } @@ -557,10 +557,10 @@ func TestPVCBoundWithADC(t *testing.T) { pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < 3; i++ { pod, pvc := fakePodWithPVC(fmt.Sprintf("fakepod-pvcnotbound-%d", i), fmt.Sprintf("fakepvc-%d", i), namespaceName) - if _, err := testClient.Core().Pods(pod.Namespace).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } - if _, err := testClient.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil { + if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil { t.Errorf("Failed to create pvc : %v", err) } pvcs = append(pvcs, pvc) @@ -568,7 +568,7 @@ func TestPVCBoundWithADC(t *testing.T) { // pod with no pvc podNew := fakePodWithVol(namespaceName) podNew.SetName("fakepod") - if _, err := testClient.Core().Pods(podNew.Namespace).Create(podNew); err != nil { + if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(podNew); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -608,7 +608,7 @@ func createPVForPVC(t *testing.T, testClient *clientset.Clientset, pvc *v1.Persi StorageClassName: *pvc.Spec.StorageClassName, }, } - if _, err := 
testClient.Core().PersistentVolumes().Create(pv); err != nil { + if _, err := testClient.CoreV1().PersistentVolumes().Create(pv); err != nil { t.Errorf("Failed to create pv : %v", err) } } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 63b5702039e..ba1103634b5 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -116,7 +116,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -171,7 +171,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -231,7 +231,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -301,7 +301,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -382,7 +382,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -482,7 +482,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -572,7 +572,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) controllerStopCh := make(chan struct{}) informers.Start(controllerStopCh) @@ -862,7 +862,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes and StorageClasses). 
- defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) defer testClient.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) storageClass := storage.StorageClass{ @@ -957,7 +957,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.Core().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -1025,7 +1025,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) { // Check if the volume is already in requested phase - volume, err := client.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume.Status.Phase == phase { return } @@ -1046,7 +1046,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) { // Check if the claim is already in requested phase - claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) if err == nil && claim.Status.Phase == phase { return }