From 6596a14d3920a0b4157f360f1aaa521f7965bf74 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 12 Nov 2019 16:43:58 +0800 Subject: [PATCH] add missing alias of api errors under test --- .../pkg/registry/generic/registry/store.go | 2 +- test/e2e/apimachinery/chunking.go | 6 ++--- .../apimachinery/crd_conversion_webhook.go | 4 ++-- test/e2e/apimachinery/garbage_collector.go | 16 ++++++------- test/e2e/apimachinery/namespace.go | 6 ++--- test/e2e/apimachinery/protocol.go | 14 +++++------ test/e2e/apimachinery/resource_quota.go | 22 ++++++++--------- test/e2e/apimachinery/table_conversion.go | 4 ++-- test/e2e/apimachinery/webhook.go | 12 +++++----- test/e2e/apps/cronjob.go | 4 ++-- test/e2e/apps/deployment.go | 6 ++--- test/e2e/apps/job.go | 4 ++-- test/e2e/apps/rc.go | 6 ++--- test/e2e/apps/replica_set.go | 6 ++--- test/e2e/auth/audit_dynamic.go | 4 ++-- .../autoscaling/cluster_size_autoscaling.go | 10 ++++---- test/e2e/common/container.go | 4 ++-- test/e2e/common/lease.go | 4 ++-- test/e2e/common/runtimeclass.go | 3 +-- test/e2e/framework/job/wait.go | 4 ++-- test/e2e/framework/pods.go | 8 +++---- test/e2e/framework/service/jig.go | 4 ++-- test/e2e/framework/service/resource.go | 4 ++-- test/e2e/kubectl/kubectl.go | 2 +- test/e2e/network/fixture.go | 10 ++++---- test/e2e/network/ingress.go | 4 ++-- test/e2e/network/proxy.go | 6 ++--- test/e2e/scheduling/preemption.go | 14 +++++------ test/e2e/servicecatalog/podpreset.go | 6 ++--- test/e2e/storage/csi_mock_volume.go | 8 +++---- test/e2e/storage/drivers/csi.go | 4 ++-- test/e2e/storage/drivers/in_tree.go | 4 ++-- test/e2e/storage/testsuites/ephemeral.go | 4 ++-- test/e2e/storage/testsuites/volumelimits.go | 4 ++-- test/e2e/upgrades/sysctl.go | 4 ++-- test/e2e_node/apparmor_test.go | 4 ++-- test/e2e_node/critical_pod_test.go | 4 ++-- test/e2e_node/eviction_test.go | 8 +++---- test/e2e_node/mirror_pod_test.go | 4 ++-- .../admissionwebhook/admission_test.go | 14 +++++------ .../apiserver/apply/apply_crd_test.go | 12 
+++++----- .../integration/apiserver/apply/apply_test.go | 8 +++---- .../apiserver/certreload/certreload_test.go | 4 ++-- .../max_json_patch_operations_test.go | 4 ++-- .../apiserver/max_request_body_bytes_test.go | 17 +++++++------ test/integration/apiserver/patch_test.go | 4 ++-- test/integration/auth/node_test.go | 6 ++--- test/integration/daemonset/daemonset_test.go | 4 ++-- test/integration/dryrun/dryrun_test.go | 10 ++++---- test/integration/evictions/evictions_test.go | 12 +++++----- .../cluster_scoped_owner_test.go | 4 ++-- .../garbage_collector_test.go | 24 +++++++++---------- test/integration/ipamperf/util.go | 4 ++-- test/integration/master/audit_dynamic_test.go | 4 ++-- .../master/synthetic_master_test.go | 10 ++++---- .../integration/replicaset/replicaset_test.go | 4 ++-- .../replicationcontroller_test.go | 4 ++-- test/integration/scheduler/predicates_test.go | 6 ++--- test/integration/scheduler/priorities_test.go | 4 ++-- test/integration/scheduler/util.go | 8 +++---- .../serviceaccount/service_account_test.go | 22 ++++++++--------- test/integration/utils.go | 4 ++-- test/soak/serve_hostnames/serve_hostnames.go | 4 ++-- 63 files changed, 221 insertions(+), 223 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index b3c55404012..dc287d1ed5b 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -933,7 +933,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V if err == nil && deleteImmediately && preconditions.ResourceVersion != nil { accessor, err = meta.Accessor(out) if err != nil { - return out, false, kubeerr.NewInternalError(err) + return out, false, apierrors.NewInternalError(err) } resourceVersion := accessor.GetResourceVersion() preconditions.ResourceVersion = &resourceVersion diff --git 
a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index b7da344a2cb..3e0b87a74ff 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -27,7 +27,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/features" @@ -153,11 +153,11 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { framework.Logf("Token %s has not expired yet", firstToken) return false, nil } - if err != nil && !errors.IsResourceExpired(err) { + if err != nil && !apierrors.IsResourceExpired(err) { return false, err } framework.Logf("got error %s", err) - status, ok := err.(errors.APIStatus) + status, ok := err.(apierrors.APIStatus) if !ok { return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err)) } diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 441bea6998f..185933b94f8 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" @@ -236,7 +236,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa }, }, }) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingCRDName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) diff --git 
a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 596a387def6..ac5177f08a9 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -28,7 +28,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -667,7 +667,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Logf("") return false, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -769,7 +769,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Logf("") return false, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -882,7 +882,7 @@ var _ = SIGDescribe("Garbage collector", func() { definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped) defer func() { err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("failed to delete CustomResourceDefinition: %v", err) } }() @@ -951,7 +951,7 @@ var _ = SIGDescribe("Garbage collector", func() { // Ensure the dependent is deleted. 
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(dependentName, metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { framework.Logf("owner: %#v", persistedOwner) framework.Logf("dependent: %#v", persistedDependent) @@ -963,7 +963,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err == nil { framework.Failf("expected owner resource %q to be deleted", ownerName) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { framework.Failf("unexpected error getting owner resource %q: %v", ownerName, err) } } @@ -985,7 +985,7 @@ var _ = SIGDescribe("Garbage collector", func() { definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped) defer func() { err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("failed to delete CustomResourceDefinition: %v", err) } }() @@ -1056,7 +1056,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err == nil { return false, nil } - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, fmt.Errorf("failed to get owner: %v", err) } return true, nil diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index c0cdefd773b..f77c2f1f47f 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -23,7 +23,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" @@ -121,7 +121,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(wait.Poll(1*time.Second, 
time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { return true, nil } return false, nil @@ -178,7 +178,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { return true, nil } return false, nil diff --git a/test/e2e/apimachinery/protocol.go b/test/e2e/apimachinery/protocol.go index 13ebccdf6f9..a2c001e62a2 100644 --- a/test/e2e/apimachinery/protocol.go +++ b/test/e2e/apimachinery/protocol.go @@ -23,7 +23,7 @@ import ( g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -63,16 +63,16 @@ var _ = SIGDescribe("client-go should negotiate", func() { case watch.Added, watch.Modified: // this is allowed case watch.Error: - err := errors.FromObject(evt.Object) - // In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and - // apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent - // and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone + err := apierrors.FromObject(evt.Object) + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. 
In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone + // check when we fully drop support for Kubernetes 1.17 servers from reflectors. - if errors.IsGone(err) || errors.IsResourceExpired(err) { + if apierrors.IsGone(err) || apierrors.IsResourceExpired(err) { // this is allowed, since the kubernetes object could be very old break } - if errors.IsUnexpectedObjectError(err) { + if apierrors.IsUnexpectedObjectError(err) { g.Fail(fmt.Sprintf("unexpected object, wanted v1.Status: %#v", evt.Object)) } g.Fail(fmt.Sprintf("unexpected error: %#v", evt.Object)) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index e01c24ac0e7..40de5ccf425 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -24,7 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -886,7 +886,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Verifying the deleted ResourceQuota") _, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) @@ -1076,7 +1076,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil ||
errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1115,7 +1115,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1160,7 +1160,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1206,10 +1206,10 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) _, err = 
f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("2") @@ -1261,7 +1261,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1295,7 +1295,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1334,7 +1334,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: 
metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1714,7 +1714,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s resourceQuota.Spec.Hard[resourceName] = current _, err = c.CoreV1().ResourceQuotas(ns).Update(resourceQuota) // ignoring conflicts since someone else may already updated it. - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { return false, nil } return false, err diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 27c294caaa0..2decea9c5e2 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -27,7 +27,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/client-go/util/workqueue" @@ -164,7 +164,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { } err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do().Into(table) framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar) - framework.ExpectEqual(err.(errors.APIStatus).Status().Code, int32(406)) + framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406)) }) }) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index ad37e9e137a..324b04b0727 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ 
-30,7 +30,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" @@ -739,7 +739,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { }, }, }) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) @@ -1120,7 +1120,7 @@ func testWebhook(f *framework.Framework) { framework.Failf("expect error %q, got %q", "deadline", err.Error()) } // ensure the pod was not actually created - if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !errors.IsNotFound(err) { + if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { framework.Failf("expect notfound error looking for rejected pod, got %v", err) } @@ -1296,7 +1296,7 @@ func testFailClosedWebhook(f *framework.Framework) { } _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap) framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName) - if !errors.IsInternalError(err) { + if !apierrors.IsInternalError(err) { framework.Failf("expect an internal error, got %#v", err) } } @@ -1661,7 +1661,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig return true, nil } // Only retry update on conflict - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return false, err } return false, nil @@ -1683,7 +1683,7 @@ func 
updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u return true, nil } // Only retry update on conflict - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return false, err } return false, nil diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 7bf1921f0c9..0aa5e8bfd73 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -26,7 +26,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -214,7 +214,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) ginkgo.By("Ensuring the job is not in the cronjob active list") err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index f824524685f..13c23813119 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -29,7 +29,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -206,7 +206,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { framework.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) framework.ExpectError(err) - framework.ExpectEqual(errors.IsNotFound(err), true) + 
framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) @@ -615,7 +615,7 @@ func testIterativeDeployments(f *framework.Framework) { name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) err := c.CoreV1().Pods(ns).Delete(name, nil) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index e3ee4497155..238f54eb095 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/test/e2e/framework" @@ -162,7 +162,7 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) /* diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 6ed684d04b8..bab2e3b9ce7 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -287,7 +287,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { p2, err := 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } framework.ExpectNoError(err) @@ -323,7 +323,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { pod.Labels = map[string]string{"name": "not-matching-name"} _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) - if err != nil && errors.IsConflict(err) { + if err != nil && apierrors.IsConflict(err) { return false, nil } if err != nil { diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 77497f22e61..0c3f01cc031 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -22,7 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -289,7 +289,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the ReplicaSet - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } framework.ExpectNoError(err) @@ -315,7 +315,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { pod.Labels = map[string]string{"name": "not-matching-name"} _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) - if err != nil && errors.IsConflict(err) { + if err != nil && apierrors.IsConflict(err) { return false, nil } if err != nil { diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 80f2a62a3dc..1984d070c92 100644 --- a/test/e2e/auth/audit_dynamic.go +++ 
b/test/e2e/auth/audit_dynamic.go @@ -25,7 +25,7 @@ import ( auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -111,7 +111,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { // get pod ip err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) { p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Logf("waiting for audit-proxy pod to be present") return false, nil } else if err != nil { diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index ad0a6351811..18f0454e2eb 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -31,7 +31,7 @@ import ( v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -1476,7 +1476,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { if err == nil { return nil } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) @@ -1517,7 +1517,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd if err == nil { return nil } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) @@ -1692,7 +1692,7 @@ func 
runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if err == nil { break } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) @@ -1936,7 +1936,7 @@ func createPriorityClasses(f *framework.Framework) func() { if err != nil { klog.Errorf("Error creating priority class: %v", err) } - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } return func() { diff --git a/test/e2e/common/container.go b/test/e2e/common/container.go index d00037eafc6..f1a10410770 100644 --- a/test/e2e/common/container.go +++ b/test/e2e/common/container.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -105,7 +105,7 @@ func (cc *ConformanceContainer) Present() (bool, error) { if err == nil { return true, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } return false, err diff --git a/test/e2e/common/lease.go b/test/e2e/common/lease.go index 58db1c8a9ee..74cab28cb86 100644 --- a/test/e2e/common/lease.go +++ b/test/e2e/common/lease.go @@ -23,7 +23,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -154,6 +154,6 @@ var _ = framework.KubeDescribe("Lease", func() { framework.ExpectNoError(err, "deleting Lease failed") _, err = leaseClient.Get(name, metav1.GetOptions{}) - framework.ExpectEqual(errors.IsNotFound(err), true) + 
framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index 8087bedee3f..95b12ac19f5 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -21,7 +21,6 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -72,7 +71,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { ginkgo.By("Waiting for the RuntimeClass to disappear") framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) { _, err := rcClient.Get(rcName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil // done } if err != nil { diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index b975460087f..d6b4a02b400 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -21,7 +21,7 @@ import ( batchv1 "k8s.io/api/batch/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -91,7 +91,7 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { _, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index dcfc122cc29..2c39288e595 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -23,7 +23,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - 
"k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" @@ -115,7 +115,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { } // Update updates the pod object. It retries if there is a conflict, throw out error if -// there is any other errors. name is the pod name, updateFn is the function updating the +// there are any other errors. name is the pod name, updateFn is the function updating the // pod object. func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { @@ -129,7 +129,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { Logf("Successfully updated pod %q", name) return true, nil } - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err) return false, nil } @@ -147,7 +147,7 @@ func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeo // disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) { err := c.Delete(name, options) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { Failf("Failed to delete pod %q: %v", name, err) } gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index e387071a517..9d14880e698 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -27,7 +27,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -446,7 +446,7 @@ func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { if err == nil { return j.sanityCheckService(result, service.Spec.Type) } - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err) } } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index 91697939ca5..66546862310 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -20,7 +20,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -73,7 +73,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update service, err = c.CoreV1().Services(namespace).Update(service) - if !errors.IsConflict(err) && 
!errors.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return service, err } } diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index fe0992e7f59..52b1baefb7f 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -2660,7 +2660,7 @@ func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D _, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: - return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") + return false, apierrors.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") } switch rc := event.Object.(type) { case *v1.ReplicationController: diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go index b6f2521eb83..1b7884300c4 100644 --- a/test/e2e/network/fixture.go +++ b/test/e2e/network/fixture.go @@ -18,7 +18,7 @@ package network import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" @@ -118,7 +118,7 @@ func (t *TestFixture) Cleanup() []error { // First, resize the RC to 0. old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -126,7 +126,7 @@ func (t *TestFixture) Cleanup() []error { x := int32(0) old.Spec.Replicas = &x if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -139,7 +139,7 @@ func (t *TestFixture) Cleanup() []error { // TODO(mikedanese): Wait. // Then, delete the RC altogether. 
if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { errs = append(errs, err) } } @@ -149,7 +149,7 @@ func (t *TestFixture) Cleanup() []error { ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { errs = append(errs, err) } } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index cb33dbfd026..361968a3868 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" @@ -792,7 +792,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat } ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) { - if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { + if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !apierrors.IsNotFound(err) { framework.Logf("ginkgo.Failed to delete ssl certificate %q: %v. 
Retrying...", preSharedCertName, err) return false, nil } diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 08f79c1ad13..1654809da94 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -27,7 +27,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/net" @@ -226,7 +226,7 @@ var _ = SIGDescribe("Proxy", func() { body, status, d, err := doProxy(f, path, i) if err != nil { - if serr, ok := err.(*errors.StatusError); ok { + if serr, ok := err.(*apierrors.StatusError); ok { recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v", i, status, d, path, serr.Status())) } else { @@ -322,7 +322,7 @@ func waitForEndpoint(c clientset.Interface, ns, name string) error { registerTimeout := time.Minute for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) { endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Logf("Endpoint %s/%s is not ready yet", ns, name) continue } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 66eeae5a4f9..c0b8cdc6a76 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -79,7 +79,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var err error for _, pair := range priorityPairs { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: 
metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } e2enode.WaitForTotalHealthy(cs, time.Minute) @@ -143,7 +143,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) // Make sure that the lowest priority pod is deleted. preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) - podDeleted := (err != nil && errors.IsNotFound(err)) || + podDeleted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) framework.ExpectEqual(podDeleted, true) // Other pods (mid priority ones) should be present. @@ -198,7 +198,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // Clean-up the critical pod // Always run cleanup to make sure the pod is properly cleaned up. err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0)) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err) } }() @@ -212,7 +212,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) // Make sure that the lowest priority pod is deleted. preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) - podDeleted := (err != nil && errors.IsNotFound(err)) || + podDeleted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) framework.ExpectEqual(podDeleted, true) // Other pods (mid priority ones) should be present. 
@@ -301,9 +301,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { _, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) if err != nil { framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) - framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err) + framework.Logf("Reason: %v. Msg: %v", apierrors.ReasonForError(err), err) } - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } }) diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 1eaa5aba0c4..9e672b80a60 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" @@ -73,7 +73,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } _, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } framework.ExpectNoError(err) @@ -191,7 +191,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } _, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } framework.ExpectNoError(err) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 9375b158498..08d162349ca 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ 
-27,7 +27,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { attachmentName := fmt.Sprintf("csi-%x", attachmentHash) _, err = m.cs.StorageV1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { if !test.disableAttach { framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") } @@ -618,7 +618,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) { csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, err } attachLimit = getVolumeLimitFromCSINode(csiNode, driverName) @@ -809,7 +809,7 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error { framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { _, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{}) - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return err } } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index f0e9cd1d8c7..0a24a73b251 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -44,7 +44,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" @@ -492,7 +492,7 @@ func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) { csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, err } for _, driver := range csiNode.Spec.Drivers { diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 087212f28d2..4a4110de265 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -46,7 +46,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -319,7 +319,7 @@ func (v *glusterVolume) DeleteVolume() { framework.Logf("Deleting Gluster endpoints %q...", name) err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) if err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { framework.Failf("Gluster delete endpoints failed: %v", err) } framework.Logf("Gluster endpoints %q not found, assuming deleted", name) diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index a18ec5a357a..a8981be8763 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -25,7 +25,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ 
-370,7 +370,7 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) { // Pod was created, feature supported. StopPod(c, pod) return true, nil - case errors.IsInvalid(err): + case apierrors.IsInvalid(err): // "Invalid" because it uses a feature that isn't supported. return false, nil default: diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 0563394012e..4405335fbf3 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -26,7 +26,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -246,7 +246,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl if err == nil { existing++ } else { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { pvNames.Delete(pvName) } else { framework.Logf("Failed to get PV %s: %s", pvName, err) diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go index 27dc034a2e5..c47ebf53543 100644 --- a/test/e2e/upgrades/sysctl.go +++ b/test/e2e/upgrades/sysctl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/ginkgo" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/sysctl" @@ -60,7 +60,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { 
framework.ExpectNoError(err) } if err == nil { diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 053fb59d152..9cce7f9c2ac 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -28,7 +28,7 @@ import ( "strings" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" @@ -159,7 +159,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. _, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) { switch e.Type { case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name) + return false, apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name) } switch t := e.Object.(type) { case *v1.Pod: diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 1670a5abf76..3eeafab38d4 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeapi "k8s.io/kubernetes/pkg/apis/core" @@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C }) _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err) // Create pods, starting with non-critical so that the critical preempts the other pods. 
f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index d398b491c28..21ed8000202 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) @@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) @@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: 
metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index a45f4128e44..cc0cb27f5b9 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" @@ -177,7 +177,7 @@ func deleteStaticPod(dir, name, namespace string) error { func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { _, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return goerrors.New("pod not disappear") diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index 3aea2748c29..2246b1ce277 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -40,7 +40,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -678,7 +678,7 @@ func testResourceDelete(c 
*testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -753,7 +753,7 @@ func testResourceDelete(c *testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -801,7 +801,7 @@ func testResourceDeletecollection(c *testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -943,7 +943,7 @@ func testNamespaceDelete(c *testContext) { } // verify namespace is gone obj, err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { + if err == nil || !apierrors.IsNotFound(err) { c.t.Errorf("expected namespace to be gone, got %#v, %v", obj, err) } } @@ -1048,7 +1048,7 @@ func testPodBindingEviction(c *testContext) { forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background} defer func() { err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(pod.GetName(), forceDelete) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { c.t.Error(err) return } @@ -1414,7 +1414,7 @@ func createOrGetResource(client dynamic.Interface, gvr schema.GroupVersionResour if err == nil { return obj, 
nil } - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return nil, err } return client.Resource(gvr).Namespace(ns).Create(stubObj, metav1.CreateOptions{}) diff --git a/test/integration/apiserver/apply/apply_crd_test.go b/test/integration/apiserver/apply/apply_crd_test.go index 2451136f38d..c2a73813402 100644 --- a/test/integration/apiserver/apply/apply_crd_test.go +++ b/test/integration/apiserver/apply/apply_crd_test.go @@ -25,7 +25,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" genericfeatures "k8s.io/apiserver/pkg/features" @@ -108,7 +108,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -299,7 +299,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -339,7 +339,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result) } - status, ok = err.(*errors.StatusError) + status, ok = err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -504,7 +504,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := 
err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -698,7 +698,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index b1c206db12c..996a18712d0 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -28,7 +28,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -276,7 +276,7 @@ func TestCreateOnApplyFailsWithUID(t *testing.T) { }`)). Do(). 
Get() - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { t.Fatalf("Expected conflict error but got: %v", err) } } @@ -348,7 +348,7 @@ func TestApplyUpdateApplyConflictForced(t *testing.T) { if err == nil { t.Fatalf("Expecting to get conflicts when applying object") } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -849,7 +849,7 @@ func TestApplyFailsWithVersionMismatch(t *testing.T) { if err == nil { t.Fatalf("Expecting to get version mismatch when applying object") } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get version mismatch as API error") } diff --git a/test/integration/apiserver/certreload/certreload_test.go b/test/integration/apiserver/certreload/certreload_test.go index a23ae31703c..97bdcfcdfeb 100644 --- a/test/integration/apiserver/certreload/certreload_test.go +++ b/test/integration/apiserver/certreload/certreload_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/dynamiccertificates" @@ -146,7 +146,7 @@ MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps= func waitForConfigMapCAContent(t *testing.T, kubeClient kubernetes.Interface, key, content string, count int) func() (bool, error) { return func() (bool, error) { clusterAuthInfo, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get("extension-apiserver-authentication", metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index 0785428862b..388a35a9dee 100644 --- 
a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -22,7 +22,7 @@ import ( "testing" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" @@ -60,7 +60,7 @@ func TestMaxJSONPatchOperations(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } if !strings.Contains(err.Error(), "The allowed maximum operations in a JSON patch is") { diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index f343a272c0c..f6c2e1b7b63 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -22,7 +22,6 @@ import ( "testing" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -46,7 +45,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -69,7 +68,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -80,7 +79,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if 
!errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -89,7 +88,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -105,7 +104,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -121,7 +120,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -137,7 +136,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). 
Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %#v", err) } }) @@ -155,7 +154,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index 6216d5ad842..cf063cd31b1 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -25,7 +25,7 @@ import ( "github.com/google/uuid" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -86,7 +86,7 @@ func TestPatchConflicts(t *testing.T) { Do(). 
Get() - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { t.Logf("tolerated conflict error patching %s: %v", "secrets", err) return } diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 6370aee186c..3c5885c7528 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -28,7 +28,7 @@ import ( policy "k8s.io/api/policy/v1beta1" storagev1 "k8s.io/api/storage/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -638,14 +638,14 @@ func expect(t *testing.T, f func() error, wantErr func(error) bool) (timeout boo func expectForbidden(t *testing.T, f func() error) { t.Helper() - if ok, err := expect(t, f, errors.IsForbidden); !ok { + if ok, err := expect(t, f, apierrors.IsForbidden); !ok { t.Errorf("Expected forbidden error, got %v", err) } } func expectNotFound(t *testing.T, f func() error) { t.Helper() - if ok, err := expect(t, f, errors.IsNotFound); !ok { + if ok, err := expect(t, f, apierrors.IsNotFound); !ok { t.Errorf("Expected notfound error, got %v", err) } } diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index c8ce944ee45..8a1d8f81b4a 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -25,7 +25,7 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -310,7 +310,7 @@ func validateDaemonSetPodsAndMarkReady( func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return 
func() (bool, error) { pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { diff --git a/test/integration/dryrun/dryrun_test.go b/test/integration/dryrun/dryrun_test.go index 730f887f085..d3edf307488 100644 --- a/test/integration/dryrun/dryrun_test.go +++ b/test/integration/dryrun/dryrun_test.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/api/core/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -55,7 +55,7 @@ func DryRunCreateTest(t *testing.T, rsc dynamic.ResourceInterface, obj *unstruct obj.GroupVersionKind()) } - if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !errors.IsNotFound(err) { + if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatalf("object shouldn't exist: %v", err) } } @@ -92,7 +92,7 @@ func getReplicasOrFail(t *testing.T, obj *unstructured.Unstructured) int64 { func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) { obj, err := rsc.Get(name, metav1.GetOptions{}, "scale") - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return } if err != nil { @@ -119,7 +119,7 @@ func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name stri func DryRunScaleUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) { obj, err := rsc.Get(name, metav1.GetOptions{}, "scale") - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return } if err != nil { @@ -156,7 +156,7 @@ func DryRunUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) } obj.SetAnnotations(map[string]string{"update": "true"}) obj, err = rsc.Update(obj, 
metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}) - if err == nil || !errors.IsConflict(err) { + if err == nil || !apierrors.IsConflict(err) { break } } diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index a0be6e50287..da3cd0e89bb 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" @@ -114,9 +114,9 @@ func TestConcurrentEvictionRequests(t *testing.T) { err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { - case errors.IsTooManyRequests(e): + case apierrors.IsTooManyRequests(e): return false, nil - case errors.IsConflict(e): + case apierrors.IsConflict(e): return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e) case e == nil: return true, nil @@ -132,7 +132,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { _, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{}) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): atomic.AddUint32(&numberPodsEvicted, 1) // pod was evicted and deleted so return from goroutine immediately return @@ -222,9 +222,9 @@ func TestTerminalPodEviction(t *testing.T) { err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { - case errors.IsTooManyRequests(e): + case apierrors.IsTooManyRequests(e): return false, nil - case errors.IsConflict(e): + case apierrors.IsConflict(e): return false, fmt.Errorf("Unexpected Conflict (409) 
error caused by failing to handle concurrent PDB updates: %v", e) case e == nil: return true, nil diff --git a/test/integration/garbagecollector/cluster_scoped_owner_test.go b/test/integration/garbagecollector/cluster_scoped_owner_test.go index 1f8092f7890..b1a68f812b6 100644 --- a/test/integration/garbagecollector/cluster_scoped_owner_test.go +++ b/test/integration/garbagecollector/cluster_scoped_owner_test.go @@ -24,7 +24,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -119,7 +119,7 @@ func TestClusterScopedOwners(t *testing.T) { if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) { _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{}) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): return true, nil case err != nil: return false, err diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 6f3e97f79df..f78060b5ea8 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -28,7 +28,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -698,7 +698,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) { if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) { 
_, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -766,7 +766,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) { if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) { _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -843,7 +843,7 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) { } if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { _, err := podClient.Get(pod.Name, metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("Failed waiting for pod %q to be deleted", pod.Name) } @@ -950,7 +950,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) { // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", owner.GetName()) } @@ -960,7 +960,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", dependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", dependent.GetName(), err) } } @@ -1028,7 +1028,7 @@ func TestMixedRelationships(t *testing.T) { // Ensure the owner is deleted. 
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(customOwner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", customOwner.GetName()) } @@ -1038,7 +1038,7 @@ func TestMixedRelationships(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", coreDependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", coreDependent.GetName(), err) } } @@ -1052,7 +1052,7 @@ func TestMixedRelationships(t *testing.T) { // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := configMapClient.Get(coreOwner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", coreOwner.GetName()) } @@ -1062,7 +1062,7 @@ func TestMixedRelationships(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", customDependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", customDependent.GetName(), err) } } @@ -1123,7 +1123,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner %q to be deleted", owner.GetName()) } @@ -1131,7 +1131,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Ensure the dependent is deleted. 
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for dependent %q (owned by %q) to be deleted", dependent.GetName(), owner.GetName()) } diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index 1b6e25b4874..883f6dd0df3 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -20,7 +20,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -78,7 +78,7 @@ func createNodes(apiURL string, config *Config) error { for i := 0; i < config.NumNodes; i++ { var err error for j := 0; j < maxCreateRetries; j++ { - if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) { + if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && apierrors.IsServerTimeout(err) { klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) time.Sleep(retryDelay) continue diff --git a/test/integration/master/audit_dynamic_test.go b/test/integration/master/audit_dynamic_test.go index 81bdc22979b..c7b27a6bd1c 100644 --- a/test/integration/master/audit_dynamic_test.go +++ b/test/integration/master/audit_dynamic_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" auditinternal "k8s.io/apiserver/pkg/apis/audit" @@ -231,7 +231,7 @@ func sinkHealth(t *testing.T, kubeclient kubernetes.Interface, servers ...*utils // corresponding expected audit event func 
simpleOp(name string, kubeclient kubernetes.Interface) ([]utils.AuditEvent, error) { _, err := kubeclient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return nil, err } diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index ec89466fdcd..f9f018f6a6b 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -36,7 +36,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/group" @@ -102,7 +102,7 @@ func TestKubernetesService(t *testing.T) { defer closeFn() coreClient := clientset.NewForConfigOrDie(config.GenericConfig.LoopbackClientConfig) err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { - if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil && errors.IsNotFound(err) { + if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { return false, nil } else if err != nil { return false, err @@ -682,10 +682,10 @@ func TestServiceAlloc(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, err } - return !errors.IsNotFound(err), nil + return !apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("creating kubernetes service timed out") } @@ -864,7 +864,7 @@ func TestUpdateNodeObjects(t *testing.T) { n.Status.Conditions = nil } if _, err := c.Nodes().UpdateStatus(n); err != nil { - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break } diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index a549629f9e7..fd3f7ebf9d9 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -26,7 +26,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" @@ -681,7 +681,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { if err != nil { // If the pod is not found, it means the RS picks the pod for deletion (it is extra) // Verify there is only one pod in namespace and it has ControllerRef to the RS - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 1 { return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items)) diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 214ad6be9a2..ba50d8749ef 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ 
b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -24,7 +24,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" @@ -654,7 +654,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { if err != nil { // If the pod is not found, it means the RC picks the pod for deletion (it is extra) // Verify there is only one pod in namespace and it has ControllerRef to the RC - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 1 { return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items)) diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go index 95f20d10bc3..0d57e533840 100644 --- a/test/integration/scheduler/predicates_test.go +++ b/test/integration/scheduler/predicates_test.go @@ -22,7 +22,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -835,7 +835,7 @@ func TestInterPodAffinity(t *testing.T) { } testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod) if err != nil { - if !(test.errorType == "invalidPod" && errors.IsInvalid(err)) { + if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) } } @@ -1017,7 +1017,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { } } testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) - if err != nil && !errors.IsInvalid(err) { + if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while 
creating pod during test: %v", err) } diff --git a/test/integration/scheduler/priorities_test.go b/test/integration/scheduler/priorities_test.go index 149de57c23e..05d4e6705ee 100644 --- a/test/integration/scheduler/priorities_test.go +++ b/test/integration/scheduler/priorities_test.go @@ -22,7 +22,7 @@ import ( "testing" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -346,7 +346,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { } } testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) - if err != nil && !errors.IsInvalid(err) { + if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 68f44c89735..42f1bf479c1 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -26,7 +26,7 @@ import ( v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -283,7 +283,7 @@ func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key stri switch { case err == nil && passFunc(n): return true, nil - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): nodes = append(nodes, nil) case err != nil: t.Errorf("Unexpected error: %v", err) @@ -557,7 +557,7 @@ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) - if 
errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if pod.DeletionTimestamp != nil { @@ -737,7 +737,7 @@ func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Po func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) { for _, p := range pods { err := cs.CoreV1().Pods(p.Namespace).Delete(p.Name, metav1.NewDeleteOptions(0)) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err) } } diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index bf29cff7d1e..980ff0cd64b 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -32,7 +32,7 @@ import ( v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -207,7 +207,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { // Create "my" namespace _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) - if err != nil && !errors.IsAlreadyExists(err) { + if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -290,13 +290,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { // Create "my" namespace _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) - if err != nil && !errors.IsAlreadyExists(err) { + if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) - 
if err != nil && !errors.IsAlreadyExists(err) { + if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -496,7 +496,7 @@ func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWai var err error err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { user, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { @@ -513,7 +513,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st findToken := func() (bool, error) { user, err := c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { @@ -522,7 +522,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st for _, ref := range user.Secrets { secret, err := c.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { continue } if err != nil { @@ -586,8 +586,8 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string for _, op := range readOps { err := op() - unauthorizedError := errors.IsUnauthorized(err) - forbiddenError := errors.IsForbidden(err) + unauthorizedError := apierrors.IsUnauthorized(err) + forbiddenError := apierrors.IsForbidden(err) switch { case !authenticated && !unauthorizedError: @@ -603,8 +603,8 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string for _, op := range writeOps { err := op() - unauthorizedError := errors.IsUnauthorized(err) - forbiddenError := errors.IsForbidden(err) + unauthorizedError := apierrors.IsUnauthorized(err) + forbiddenError := apierrors.IsForbidden(err) switch { case !authenticated && !unauthorizedError: diff --git a/test/integration/utils.go b/test/integration/utils.go index 5a0a6d51b29..e1f6e0092f1 100644 
--- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -21,7 +21,7 @@ import ( "time" "google.golang.org/grpc" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/storagebackend" @@ -62,7 +62,7 @@ func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, in if err == nil { return false, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 0f9c8d16b7d..083e792e44e 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -30,7 +30,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -127,7 +127,7 @@ func main() { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return } }