From fc738cbb1d1aec9bff8e1d42a2a0684a439c1f92 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 12 Nov 2019 16:18:59 +0800 Subject: [PATCH 1/3] unify alias of api errors under test --- test/e2e/apimachinery/aggregator.go | 4 +-- test/e2e/auth/pod_security_policy.go | 4 +-- test/e2e/cloud/nodes.go | 4 +-- test/e2e/common/runtimeclass.go | 4 +-- test/e2e/framework/ingress/ingress_utils.go | 4 +-- test/e2e/framework/node/wait_test.go | 4 +-- test/e2e/framework/pod/delete.go | 4 +-- test/e2e/framework/pod/wait.go | 6 ++-- test/e2e/framework/psp.go | 8 ++--- test/e2e/framework/pv/pv.go | 10 +++--- test/e2e/framework/skip.go | 4 +-- test/e2e/framework/statefulset/rest.go | 4 +-- test/e2e/framework/util.go | 10 +++--- test/e2e/framework/volume/fixtures.go | 4 +-- test/e2e/kubectl/kubectl.go | 10 +++--- test/e2e/lifecycle/bootstrap/util.go | 8 ++--- test/e2e/node/runtimeclass.go | 4 +-- test/e2e/scheduling/priorities.go | 6 ++-- test/e2e/storage/flexvolume.go | 6 ++-- test/e2e/storage/pvc_protection.go | 6 ++-- test/e2e/storage/testsuites/base.go | 7 ++-- test/e2e/storage/testsuites/provisioning.go | 12 +++---- test/e2e/storage/testsuites/snapshottable.go | 6 ++-- test/e2e/storage/utils/create.go | 4 +-- test/e2e/storage/utils/utils.go | 4 +-- test/e2e/storage/volume_provisioning.go | 6 ++-- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 4 +-- .../storage/vsphere/vsphere_statefulsets.go | 4 +-- test/utils/create_resources.go | 32 +++++++++---------- test/utils/delete_resources.go | 4 +-- test/utils/density_utils.go | 6 ++-- test/utils/runners.go | 18 +++++------ 32 files changed, 110 insertions(+), 111 deletions(-) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 9a6b194ca8f..2f2569aa1fc 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -28,7 +28,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -367,7 +367,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl request.SetHeader("Accept", "application/json") _, err := request.DoRaw() if err != nil { - status, ok := err.(*apierrs.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { return false, err } diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 0fe3d7e30cd..f0929f69bde 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/serviceaccount" @@ -122,7 +122,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { func expectForbidden(err error) { framework.ExpectError(err, "should be forbidden") - framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error") + framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") } func testPrivilegedPods(tester func(pod *v1.Pod)) { diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 
76b9bb331c0..f0eb8511201 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -20,7 +20,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{}) if err == nil { framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name) - } else if !apierrs.IsNotFound(err) { + } else if !apierrors.IsNotFound(err) { framework.Failf("failed to get node %q err: %q", nodeToDelete.Name, err) } diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index 53aead2277f..8087bedee3f 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" @@ -124,7 +124,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) { } else { _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectError(err, "should be forbidden") - framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error") + framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") } } diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 3593cfdc3f3..88bc504d93c 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -41,7 +41,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" @@ -522,7 +522,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) { framework.DescribeIng(j.Ingress.Namespace) return } - if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { framework.Failf("failed to update ingress %s/%s: %v", ns, name, err) } } diff --git a/test/e2e/framework/node/wait_test.go b/test/e2e/framework/node/wait_test.go index 050c4661143..7ecf37e7754 100644 --- a/test/e2e/framework/node/wait_test.go +++ b/test/e2e/framework/node/wait_test.go @@ -21,7 +21,7 @@ import ( "testing" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" @@ -157,7 +157,7 @@ func TestCheckReadyForTests(t *testing.T) { expectedErr: "Forced error", }, { desc: "Retryable errors from node list are reported but still return false", - nodeListErr: apierrs.NewTimeoutError("Retryable error", 10), + nodeListErr: apierrors.NewTimeoutError("Retryable error", 10), expected: false, }, } diff --git a/test/e2e/framework/pod/delete.go b/test/e2e/framework/pod/delete.go index 5810615e686..a4a2bfcdf26 100644 --- 
a/test/e2e/framework/pod/delete.go +++ b/test/e2e/framework/pod/delete.go @@ -23,7 +23,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" clientset "k8s.io/client-go/kubernetes" e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) @@ -55,7 +55,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace) err := c.CoreV1().Pods(podNamespace).Delete(podName, nil) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil // assume pod was already deleted } return fmt.Errorf("pod Delete API error: %v", err) diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index 409d736054b..41ab4601afe 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -27,7 +27,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" @@ -213,7 +213,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) return err } @@ -387,7 +387,7 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { return wait.PollImmediate(poll, timeout, func() (bool, error) { _, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil // done } if err != nil { diff --git a/test/e2e/framework/psp.go b/test/e2e/framework/psp.go index 06210c668d2..3518b106c2b 100644 --- a/test/e2e/framework/psp.go +++ b/test/e2e/framework/psp.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/serviceaccount" @@ -111,7 +111,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string privilegedPSPOnce.Do(func() { _, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get( podSecurityPolicyPrivileged, metav1.GetOptions{}) - if !apierrs.IsNotFound(err) { + if !apierrors.IsNotFound(err) { // Privileged PSP was already created. 
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged) return @@ -119,7 +119,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string psp := privilegedPSP(podSecurityPolicyPrivileged) _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp) - if !apierrs.IsAlreadyExists(err) { + if !apierrors.IsAlreadyExists(err) { ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) } @@ -134,7 +134,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string Verbs: []string{"use"}, }}, }) - if !apierrs.IsAlreadyExists(err) { + if !apierrors.IsAlreadyExists(err) { ExpectNoError(err, "Failed to create PSP role") } } diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index 109edf52ee7..901cc8576ba 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -186,7 +186,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error { if c != nil && len(pvName) > 0 { framework.Logf("Deleting PersistentVolume %q", pvName) err := c.CoreV1().PersistentVolumes().Delete(pvName, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PV Delete API error: %v", err) } } @@ -198,7 +198,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin if c != nil && len(pvcName) > 0 { framework.Logf("Deleting PersistentVolumeClaim %q", pvcName) err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PVC Delete API error: %v", err) } } @@ -275,10 +275,10 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil { return err } - } else if !apierrs.IsNotFound(err) { + } else if !apierrors.IsNotFound(err) { return fmt.Errorf("PVC Get API error: %v", err) } - // delete pvckey from map even if apierrs.IsNotFound above is true and thus the + // delete pvckey from map even if apierrors.IsNotFound above is true and thus the // claim was not actually deleted here delete(claims, pvcKey) deletedPVCs++ diff --git a/test/e2e/framework/skip.go b/test/e2e/framework/skip.go index 4608f4c64f2..e25c6afe684 100644 --- a/test/e2e/framework/skip.go +++ b/test/e2e/framework/skip.go @@ -19,7 +19,7 @@ package framework import ( "fmt" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilversion "k8s.io/apimachinery/pkg/util/version" @@ -65,7 +65,7 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers _, err := resourceClient.List(metav1.ListOptions{}) if err != nil { // not all resources support list, so we ignore those - if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { + if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) { skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err) } Failf("Unexpected error getting %v: %v", 
gvr, err) diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index cec76eb88bc..9d18f85b03d 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -24,7 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" @@ -256,7 +256,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State if err == nil { return ss } - if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { e2efwk.Failf("failed to update statefulset %q: %v", name, err) } } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index ef919e7d740..30418b98971 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -46,7 +46,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -346,7 +346,7 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue } - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { Logf("PersistentVolume %s was removed", pvName) return nil } @@ -365,7 +365,7 @@ func findAvailableNamespaceName(baseName string, c clientset.Interface) (string, // Already taken return false, nil } - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } Logf("Unexpected error while getting namespace: %v", err) @@ -470,7 +470,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i case err == nil: Logf("Service %s in namespace %s found.", name, namespace) return exist, nil - case apierrs.IsNotFound(err): + case apierrors.IsNotFound(err): Logf("Service %s in namespace %s disappeared.", name, namespace) return !exist, nil case !testutils.IsRetryableAPIError(err): @@ -1190,7 +1190,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { Logf("%v %s not found: %v", kind, name, err) return nil } diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 5f505670088..cf57ed2a1a0 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -46,7 +46,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" @@ -372,7 +372,7 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { serverPod, err := podClient.Create(serverPod) // ok if the server pod already exists. 
TODO: make this controllable by callers if err != nil { - if apierrs.IsAlreadyExists(err) { + if apierrors.IsAlreadyExists(err) { framework.Logf("Ignore \"already-exists\" error, re-get pod...") ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName)) serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{}) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index e3f0cfd88ca..fe0992e7f59 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -45,7 +45,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1325,9 +1325,9 @@ metadata: framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) // if the error is API not found or could not find default credentials or TLS handshake timeout, try again - if apierrs.IsNotFound(err) || - apierrs.IsUnauthorized(err) || - apierrs.IsServerTimeout(err) { + if apierrors.IsNotFound(err) || + apierrors.IsUnauthorized(err) || + apierrors.IsServerTimeout(err) { err = nil } return false, err @@ -1969,7 +1969,7 @@ metadata: ginkgo.By("verifying the job " + jobName + " was deleted") _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) framework.ExpectError(err) - framework.ExpectEqual(apierrs.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) diff --git a/test/e2e/lifecycle/bootstrap/util.go b/test/e2e/lifecycle/bootstrap/util.go index 661a18692de..f397b87bd49 100644 --- a/test/e2e/lifecycle/bootstrap/util.go +++ b/test/e2e/lifecycle/bootstrap/util.go @@ -22,8 +22,8 @@ import ( "errors" "time" - v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -139,7 +139,7 @@ func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID strin return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) { _, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, nil @@ -150,7 +150,7 @@ func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID strin func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID string, t time.Duration) error { err := wait.Poll(framework.Poll, t, func() (bool, error) { secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, errors.New("secret not exists") } if secret != nil { diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index 0224bb3be9c..0a95b1f2842 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/api/core/v1" nodev1beta1 "k8s.io/api/node/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeclasstest 
"k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing" "k8s.io/kubernetes/test/e2e/framework" @@ -55,7 +55,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { } _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectError(err, "should be forbidden") - framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error") + framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") }) ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ", func() { diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index aa290c17a4d..926b2137134 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -28,7 +28,7 @@ import ( _ "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -84,7 +84,7 @@ func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData) _, err = c.CoreV1().Nodes().Update(node) if err != nil { - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { framework.ExpectNoError(err) } else { framework.Logf("Conflict when trying to add/update avoidPods %v to %v with error %v", avoidPods, nodeName, err) @@ -113,7 +113,7 @@ func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) { delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey) _, err = c.CoreV1().Nodes().Update(node) if err != nil { - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { framework.ExpectNoError(err) } else { framework.Logf("Conflict when trying to remove avoidPods to %v", nodeName) diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index f047b00577c..98d6e3c0bff 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -25,7 +25,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { testFlexVolume(driverInstallAs, config, f) ginkgo.By("waiting for flex client pod to terminate") - if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { + if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrors.IsNotFound(err) { framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) } @@ -213,7 +213,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { testFlexVolume(driverInstallAs, config, f) ginkgo.By("waiting for flex client pod to terminate") - if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { + if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrors.IsNotFound(err) { framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err) } diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index a3364777248..211e2c7d874 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -22,8 +22,8 @@ import ( "fmt" "time" - v1 "k8s.io/api/core/v1" - 
apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/util/slice" @@ -41,7 +41,7 @@ func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcNa for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns) return nil } diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 7fceaf84620..59ea6b45ce4 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -30,11 +30,10 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - apierrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" @@ -317,7 +316,7 @@ func (r *VolumeResource) CleanupResource() error { cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume")) } } - return apierrors.NewAggregate(cleanUpErrs) + return utilerrors.NewAggregate(cleanUpErrs) } func createPVCPV( @@ -409,7 +408,7 @@ func isDelayedBinding(sc *storagev1.StorageClass) bool { // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(cs clientset.Interface, className string) error { err := cs.StorageV1().StorageClasses().Delete(className, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return err } return nil diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 8b74641d1bc..5a6d81b6358 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -247,7 +247,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { _, err = client.StorageV1().StorageClasses().Create(class) // The "should provision storage with snapshot data source" test already has created the class. 
// TODO: make class creation optional and remove the IsAlreadyExists exception - framework.ExpectEqual(err == nil || apierrs.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) framework.ExpectNoError(err) defer func() { @@ -263,7 +263,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) // typically this claim has already been deleted err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } }() @@ -670,13 +670,13 @@ func prepareSnapshotDataSourceForProvisioning( cleanupFunc := func() { framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err) } framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name) err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err) } @@ -718,7 +718,7 @@ func preparePVCDataSourceForProvisioning( cleanupFunc := func() { framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name) err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(sourcePVC.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err) } } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 2a1af497263..efdc61a780f 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -23,7 +23,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" @@ -143,7 +143,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) // typically this claim has already been deleted err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. 
Error: %v", pvc.Name, err) } }() @@ -182,7 +182,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) // typically this snapshot has already been deleted err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err) } }() diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index 97625159b20..1d3651f4673 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -29,7 +29,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -155,7 +155,7 @@ func CreateItems(f *framework.Framework, items ...interface{}) (func(), error) { // command line flags, because they would also start to apply // to non-namespaced items. for _, destructor := range destructors { - if err := destructor(); err != nil && !apierrs.IsNotFound(err) { + if err := destructor(); err != nil && !apierrors.IsNotFound(err) { framework.Logf("deleting failed: %s", err) } } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 7fb582976c9..7b7c69e5ba5 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -30,7 +30,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -581,7 +581,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { _, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) - return apierrs.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }) framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index c4f0e5f81c1..29f7ec8e325 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -33,7 +33,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -810,7 +810,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. 
Error: %v", claim.Name, err) } }() @@ -1039,7 +1039,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(c clientset.Interface, className string) { err := c.StorageV1().StorageClasses().Delete(className, nil) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index a63a48973ee..264024e9d8e 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -246,7 +246,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu ginkgo.By("delete pvc") framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) - if !apierrs.IsNotFound(err) { + if !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 40ecb10276a..88c9e257c29 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -21,7 +21,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { for _, sspod := range ssPodsBeforeScaleDown.Items { _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) if err != nil { - framework.ExpectEqual(apierrs.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) diff --git a/test/utils/create_resources.go b/test/utils/create_resources.go index c40a0672264..21dd9801bfb 100644 --- a/test/utils/create_resources.go +++ b/test/utils/create_resources.go @@ -25,7 +25,7 @@ import ( apps "k8s.io/api/apps/v1" batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -52,12 +52,12 @@ func RetryWithExponentialBackOff(fn wait.ConditionFunc) error { func IsRetryableAPIError(err error) bool { // These errors may indicate a transient error that we can retry in tests. 
- if apierrs.IsInternalError(err) || apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || - apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) { + if apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsServerTimeout(err) || + apierrors.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) { return true } // If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. - if _, shouldRetry := apierrs.SuggestsClientDelay(err); shouldRetry { + if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { return true } return false @@ -69,7 +69,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) } createFunc := func() (bool, error) { _, err := c.CoreV1().Pods(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -86,7 +86,7 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic } createFunc := func() (bool, error) { _, err := c.CoreV1().ReplicationControllers(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -103,7 +103,7 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a } createFunc := func() (bool, error) { _, err := c.AppsV1().ReplicaSets(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -120,7 +120,7 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a } createFunc := func() (bool, error) { _, err := c.AppsV1().Deployments(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -137,7 +137,7 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap } createFunc := func() (bool, error) { _, err := c.AppsV1().DaemonSets(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -154,7 +154,7 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo } createFunc := func() (bool, error) { _, err := c.BatchV1().Jobs(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -171,7 +171,7 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se } createFunc := func() (bool, error) { _, err := c.CoreV1().Secrets(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -188,7 +188,7 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1 } createFunc := func() (bool, error) { _, err := c.CoreV1().ConfigMaps(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -205,7 +205,7 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S } 
createFunc := func() (bool, error) { _, err := c.CoreV1().Services(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -222,7 +222,7 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj } createFunc := func() (bool, error) { _, err := c.CoreV1().ResourceQuotas(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -239,7 +239,7 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent } createFunc := func() (bool, error) { _, err := c.CoreV1().PersistentVolumes().Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { @@ -256,7 +256,7 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str } createFunc := func() (bool, error) { _, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(obj) - if err == nil || apierrs.IsAlreadyExists(err) { + if err == nil || apierrors.IsAlreadyExists(err) { return true, nil } if IsRetryableAPIError(err) { diff --git a/test/utils/delete_resources.go b/test/utils/delete_resources.go index 58221f56383..1eb96cddaa6 100644 --- a/test/utils/delete_resources.go +++ b/test/utils/delete_resources.go @@ -21,7 +21,7 @@ package utils import ( "fmt" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" @@ -59,7 +59,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { deleteFunc := func() (bool, error) { err := deleteResource(c, kind, namespace, name, options) - if err == nil || apierrs.IsNotFound(err) { + if err == nil || apierrors.IsNotFound(err) { return true, nil } if IsRetryableAPIError(err) { diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go index 60a32d50c19..d1b583cc623 100644 --- a/test/utils/density_utils.go +++ b/test/utils/density_utils.go @@ -22,7 +22,7 @@ import ( "time" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" @@ -44,7 +44,7 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s for attempt := 0; attempt < retries; attempt++ { _, err = c.CoreV1().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch)) if err != nil { - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } } else { @@ -76,7 +76,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri } _, err = c.CoreV1().Nodes().Update(node) if err != nil { - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } else { klog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName) diff --git a/test/utils/runners.go b/test/utils/runners.go index d5bee2dfdee..9a9d4a2f5f5 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -30,7 +30,7 @@ import ( v1 "k8s.io/api/core/v1" 
storagev1beta1 "k8s.io/api/storage/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -1085,10 +1085,10 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse } _, err := client.StorageV1beta1().CSINodes().Create(csiNode) - if apierrs.IsAlreadyExists(err) { + if apierrors.IsAlreadyExists(err) { // Something created CSINode instance after we checked it did not exist. // Make the caller to re-try PrepareDependentObjects by returning Conflict error - err = apierrs.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err) + err = apierrors.NewConflict(storagev1beta1.Resource("csinodes"), nodeName, err) } return err } @@ -1121,7 +1121,7 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error { csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return s.createCSINode(node.Name, client) } return err @@ -1132,7 +1132,7 @@ func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error { csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -1158,7 +1158,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil { break } - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err) } time.Sleep(100 * time.Millisecond) @@ -1171,7 +1171,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo if err = strategy.PrepareDependentObjects(node, client); err == nil { break } - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return fmt.Errorf("Error while preparing objects for node %s: %s", node.Name, err) } time.Sleep(100 * time.Millisecond) @@ -1196,7 +1196,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil { break } - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return fmt.Errorf("Error when updating Node %v: %v", nodeName, err) } time.Sleep(100 * time.Millisecond) @@ -1210,7 +1210,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare if err == nil { break } - if !apierrs.IsConflict(err) { + if !apierrors.IsConflict(err) { return fmt.Errorf("Error when cleaning up Node %v objects: %v", nodeName, err) } time.Sleep(100 * time.Millisecond) From 5bc0e26c1902e7e28abfc828de19a2dbb3e492c2 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 12 Nov 2019 16:26:59 +0800 Subject: [PATCH 2/3] unify alias of api errors under pkg and staging --- .../rootcacertpublisher/publisher.go | 4 +- .../serviceaccounts_controller.go | 10 ++--- .../volume/persistentvolume/provision_test.go | 4 +- .../volume/persistentvolume/pv_controller.go | 14 
+++---- .../persistentvolume/testing/testing.go | 6 +-- .../pvc_protection_controller.go | 4 +- .../pvprotection/pv_protection_controller.go | 4 +- .../localsubjectaccessreview/rest.go | 10 ++--- .../authorization/subjectaccessreview/rest.go | 6 +-- .../core/service/allocator/storage/storage.go | 6 +-- pkg/volume/csi/csi_attacher.go | 8 ++-- pkg/volume/csi/csi_attacher_test.go | 12 +++--- pkg/volume/csi/csi_mounter.go | 4 +- pkg/volume/csi/csi_plugin.go | 6 +-- .../apiserver/pkg/endpoints/apiserver_test.go | 16 ++++---- .../pkg/registry/generic/registry/store.go | 26 ++++++------ .../apiserver/pkg/storage/etcd3/watcher.go | 8 ++-- .../k8s.io/client-go/tools/cache/reflector.go | 12 +++--- .../client-go/tools/cache/reflector_test.go | 6 +-- .../kubectl/pkg/cmd/apply/apply_test.go | 6 +-- staging/src/k8s.io/kubectl/pkg/cmd/get/get.go | 4 +- .../k8s.io/kubectl/pkg/cmd/util/helpers.go | 16 ++++---- .../k8s.io/kubectl/pkg/scale/scale_test.go | 40 +++++++++---------- 23 files changed, 116 insertions(+), 116 deletions(-) diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go index 33cb5e8dc0e..9f9e1aa685c 100644 --- a/pkg/controller/certificates/rootcacertpublisher/publisher.go +++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go @@ -22,7 +22,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -177,7 +177,7 @@ func (c *Publisher) syncNamespace(ns string) error { cm, err := c.cmLister.ConfigMaps(ns).Get(RootCACertConfigMapName) switch { - case apierrs.IsNotFound(err): + case apierrors.IsNotFound(err): _, err := c.client.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: RootCACertConfigMapName, diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 83214feb9df..1967f44749b 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -188,7 +188,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { }() ns, err := c.nsLister.Get(key) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } if err != nil { @@ -204,7 +204,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); { case err == nil: continue - case apierrs.IsNotFound(err): + case apierrors.IsNotFound(err): case err != nil: return err } @@ -212,9 +212,9 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { // TODO eliminate this once the fake client can handle creation without NS sa.Namespace = ns.Name - if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) { + if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrors.IsAlreadyExists(err) { // we can safely ignore terminating namespace errors - if 
!apierrs.HasStatusCause(err, v1.NamespaceTerminatingCause) { + if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { createFailures = append(createFailures, err) } } diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index 671339ebf18..d5c14ed6e76 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" @@ -423,7 +423,7 @@ func TestProvisionSync(t *testing.T) { // Inject errors to simulate crashed API server during // kubeclient.PersistentVolumes.Create() {Verb: "create", Resource: "persistentvolumes", Error: errors.New("Mock creation error1")}, - {Verb: "create", Resource: "persistentvolumes", Error: apierrs.NewAlreadyExists(api.Resource("persistentvolumes"), "")}, + {Verb: "create", Resource: "persistentvolumes", Error: apierrors.NewAlreadyExists(api.Resource("persistentvolumes"), "")}, }, wrapTestWithPluginCalls( nil, // recycle calls diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 1a5cfc2284e..7b157ce5c53 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" @@ -534,16 +534,16 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // updated to Released state when PVC does not exist. 
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { obj, err = ctrl.claimLister.PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return err } - found = !apierrs.IsNotFound(err) + found = !apierrors.IsNotFound(err) if !found { obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{}) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return err } - found = !apierrs.IsNotFound(err) + found = !apierrors.IsNotFound(err) } } } @@ -1391,7 +1391,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( pvName := ctrl.getProvisionedVolumeNameForClaim(claim) volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err) return pluginName, err } @@ -1489,7 +1489,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrors.IsAlreadyExists(err) { // Save succeeded. if err != nil { klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) diff --git a/pkg/controller/volume/persistentvolume/testing/testing.go b/pkg/controller/volume/persistentvolume/testing/testing.go index 5ea99e49d63..ba5a09a9075 100644 --- a/pkg/controller/volume/persistentvolume/testing/testing.go +++ b/pkg/controller/volume/persistentvolume/testing/testing.go @@ -24,7 +24,7 @@ import ( "sync" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -223,7 +223,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, volume.DeepCopy(), nil } klog.V(4).Infof("GetVolume: volume %s not found", name) - return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name) + return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) case action.Matches("get", "persistentvolumeclaims"): name := action.(core.GetAction).GetName() @@ -233,7 +233,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, claim.DeepCopy(), nil } klog.V(4).Infof("GetClaim: claim %s not found", name) - return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name) + return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) case action.Matches("delete", "persistentvolumes"): name := action.(core.DeleteAction).GetName() diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index ece1636512e..94d4ef6b64f 100644 --- 
a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -150,7 +150,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error { }() pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { klog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName) return nil } diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index 2c6baaa2205..b2da80c0a73 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" coreinformers "k8s.io/client-go/informers/core/v1" @@ -127,7 +127,7 @@ func (c *Controller) processPV(pvName string) error { }() pv, err := c.pvLister.Get(pvName) - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { klog.V(4).Infof("PV %s not found, ignoring", pvName) return nil } diff --git a/pkg/registry/authorization/localsubjectaccessreview/rest.go b/pkg/registry/authorization/localsubjectaccessreview/rest.go index 32378f1c30d..8db3c1836f2 100644 --- a/pkg/registry/authorization/localsubjectaccessreview/rest.go +++ b/pkg/registry/authorization/localsubjectaccessreview/rest.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - kapierrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -50,17 +50,17 @@ func (r *REST) New() runtime.Object { func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { localSubjectAccessReview, ok := obj.(*authorizationapi.LocalSubjectAccessReview) if !ok { - return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj)) } if errs := authorizationvalidation.ValidateLocalSubjectAccessReview(localSubjectAccessReview); len(errs) > 0 { - return nil, kapierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs) + return nil, apierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs) } namespace := genericapirequest.NamespaceValue(ctx) if len(namespace) == 0 { - return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace)) } if namespace != localSubjectAccessReview.Namespace { - return nil, kapierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match 
namespace: %v", namespace)) } if createValidation != nil { diff --git a/pkg/registry/authorization/subjectaccessreview/rest.go b/pkg/registry/authorization/subjectaccessreview/rest.go index e050e4af589..ae06324e8e6 100644 --- a/pkg/registry/authorization/subjectaccessreview/rest.go +++ b/pkg/registry/authorization/subjectaccessreview/rest.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - kapierrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -49,10 +49,10 @@ func (r *REST) New() runtime.Object { func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview) if !ok { - return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj)) } if errs := authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview); len(errs) > 0 { - return nil, kapierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs) + return nil, apierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs) } if createValidation != nil { diff --git a/pkg/registry/core/service/allocator/storage/storage.go b/pkg/registry/core/service/allocator/storage/storage.go index dfa2231fe3e..be93bbee34d 100644 --- a/pkg/registry/core/service/allocator/storage/storage.go +++ b/pkg/registry/core/service/allocator/storage/storage.go @@ -22,7 +22,7 @@ import ( "fmt" "sync" - k8serr "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/registry/generic" @@ -209,10 +209,10 @@ func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error { switch { case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0: if snapshot.ResourceVersion != existing.ResourceVersion { - return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match")) + return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match")) } case len(existing.ResourceVersion) != 0: - return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource")) + return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource")) } last = snapshot.ResourceVersion return snapshot, nil diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index fbe710fbfb5..a89c177e517 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -30,7 +30,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -108,7 +108,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string _, err = c.k8s.StorageV1().VolumeAttachments().Create(attachment) alreadyExist := false if err != nil { - if !apierrs.IsAlreadyExists(err) { + if !apierrors.IsAlreadyExists(err) { return "", errors.New(log("attacher.Attach failed: %v", 
err)) } alreadyExist = true @@ -388,7 +388,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { } if err := c.k8s.StorageV1().VolumeAttachments().Delete(attachID, nil); err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { // object deleted or never existed, done klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) return nil @@ -415,7 +415,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { //object deleted or never existed, done klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle)) return nil diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index 41c646cf839..4a7df878aa2 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -83,7 +83,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R for i := 0; i < 100; i++ { attach, err = client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { <-ticker.C continue } @@ -225,7 +225,7 @@ func TestAttacherAttach(t *testing.T) { status.AttachError = &storage.VolumeError{ Message: "attacher error", } - errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status() + errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status() fakeWatcher.Error(&errStatus) } else { status.Attached = true @@ -921,7 +921,7 @@ func TestAttacherDetach(t *testing.T) { reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) { // return Forbidden to all DELETE requests if action.Matches("delete", "volumeattachments") { - return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error")) + return true, nil, apierrors.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error")) } return false, nil, nil }, @@ -971,7 +971,7 @@ func TestAttacherDetach(t *testing.T) { csiAttacher.waitSleepTime = 100 * time.Millisecond go func() { if watchError { - errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status() + errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status() fakeWatcher.Error(&errStatus) return } @@ -986,7 +986,7 @@ func TestAttacherDetach(t *testing.T) { } attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{}) if err != nil { - if !apierrs.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected err: %v", err) } } else { diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index ad53d124b78..a5e1254ff2a 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -30,7 +30,7 @@ import ( api "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" - 
apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes" @@ -304,7 +304,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName)) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) return nil, nil } diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index d0e8f831bd4..fa335a07fc1 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -30,7 +30,7 @@ import ( api "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -742,7 +742,7 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) { } csiDriver, err := p.csiDriverLister.Get(driver) if err != nil { - if apierrs.IsNotFound(err) { + if apierrors.IsNotFound(err) { // Don't skip attach if CSIDriver does not exist return false, nil } @@ -779,7 +779,7 @@ func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storag } c, err := p.csiDriverLister.Get(driver) - if err != nil && !apierrs.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { // Some internal error. return err } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go index bdd41236ac2..53561de7e86 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/apiserver_test.go @@ -41,7 +41,7 @@ import ( fuzzer "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" apiequality "k8s.io/apimachinery/pkg/api/equality" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" @@ -2802,7 +2802,7 @@ func TestGetNamespaceSelfLink(t *testing.T) { func TestGetMissing(t *testing.T) { storage := map[string]rest.Storage{} simpleStorage := SimpleRESTStorage{ - errors: map[string]error{"get": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")}, + errors: map[string]error{"get": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")}, } storage["simple"] = &simpleStorage handler := handle(storage) @@ -2822,7 +2822,7 @@ func TestGetMissing(t *testing.T) { func TestGetRetryAfter(t *testing.T) { storage := map[string]rest.Storage{} simpleStorage := SimpleRESTStorage{ - errors: map[string]error{"get": apierrs.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)}, + errors: map[string]error{"get": apierrors.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)}, } storage["simple"] = &simpleStorage handler := handle(storage) @@ -2925,7 +2925,7 @@ func TestConnectResponderError(t *testing.T) { connectStorage := &ConnecterRESTStorage{} connectStorage.handlerFunc = func() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - 
connectStorage.receivedResponder.Error(apierrs.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated"))) + connectStorage.receivedResponder.Error(apierrors.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated"))) }) } storage := map[string]rest.Storage{ @@ -3271,7 +3271,7 @@ func TestDeleteMissing(t *testing.T) { storage := map[string]rest.Storage{} ID := "id" simpleStorage := SimpleRESTStorage{ - errors: map[string]error{"delete": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)}, + errors: map[string]error{"delete": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)}, } storage["simple"] = &simpleStorage handler := handle(storage) @@ -3543,7 +3543,7 @@ func TestUpdateMissing(t *testing.T) { storage := map[string]rest.Storage{} ID := "id" simpleStorage := SimpleRESTStorage{ - errors: map[string]error{"update": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)}, + errors: map[string]error{"update": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)}, } storage["simple"] = &simpleStorage handler := handle(storage) @@ -3581,7 +3581,7 @@ func TestCreateNotFound(t *testing.T) { "simple": &SimpleRESTStorage{ // storage.Create can fail with not found error in theory. // See http://pr.k8s.io/486#discussion_r15037092. - errors: map[string]error{"create": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")}, + errors: map[string]error{"create": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")}, }, }) server := httptest.NewServer(handler) @@ -4217,7 +4217,7 @@ func expectAPIStatus(t *testing.T, method, url string, data []byte, code int) *m func TestDelayReturnsError(t *testing.T) { storage := SimpleRESTStorage{ injectedFunction: func(obj runtime.Object) (runtime.Object, error) { - return nil, apierrs.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar") + return nil, apierrors.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar") }, } handler := handle(map[string]rest.Storage{"foo": &storage}) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index f12975d3191..b3c55404012 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -24,7 +24,7 @@ import ( "sync" "time" - kubeerr "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation/path" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" @@ -220,13 +220,13 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, key := NamespaceKeyRootFunc(ctx, prefix) ns, ok := genericapirequest.NamespaceFrom(ctx) if !ok || len(ns) == 0 { - return "", kubeerr.NewBadRequest("Namespace parameter required.") + return "", apierrors.NewBadRequest("Namespace parameter required.") } if len(name) == 0 { - return "", kubeerr.NewBadRequest("Name parameter required.") + return "", apierrors.NewBadRequest("Name parameter required.") } if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) } 
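
A minimal standalone sketch (not part of this patch; it assumes only that k8s.io/apimachinery is on the module path) of how callers classify the structured errors these key funcs construct — the point of the unified alias being that producers and consumers read the same way:

    package main

    import (
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
    )

    func main() {
        // Build the same kind of error NamespaceKeyFunc returns for a missing namespace.
        var err error = apierrors.NewBadRequest("Namespace parameter required.")

        // The reason travels inside the error, so call sites never need to know
        // which alias the producing package imported the helpers under.
        fmt.Println(apierrors.IsBadRequest(err)) // true
        fmt.Println(apierrors.IsNotFound(err))   // false

        // The full metav1.Status is recoverable through the APIStatus interface.
        if status, ok := err.(apierrors.APIStatus); ok {
            fmt.Println(status.Status().Code) // 400
        }
    }
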
key = key + "/" + name return key, nil @@ -236,10 +236,10 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, // to a resource relative to the given prefix without a namespace. func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { if len(name) == 0 { - return "", kubeerr.NewBadRequest("Name parameter required.") + return "", apierrors.NewBadRequest("Name parameter required.") } if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) } key := prefix + "/" + name return key, nil @@ -363,7 +363,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil { err = storeerr.InterpretCreateError(err, qualifiedResource, name) err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj) - if !kubeerr.IsAlreadyExists(err) { + if !apierrors.IsAlreadyExists(err) { return nil, err } if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil { @@ -374,7 +374,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation return nil, err } if accessor.GetDeletionTimestamp() != nil { - msg := &err.(*kubeerr.StatusError).ErrStatus.Message + msg := &err.(*apierrors.StatusError).ErrStatus.Message *msg = fmt.Sprintf("object is being deleted: %s", *msg) } return nil, err @@ -493,7 +493,7 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj } if version == 0 { if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { - return nil, nil, kubeerr.NewNotFound(qualifiedResource, name) + return nil, nil, apierrors.NewNotFound(qualifiedResource, name) } creating = true creatingObj = obj @@ -533,10 +533,10 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj // leave the Kind field empty. See the discussion in #18526. 
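
The Update hunks just below hand back Conflict errors on ResourceVersion races; the following is a self-contained sketch (assuming only k8s.io/apimachinery; fakeUpdate is a hypothetical stand-in for any read-modify-write) of the retry-on-conflict idiom that several of the renamed call sites in this series implement:

    package main

    import (
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    // fakeUpdate conflicts twice before succeeding, the way a genuine
    // ResourceVersion race against an Update path would.
    func fakeUpdate() func() error {
        attempts := 0
        return func() error {
            attempts++
            if attempts < 3 {
                return apierrors.NewConflict(
                    schema.GroupResource{Resource: "widgets"}, "foo",
                    fmt.Errorf("the object has been modified"))
            }
            return nil
        }
    }

    func main() {
        update := fakeUpdate()
        var err error
        for i := 0; i < 5; i++ {
            if err = update(); err == nil || !apierrors.IsConflict(err) {
                break // success, or an error that retrying cannot fix
            }
        }
        fmt.Println("final error:", err) // final error: <nil>
    }
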
qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), resourceVersion, "must be specified for an update")} - return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList) + return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) } if resourceVersion != version { - return nil, nil, kubeerr.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) + return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) } } if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { @@ -916,7 +916,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V // check if obj has pending finalizers accessor, err := meta.Accessor(obj) if err != nil { - return nil, false, kubeerr.NewInternalError(err) + return nil, false, apierrors.NewInternalError(err) } pendingFinalizers := len(accessor.GetFinalizers()) != 0 var ignoreNotFound bool @@ -1038,7 +1038,7 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali errs <- err return } - if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !kubeerr.IsNotFound(err) { + if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) { klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) errs <- err return diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index a66c9eb9e4e..e4b8721ead4 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -25,7 +25,7 @@ import ( "strings" "sync" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/storage" @@ -332,10 +332,10 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { func transformErrorToEvent(err error) *watch.Event { err = interpretWatchError(err) - if _, ok := err.(apierrs.APIStatus); !ok { - err = apierrs.NewInternalError(err) + if _, ok := err.(apierrors.APIStatus); !ok { + err = apierrors.NewInternalError(err) } - status := err.(apierrs.APIStatus).Status() + status := err.(apierrors.APIStatus).Status() return &watch.Event{ Type: watch.Error, Object: &status, diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index 1165c523eb6..fc785c2c104 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -26,7 +26,7 @@ import ( "sync" "time" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -375,7 +375,7 @@ loop: break loop } if event.Type == watch.Error { - return apierrs.FromObject(event.Object) + return apierrors.FromObject(event.Object) } if r.expectedType != nil { if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { @@ -479,9 +479,9 @@ func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) { } func isExpiredError(err error) bool { - // In Kubernetes 1.17 
and earlier, the api server returns both apierrs.StatusReasonExpired and - // apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent - // and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone // check when we fully drop support for Kubernetes 1.17 servers from reflectors. - return apierrs.IsResourceExpired(err) || apierrs.IsGone(err) + return apierrors.IsResourceExpired(err) || apierrors.IsGone(err) } diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_test.go b/staging/src/k8s.io/client-go/tools/cache/reflector_test.go index 2b0796c6ac4..9f6aaa07668 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector_test.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_test.go @@ -26,7 +26,7 @@ import ( "time" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -520,7 +520,7 @@ func TestReflectorExpiredExactResourceVersion(t *testing.T) { return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}, Items: pods[0:4]}, nil case "10": // When watch cache is disabled, if the exact ResourceVersion requested is not available, a "Expired" error is returned. - return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.") + return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.") case "": return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil default: @@ -584,7 +584,7 @@ func TestReflectorFullListIfExpired(t *testing.T) { return &v1.PodList{ListMeta: metav1.ListMeta{Continue: "C1", ResourceVersion: "11"}, Items: pods[0:4]}, nil // second page of the above list case rvContinueLimit("", "C1", 4): - return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.") + return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.") // rv=10 unlimited list case rvContinueLimit("10", "", 0): return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go index 3ebc2361dd3..eacbfc807e4 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go @@ -34,7 +34,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - kubeerr "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -651,7 +651,7 @@ func TestApplyRetry(t *testing.T) { case p == pathRC && m == "PATCH": if firstPatch { firstPatch = false - statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. 
Please apply at first")) + statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first")) bodyBytes, _ := json.Marshal(statusErr) bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes)) return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil @@ -1278,7 +1278,7 @@ func TestForceApply(t *testing.T) { case strings.HasSuffix(p, pathRC) && m == "PATCH": counts["patch"]++ if counts["patch"] <= 6 { - statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first")) + statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first")) bodyBytes, _ := json.Marshal(statusErr) bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes)) return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go index 59dabac4f94..ab157bd7674 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go @@ -27,7 +27,7 @@ import ( "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" - kapierrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -483,7 +483,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e Do() if o.IgnoreNotFound { - r.IgnoreErrors(kapierrors.IsNotFound) + r.IgnoreErrors(apierrors.IsNotFound) } if err := r.Err(); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go index 5d1b76f068c..c6a655a85d7 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -29,7 +29,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" "github.com/spf13/pflag" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -60,10 +60,10 @@ type debugError interface { // source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. 
func AddSourceToErr(verb string, source string, err error) error { if source != "" { - if statusError, ok := err.(kerrors.APIStatus); ok { + if statusError, ok := err.(apierrors.APIStatus); ok { status := statusError.Status() status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message) - return &kerrors.StatusError{ErrStatus: status} + return &apierrors.StatusError{ErrStatus: status} } return fmt.Errorf("error when %s %q: %v", verb, source, err) } @@ -129,8 +129,8 @@ func checkErr(err error, handleErr func(string, int)) { switch { case err == ErrExit: handleErr("", DefaultErrorExitCode) - case kerrors.IsInvalid(err): - details := err.(*kerrors.StatusError).Status().Details + case apierrors.IsInvalid(err): + details := err.(*apierrors.StatusError).Status().Details s := "The request is invalid" if details == nil { handleErr(s, DefaultErrorExitCode) @@ -202,7 +202,7 @@ func StandardErrorMessage(err error) (string, bool) { if debugErr, ok := err.(debugError); ok { klog.V(4).Infof(debugErr.DebugError()) } - status, isStatus := err.(kerrors.APIStatus) + status, isStatus := err.(apierrors.APIStatus) switch { case isStatus: switch s := status.Status(); { @@ -213,7 +213,7 @@ func StandardErrorMessage(err error) (string, bool) { default: return fmt.Sprintf("Error from server: %s", err.Error()), true } - case kerrors.IsUnexpectedObjectError(err): + case apierrors.IsUnexpectedObjectError(err): return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true } switch t := err.(type) { @@ -259,7 +259,7 @@ func MultilineError(prefix string, err error) string { // Returns true if a case exists to handle the error type, or false otherwise. func PrintErrorWithCauses(err error, errOut io.Writer) bool { switch t := err.(type) { - case *kerrors.StatusError: + case *apierrors.StatusError: errorDetails := t.Status().Details if errorDetails != nil { fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name) diff --git a/staging/src/k8s.io/kubectl/pkg/scale/scale_test.go b/staging/src/k8s.io/kubectl/pkg/scale/scale_test.go index 276ac5edd41..4a9d9f22bca 100644 --- a/staging/src/k8s.io/kubectl/pkg/scale/scale_test.go +++ b/staging/src/k8s.io/kubectl/pkg/scale/scale_test.go @@ -23,7 +23,7 @@ import ( "time" autoscalingv1 "k8s.io/api/autoscaling/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "k8s.io/apimachinery/pkg/apis/testapigroup/v1" "k8s.io/apimachinery/pkg/runtime" @@ -58,8 +58,8 @@ var ( ) func TestReplicationControllerScaleRetry(t *testing.T) { - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClientExpectedAction := []string{"patch", "get"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError) @@ -94,8 +94,8 @@ func TestReplicationControllerScaleRetry(t *testing.T) { } func TestReplicationControllerScaleInvalid(t *testing.T) { - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClientExpectedAction := []string{"patch"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError) @@ -168,8 +168,8 
@@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { } func TestDeploymentScaleRetry(t *testing.T) { - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClientExpectedAction := []string{"patch", "get"} scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) @@ -226,8 +226,8 @@ func TestDeploymentScale(t *testing.T) { func TestDeploymentScaleInvalid(t *testing.T) { scaleClientExpectedAction := []string{"patch"} - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) scaler := NewScaler(scaleClient) @@ -299,8 +299,8 @@ func TestStatefulSetScale(t *testing.T) { func TestStatefulSetScaleRetry(t *testing.T) { scaleClientExpectedAction := []string{"patch", "get"} - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) scaler := NewScaler(scaleClient) @@ -335,8 +335,8 @@ func TestStatefulSetScaleRetry(t *testing.T) { func TestStatefulSetScaleInvalid(t *testing.T) { scaleClientExpectedAction := []string{"patch"} - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) scaler := NewScaler(scaleClient) @@ -407,8 +407,8 @@ func TestReplicaSetScale(t *testing.T) { } func TestReplicaSetScaleRetry(t *testing.T) { - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClientExpectedAction := []string{"patch", "get"} scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError) @@ -443,8 +443,8 @@ func TestReplicaSetScaleRetry(t *testing.T) { } func TestReplicaSetScaleInvalid(t *testing.T) { - verbsOnError := map[string]*kerrors.StatusError{ - "patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + verbsOnError := map[string]*apierrors.StatusError{ + "patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClientExpectedAction := []string{"patch"} scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError) @@ -688,12 +688,12 @@ func TestGenericScale(t *testing.T) { } } -func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient { - shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) { +func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*apierrors.StatusError) *fakescale.FakeScaleClient { + shouldReturnAnError := func(verb string) (*apierrors.StatusError, bool) { if anError, anErrorExists := errorsOnVerb[verb]; 
anErrorExists { return anError, true } - return &kerrors.StatusError{}, false + return &apierrors.StatusError{}, false } newReplicas := int32(replicas) scaleClient := &fakescale.FakeScaleClient{} From 6596a14d3920a0b4157f360f1aaa521f7965bf74 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Tue, 12 Nov 2019 16:43:58 +0800 Subject: [PATCH 3/3] add missing alias of api errors under test --- .../pkg/registry/generic/registry/store.go | 2 +- test/e2e/apimachinery/chunking.go | 6 ++--- .../apimachinery/crd_conversion_webhook.go | 4 ++-- test/e2e/apimachinery/garbage_collector.go | 16 ++++++------- test/e2e/apimachinery/namespace.go | 6 ++--- test/e2e/apimachinery/protocol.go | 14 +++++------ test/e2e/apimachinery/resource_quota.go | 22 ++++++++--------- test/e2e/apimachinery/table_conversion.go | 4 ++-- test/e2e/apimachinery/webhook.go | 12 +++++----- test/e2e/apps/cronjob.go | 4 ++-- test/e2e/apps/deployment.go | 6 ++--- test/e2e/apps/job.go | 4 ++-- test/e2e/apps/rc.go | 6 ++--- test/e2e/apps/replica_set.go | 6 ++--- test/e2e/auth/audit_dynamic.go | 4 ++-- .../autoscaling/cluster_size_autoscaling.go | 10 ++++---- test/e2e/common/container.go | 4 ++-- test/e2e/common/lease.go | 4 ++-- test/e2e/common/runtimeclass.go | 3 +-- test/e2e/framework/job/wait.go | 4 ++-- test/e2e/framework/pods.go | 8 +++---- test/e2e/framework/service/jig.go | 4 ++-- test/e2e/framework/service/resource.go | 4 ++-- test/e2e/kubectl/kubectl.go | 2 +- test/e2e/network/fixture.go | 10 ++++---- test/e2e/network/ingress.go | 4 ++-- test/e2e/network/proxy.go | 6 ++--- test/e2e/scheduling/preemption.go | 14 +++++------ test/e2e/servicecatalog/podpreset.go | 6 ++--- test/e2e/storage/csi_mock_volume.go | 8 +++---- test/e2e/storage/drivers/csi.go | 4 ++-- test/e2e/storage/drivers/in_tree.go | 4 ++-- test/e2e/storage/testsuites/ephemeral.go | 4 ++-- test/e2e/storage/testsuites/volumelimits.go | 4 ++-- test/e2e/upgrades/sysctl.go | 4 ++-- test/e2e_node/apparmor_test.go | 4 ++-- test/e2e_node/critical_pod_test.go | 4 ++-- test/e2e_node/eviction_test.go | 8 +++---- test/e2e_node/mirror_pod_test.go | 4 ++-- .../admissionwebhook/admission_test.go | 14 +++++------ .../apiserver/apply/apply_crd_test.go | 12 +++++----- .../integration/apiserver/apply/apply_test.go | 8 +++---- .../apiserver/certreload/certreload_test.go | 4 ++-- .../max_json_patch_operations_test.go | 4 ++-- .../apiserver/max_request_body_bytes_test.go | 17 +++++++------ test/integration/apiserver/patch_test.go | 4 ++-- test/integration/auth/node_test.go | 6 ++--- test/integration/daemonset/daemonset_test.go | 4 ++-- test/integration/dryrun/dryrun_test.go | 10 ++++---- test/integration/evictions/evictions_test.go | 12 +++++----- .../cluster_scoped_owner_test.go | 4 ++-- .../garbage_collector_test.go | 24 +++++++++---------- test/integration/ipamperf/util.go | 4 ++-- test/integration/master/audit_dynamic_test.go | 4 ++-- .../master/synthetic_master_test.go | 10 ++++---- .../integration/replicaset/replicaset_test.go | 4 ++-- .../replicationcontroller_test.go | 4 ++-- test/integration/scheduler/predicates_test.go | 6 ++--- test/integration/scheduler/priorities_test.go | 4 ++-- test/integration/scheduler/util.go | 8 +++---- .../serviceaccount/service_account_test.go | 22 ++++++++--------- test/integration/utils.go | 4 ++-- test/soak/serve_hostnames/serve_hostnames.go | 4 ++-- 63 files changed, 221 insertions(+), 223 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go 
index b3c55404012..dc287d1ed5b 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -933,7 +933,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V if err == nil && deleteImmediately && preconditions.ResourceVersion != nil { accessor, err = meta.Accessor(out) if err != nil { - return out, false, kubeerr.NewInternalError(err) + return out, false, apierrors.NewInternalError(err) } resourceVersion := accessor.GetResourceVersion() preconditions.ResourceVersion = &resourceVersion diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index b7da344a2cb..3e0b87a74ff 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -27,7 +27,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/features" @@ -153,11 +153,11 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { framework.Logf("Token %s has not expired yet", firstToken) return false, nil } - if err != nil && !errors.IsResourceExpired(err) { + if err != nil && !apierrors.IsResourceExpired(err) { return false, err } framework.Logf("got error %s", err) - status, ok := err.(errors.APIStatus) + status, ok := err.(apierrors.APIStatus) if !ok { return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err)) } diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 441bea6998f..185933b94f8 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" @@ -236,7 +236,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa }, }, }) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingCRDName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 596a387def6..ac5177f08a9 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -28,7 +28,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -667,7 +667,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Logf("") return false, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ 
-769,7 +769,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Logf("") return false, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -882,7 +882,7 @@ var _ = SIGDescribe("Garbage collector", func() { definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped) defer func() { err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("failed to delete CustomResourceDefinition: %v", err) } }() @@ -951,7 +951,7 @@ var _ = SIGDescribe("Garbage collector", func() { // Ensure the dependent is deleted. if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(dependentName, metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { framework.Logf("owner: %#v", persistedOwner) framework.Logf("dependent: %#v", persistedDependent) @@ -963,7 +963,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err == nil { framework.Failf("expected owner resource %q to be deleted", ownerName) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { framework.Failf("unexpected error getting owner resource %q: %v", ownerName, err) } } @@ -985,7 +985,7 @@ var _ = SIGDescribe("Garbage collector", func() { definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped) defer func() { err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("failed to delete CustomResourceDefinition: %v", err) } }() @@ -1056,7 +1056,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err == nil { return false, nil } - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, fmt.Errorf("failed to get owner: %v", err) } return true, nil diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index c0cdefd773b..f77c2f1f47f 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -23,7 +23,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" @@ -121,7 +121,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { return true, nil } return false, nil @@ -178,7 +178,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { return true, nil } return false, nil diff --git a/test/e2e/apimachinery/protocol.go b/test/e2e/apimachinery/protocol.go 
index 13ebccdf6f9..a2c001e62a2 100644 --- a/test/e2e/apimachinery/protocol.go +++ b/test/e2e/apimachinery/protocol.go @@ -23,7 +23,7 @@ import ( g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -63,16 +63,16 @@ var _ = SIGDescribe("client-go should negotiate", func() { case watch.Added, watch.Modified: // this is allowed case watch.Error: - err := errors.FromObject(evt.Object) - // In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and - // apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent - // and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone + err := apierrors.FromObject(evt.Object) + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone // check when we fully drop support for Kubernetes 1.17 servers from reflectors. - if errors.IsGone(err) || errors.IsResourceExpired(err) { + if apierrors.IsGone(err) || apierrors.IsResourceExpired(err) { // this is allowed, since the kubernetes object could be very old break } - if errors.IsUnexpectedObjectError(err) { + if apierrors.IsUnexpectedObjectError(err) { g.Fail(fmt.Sprintf("unexpected object, wanted v1.Status: %#v", evt.Object)) } g.Fail(fmt.Sprintf("unexpected error: %#v", evt.Object)) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index e01c24ac0e7..40de5ccf425 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -24,7 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -886,7 +886,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Verifying the deleted ResourceQuota") _, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) @@ -1076,7 +1076,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1115,7 +1115,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() { _, err := 
f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1160,7 +1160,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1206,10 +1206,10 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("2") @@ -1261,7 +1261,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1295,7 +1295,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1334,7 +1334,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: 
metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} hard[v1.ResourcePods] = resource.MustParse("1") @@ -1714,7 +1714,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s resourceQuota.Spec.Hard[resourceName] = current _, err = c.CoreV1().ResourceQuotas(ns).Update(resourceQuota) // ignoring conflicts since someone else may already updated it. - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { return false, nil } return false, err diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 27c294caaa0..2decea9c5e2 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -27,7 +27,7 @@ import ( authorizationv1 "k8s.io/api/authorization/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/client-go/util/workqueue" @@ -164,7 +164,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { } err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do().Into(table) framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar) - framework.ExpectEqual(err.(errors.APIStatus).Status().Code, int32(406)) + framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406)) }) }) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index ad37e9e137a..324b04b0727 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -30,7 +30,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" @@ -739,7 +739,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { }, }, }) - if err != nil && errors.IsAlreadyExists(err) { + if err != nil && apierrors.IsAlreadyExists(err) { framework.Logf("role binding %s already exists", roleBindingName) } else { framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace) @@ -1120,7 +1120,7 @@ func testWebhook(f *framework.Framework) { framework.Failf("expect error %q, got %q", "deadline", err.Error()) } // ensure the pod was not actually created - if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !errors.IsNotFound(err) { + if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { framework.Failf("expect notfound error looking for rejected pod, got %v", err) } @@ -1296,7 +1296,7 @@ func testFailClosedWebhook(f *framework.Framework) { } _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap) framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", 
failNamespaceName) - if !errors.IsInternalError(err) { + if !apierrors.IsInternalError(err) { framework.Failf("expect an internal error, got %#v", err) } } @@ -1661,7 +1661,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig return true, nil } // Only retry update on conflict - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return false, err } return false, nil @@ -1683,7 +1683,7 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u return true, nil } // Only retry update on conflict - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return false, err } return false, nil diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 7bf1921f0c9..0aa5e8bfd73 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -26,7 +26,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -214,7 +214,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("Ensuring job was deleted") _, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) ginkgo.By("Ensuring the job is not in the cronjob active list") err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name) diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index f824524685f..13c23813119 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -29,7 +29,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -206,7 +206,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { framework.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) framework.ExpectError(err) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) @@ -615,7 +615,7 @@ func testIterativeDeployments(f *framework.Framework) { name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) err := c.CoreV1().Pods(ns).Delete(name, nil) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } } diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index e3ee4497155..238f54eb095 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/test/e2e/framework" @@ -162,7 +162,7 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Ensuring job was deleted") _, err = 
jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) /* diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 6ed684d04b8..bab2e3b9ce7 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -287,7 +287,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } framework.ExpectNoError(err) @@ -323,7 +323,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { pod.Labels = map[string]string{"name": "not-matching-name"} _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) - if err != nil && errors.IsConflict(err) { + if err != nil && apierrors.IsConflict(err) { return false, nil } if err != nil { diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 77497f22e61..0c3f01cc031 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -22,7 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -289,7 +289,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the ReplicaSet - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } framework.ExpectNoError(err) @@ -315,7 +315,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { pod.Labels = map[string]string{"name": "not-matching-name"} _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) - if err != nil && errors.IsConflict(err) { + if err != nil && apierrors.IsConflict(err) { return false, nil } if err != nil { diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 80f2a62a3dc..1984d070c92 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -25,7 +25,7 @@ import ( auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -111,7 +111,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { // get pod ip err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) { p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Logf("waiting for 
audit-proxy pod to be present") return false, nil } else if err != nil { diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index ad0a6351811..18f0454e2eb 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -31,7 +31,7 @@ import ( v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -1476,7 +1476,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { if err == nil { return nil } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) @@ -1517,7 +1517,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd if err == nil { return nil } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) @@ -1692,7 +1692,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if err == nil { break } - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { return err } klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) @@ -1936,7 +1936,7 @@ func createPriorityClasses(f *framework.Framework) func() { if err != nil { klog.Errorf("Error creating priority class: %v", err) } - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } return func() { diff --git a/test/e2e/common/container.go b/test/e2e/common/container.go index d00037eafc6..f1a10410770 100644 --- a/test/e2e/common/container.go +++ b/test/e2e/common/container.go @@ -21,7 +21,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -105,7 +105,7 @@ func (cc *ConformanceContainer) Present() (bool, error) { if err == nil { return true, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } return false, err diff --git a/test/e2e/common/lease.go b/test/e2e/common/lease.go index 58db1c8a9ee..74cab28cb86 100644 --- a/test/e2e/common/lease.go +++ b/test/e2e/common/lease.go @@ -23,7 +23,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -154,6 +154,6 @@ var _ = framework.KubeDescribe("Lease", func() { framework.ExpectNoError(err, "deleting Lease failed") _, err = leaseClient.Get(name, metav1.GetOptions{}) - framework.ExpectEqual(errors.IsNotFound(err), true) + framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index 8087bedee3f..95b12ac19f5 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -21,7 +21,6 @@ import ( "time" v1 "k8s.io/api/core/v1" - 
"k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -72,7 +71,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { ginkgo.By("Waiting for the RuntimeClass to disappear") framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) { _, err := rcClient.Get(rcName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil // done } if err != nil { diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index b975460087f..d6b4a02b400 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -21,7 +21,7 @@ import ( batchv1 "k8s.io/api/batch/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -91,7 +91,7 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { _, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index dcfc122cc29..2c39288e595 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -23,7 +23,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" @@ -115,7 +115,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { } // Update updates the pod object. It retries if there is a conflict, throw out error if -// there is any other errors. name is the pod name, updateFn is the function updating the +// there is any other apierrors. name is the pod name, updateFn is the function updating the // pod object. func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { @@ -129,7 +129,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { Logf("Successfully updated pod %q", name) return true, nil } - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err) return false, nil } @@ -147,7 +147,7 @@ func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeo // disappear before the timeout, it will fail the test. 
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) { err := c.Delete(name, options) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { Failf("Failed to delete pod %q: %v", name, err) } gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index e387071a517..9d14880e698 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -27,7 +27,7 @@ import ( "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -446,7 +446,7 @@ func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { if err == nil { return j.sanityCheckService(result, service.Spec.Type) } - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err) } } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index 91697939ca5..66546862310 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -20,7 +20,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -73,7 +73,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update service, err = c.CoreV1().Services(namespace).Update(service) - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return service, err } } diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index fe0992e7f59..52b1baefb7f 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -2660,7 +2660,7 @@ func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D _, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Deleted: - return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") + return false, apierrors.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") } switch rc := event.Object.(type) { case *v1.ReplicationController: diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go index b6f2521eb83..1b7884300c4 100644 --- a/test/e2e/network/fixture.go +++ b/test/e2e/network/fixture.go @@ -18,7 +18,7 @@ package network import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" @@ -118,7 +118,7 @@ func (t *TestFixture) Cleanup() []error { // First, resize the RC to 0. 
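
Context for the retry hunks above (PodClient.Update here, and TestJig.UpdateService in jig.go): both are the standard apimachinery conflict-retry idiom that this alias rename touches throughout the series. A minimal self-contained sketch of the same pattern under the new apierrors alias; the helper name updatePodLabel and the label key are illustrative, not from this patch:

package e2esketch

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// updatePodLabel re-reads the pod on every attempt and retries only on 409 Conflict.
func updatePodLabel(c clientset.Interface, ns, name string) error {
	return wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels["touched"] = "true"
		if _, err := c.CoreV1().Pods(ns).Update(pod); err != nil {
			if apierrors.IsConflict(err) {
				// 409: someone else wrote the pod first; re-get and retry.
				return false, nil
			}
			return false, err // any other error aborts the poll
		}
		return true, nil
	})
}

The design point is that only a conflict is retried, and only after a fresh GET; every other error surfaces immediately.
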
old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -126,7 +126,7 @@ func (t *TestFixture) Cleanup() []error { x := int32(0) old.Spec.Replicas = &x if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return err @@ -139,7 +139,7 @@ func (t *TestFixture) Cleanup() []error { // TODO(mikedanese): Wait. // Then, delete the RC altogether. if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { errs = append(errs, err) } } @@ -149,7 +149,7 @@ func (t *TestFixture) Cleanup() []error { ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { errs = append(errs, err) } } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index cb33dbfd026..361968a3868 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" @@ -792,7 +792,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat } ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) { - if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { + if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !apierrors.IsNotFound(err) { framework.Logf("ginkgo.Failed to delete ssl certificate %q: %v. 
Retrying...", preSharedCertName, err) return false, nil } diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 08f79c1ad13..1654809da94 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -27,7 +27,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/net" @@ -226,7 +226,7 @@ var _ = SIGDescribe("Proxy", func() { body, status, d, err := doProxy(f, path, i) if err != nil { - if serr, ok := err.(*errors.StatusError); ok { + if serr, ok := err.(*apierrors.StatusError); ok { recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v", i, status, d, path, serr.Status())) } else { @@ -322,7 +322,7 @@ func waitForEndpoint(c clientset.Interface, ns, name string) error { registerTimeout := time.Minute for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) { endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Logf("Endpoint %s/%s is not ready yet", ns, name) continue } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 66eeae5a4f9..c0b8cdc6a76 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -79,7 +79,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var err error for _, pair := range priorityPairs { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } e2enode.WaitForTotalHealthy(cs, time.Minute) @@ -143,7 +143,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) // Make sure that the lowest priority pod is deleted. preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) - podDeleted := (err != nil && errors.IsNotFound(err)) || + podDeleted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) framework.ExpectEqual(podDeleted, true) // Other pods (mid priority ones) should be present. @@ -198,7 +198,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // Clean-up the critical pod // Always run cleanup to make sure the pod is properly cleaned up. err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0)) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err) } }() @@ -212,7 +212,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }) // Make sure that the lowest priority pod is deleted. 
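
The preemption check just below treats a pod as deleted when the GET either returns 404 NotFound or returns an object whose deletion timestamp is already set. A compact sketch of that predicate under the new alias; podIsGone is an illustrative name, not from this patch:

package e2esketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podIsGone reports whether the pod is fully deleted (404) or at least
// marked for deletion (graceful termination in progress).
func podIsGone(c clientset.Interface, ns, name string) (bool, error) {
	pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return true, nil
	}
	if err != nil {
		return false, err
	}
	return pod.DeletionTimestamp != nil, nil
}
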
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) - podDeleted := (err != nil && errors.IsNotFound(err)) || + podDeleted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) framework.ExpectEqual(podDeleted, true) // Other pods (mid priority ones) should be present. @@ -301,9 +301,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { _, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) if err != nil { framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) - framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err) + framework.Logf("Reason: %v. Msg: %v", apierrors.ReasonForError(err), err) } - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } }) diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 1eaa5aba0c4..9e672b80a60 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" @@ -73,7 +73,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } _, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } framework.ExpectNoError(err) @@ -191,7 +191,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } _, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } framework.ExpectNoError(err) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 9375b158498..08d162349ca 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { attachmentName := fmt.Sprintf("csi-%x", attachmentHash) _, err = m.cs.StorageV1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { if !test.disableAttach { framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") } @@ -618,7 +618,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) { csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, err } attachLimit = getVolumeLimitFromCSINode(csiNode, driverName) @@ -809,7 +809,7 @@ func 
waitForCSIDriver(cs clientset.Interface, driverName string) error { framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { _, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{}) - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return err } } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index f0e9cd1d8c7..0a24a73b251 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -44,7 +44,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" @@ -492,7 +492,7 @@ func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) { csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return false, err } for _, driver := range csiNode.Spec.Drivers { diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 087212f28d2..4a4110de265 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -46,7 +46,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -319,7 +319,7 @@ func (v *glusterVolume) DeleteVolume() { framework.Logf("Deleting Gluster endpoints %q...", name) err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) if err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { framework.Failf("Gluster delete endpoints failed: %v", err) } framework.Logf("Gluster endpoints %q not found, assuming deleted", name) diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index a18ec5a357a..a8981be8763 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -25,7 +25,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -370,7 +370,7 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) { // Pod was created, feature supported. StopPod(c, pod) return true, nil - case errors.IsInvalid(err): + case apierrors.IsInvalid(err): // "Invalid" because it uses a feature that isn't supported. 
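
apierrors.IsInvalid matches StatusReasonInvalid (HTTP 422), which the API server returns when validation rejects a field it does not understand; that is why the ephemeral-volume case above can double as a feature probe. A hedged sketch of the same probe shape; featureSupported is an illustrative name, not from this patch:

package e2esketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// featureSupported interprets the error from a probe Create call.
func featureSupported(createErr error) (bool, error) {
	switch {
	case createErr == nil:
		// Object accepted: the feature gate and API field are available.
		return true, nil
	case apierrors.IsInvalid(createErr):
		// 422 Invalid: the server validated and rejected the new field.
		return false, nil
	default:
		// Unrelated failure; surface it rather than guessing.
		return false, createErr
	}
}
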
return false, nil default: diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 0563394012e..4405335fbf3 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -26,7 +26,7 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -246,7 +246,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl if err == nil { existing++ } else { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { pvNames.Delete(pvName) } else { framework.Logf("Failed to get PV %s: %s", pvName, err) diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go index 27dc034a2e5..c47ebf53543 100644 --- a/test/e2e/upgrades/sysctl.go +++ b/test/e2e/upgrades/sysctl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/ginkgo" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/sysctl" @@ -60,7 +60,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } if err == nil { diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 053fb59d152..9cce7f9c2ac 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -28,7 +28,7 @@ import ( "strings" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" @@ -159,7 +159,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. 
_, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) { switch e.Type { case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name) + return false, apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name) } switch t := e.Object.(type) { case *v1.Pod: diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 1670a5abf76..3eeafab38d4 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeapi "k8s.io/kubernetes/pkg/apis/core" @@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C }) _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err) // Create pods, starting with non-critical so that the critical preempts the other pods. f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed}) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index d398b491c28..21ed8000202 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) @@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) @@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis }) ginkgo.BeforeEach(func() { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) - framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true) + 
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index a45f4128e44..cc0cb27f5b9 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" @@ -177,7 +177,7 @@ func deleteStaticPod(dir, name, namespace string) error { func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { _, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return nil } return goerrors.New("pod not disappear") diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index 3aea2748c29..2246b1ce277 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -40,7 +40,7 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -678,7 +678,7 @@ func testResourceDelete(c *testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -753,7 +753,7 @@ func testResourceDelete(c *testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -801,7 +801,7 @@ func testResourceDeletecollection(c *testContext) { // wait for the item to be gone err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } if err == nil { @@ -943,7 +943,7 @@ func testNamespaceDelete(c *testContext) { } // verify namespace is gone obj, err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { + if err == nil || !apierrors.IsNotFound(err) { c.t.Errorf("expected namespace to be gone, got %#v, %v", obj, err) } } @@ -1048,7 +1048,7 @@ func testPodBindingEviction(c *testContext) { forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background} defer func() { err := 
c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(pod.GetName(), forceDelete) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { c.t.Error(err) return } @@ -1414,7 +1414,7 @@ func createOrGetResource(client dynamic.Interface, gvr schema.GroupVersionResour if err == nil { return obj, nil } - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return nil, err } return client.Resource(gvr).Namespace(ns).Create(stubObj, metav1.CreateOptions{}) diff --git a/test/integration/apiserver/apply/apply_crd_test.go b/test/integration/apiserver/apply/apply_crd_test.go index 2451136f38d..c2a73813402 100644 --- a/test/integration/apiserver/apply/apply_crd_test.go +++ b/test/integration/apiserver/apply/apply_crd_test.go @@ -25,7 +25,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" genericfeatures "k8s.io/apiserver/pkg/features" @@ -108,7 +108,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -299,7 +299,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -339,7 +339,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result) } - status, ok = err.(*errors.StatusError) + status, ok = err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -504,7 +504,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -698,7 +698,7 @@ spec: if err == nil { t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result) } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index b1c206db12c..996a18712d0 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -28,7 +28,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -276,7 +276,7 @@ func TestCreateOnApplyFailsWithUID(t *testing.T) { }`)). Do(). 
Get() - if !errors.IsConflict(err) { + if !apierrors.IsConflict(err) { t.Fatalf("Expected conflict error but got: %v", err) } } @@ -348,7 +348,7 @@ func TestApplyUpdateApplyConflictForced(t *testing.T) { if err == nil { t.Fatalf("Expecting to get conflicts when applying object") } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get conflicts as API error") } @@ -849,7 +849,7 @@ func TestApplyFailsWithVersionMismatch(t *testing.T) { if err == nil { t.Fatalf("Expecting to get version mismatch when applying object") } - status, ok := err.(*errors.StatusError) + status, ok := err.(*apierrors.StatusError) if !ok { t.Fatalf("Expecting to get version mismatch as API error") } diff --git a/test/integration/apiserver/certreload/certreload_test.go b/test/integration/apiserver/certreload/certreload_test.go index a23ae31703c..97bdcfcdfeb 100644 --- a/test/integration/apiserver/certreload/certreload_test.go +++ b/test/integration/apiserver/certreload/certreload_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/dynamiccertificates" @@ -146,7 +146,7 @@ MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps= func waitForConfigMapCAContent(t *testing.T, kubeClient kubernetes.Interface, key, content string, count int) func() (bool, error) { return func() (bool, error) { clusterAuthInfo, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get("extension-apiserver-authentication", metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index 0785428862b..388a35a9dee 100644 --- a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -22,7 +22,7 @@ import ( "testing" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" @@ -60,7 +60,7 @@ func TestMaxJSONPatchOperations(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } if !strings.Contains(err.Error(), "The allowed maximum operations in a JSON patch is") { diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index f343a272c0c..f6c2e1b7b63 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -22,7 +22,6 @@ import ( "testing" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -46,7 +45,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -69,7 +68,7 
@@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -80,7 +79,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } @@ -89,7 +88,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -105,7 +104,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -121,7 +120,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } }) @@ -137,7 +136,7 @@ func TestMaxResourceSize(t *testing.T) { patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). Body(patchBody).Do().Error() - if err != nil && !errors.IsBadRequest(err) { + if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %#v", err) } }) @@ -155,7 +154,7 @@ func TestMaxResourceSize(t *testing.T) { if err == nil { t.Fatalf("unexpected no error") } - if !errors.IsRequestEntityTooLargeError(err) { + if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected requested entity too large err, got %v", err) } diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index 6216d5ad842..cf063cd31b1 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -25,7 +25,7 @@ import ( "github.com/google/uuid" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -86,7 +86,7 @@ func TestPatchConflicts(t *testing.T) { Do(). 
Get() - if errors.IsConflict(err) { + if apierrors.IsConflict(err) { t.Logf("tolerated conflict error patching %s: %v", "secrets", err) return } diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 6370aee186c..3c5885c7528 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -28,7 +28,7 @@ import ( policy "k8s.io/api/policy/v1beta1" storagev1 "k8s.io/api/storage/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -638,14 +638,14 @@ func expect(t *testing.T, f func() error, wantErr func(error) bool) (timeout boo func expectForbidden(t *testing.T, f func() error) { t.Helper() - if ok, err := expect(t, f, errors.IsForbidden); !ok { + if ok, err := expect(t, f, apierrors.IsForbidden); !ok { t.Errorf("Expected forbidden error, got %v", err) } } func expectNotFound(t *testing.T, f func() error) { t.Helper() - if ok, err := expect(t, f, errors.IsNotFound); !ok { + if ok, err := expect(t, f, apierrors.IsNotFound); !ok { t.Errorf("Expected notfound error, got %v", err) } } diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index c8ce944ee45..8a1d8f81b4a 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -25,7 +25,7 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -310,7 +310,7 @@ func validateDaemonSetPodsAndMarkReady( func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return false, nil } if err != nil { diff --git a/test/integration/dryrun/dryrun_test.go b/test/integration/dryrun/dryrun_test.go index 730f887f085..d3edf307488 100644 --- a/test/integration/dryrun/dryrun_test.go +++ b/test/integration/dryrun/dryrun_test.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/api/core/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -55,7 +55,7 @@ func DryRunCreateTest(t *testing.T, rsc dynamic.ResourceInterface, obj *unstruct obj.GroupVersionKind()) } - if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !errors.IsNotFound(err) { + if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatalf("object shouldn't exist: %v", err) } } @@ -92,7 +92,7 @@ func getReplicasOrFail(t *testing.T, obj *unstructured.Unstructured) int64 { func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) { obj, err := rsc.Get(name, metav1.GetOptions{}, "scale") - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return } if err != nil { @@ -119,7 +119,7 @@ func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name 
stri func DryRunScaleUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) { obj, err := rsc.Get(name, metav1.GetOptions{}, "scale") - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return } if err != nil { @@ -156,7 +156,7 @@ func DryRunUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) } obj.SetAnnotations(map[string]string{"update": "true"}) obj, err = rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}) - if err == nil || !errors.IsConflict(err) { + if err == nil || !apierrors.IsConflict(err) { break } } diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index a0be6e50287..da3cd0e89bb 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" @@ -114,9 +114,9 @@ func TestConcurrentEvictionRequests(t *testing.T) { err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { - case errors.IsTooManyRequests(e): + case apierrors.IsTooManyRequests(e): return false, nil - case errors.IsConflict(e): + case apierrors.IsConflict(e): return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e) case e == nil: return true, nil @@ -132,7 +132,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { _, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{}) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): atomic.AddUint32(&numberPodsEvicted, 1) // pod was evicted and deleted so return from goroutine immediately return @@ -222,9 +222,9 @@ func TestTerminalPodEviction(t *testing.T) { err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction) switch { - case errors.IsTooManyRequests(e): + case apierrors.IsTooManyRequests(e): return false, nil - case errors.IsConflict(e): + case apierrors.IsConflict(e): return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e) case e == nil: return true, nil diff --git a/test/integration/garbagecollector/cluster_scoped_owner_test.go b/test/integration/garbagecollector/cluster_scoped_owner_test.go index 1f8092f7890..b1a68f812b6 100644 --- a/test/integration/garbagecollector/cluster_scoped_owner_test.go +++ b/test/integration/garbagecollector/cluster_scoped_owner_test.go @@ -24,7 +24,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -119,7 +119,7 @@ func TestClusterScopedOwners(t *testing.T) { if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) { _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{}) switch { - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): return true, nil case err != nil: return false, err diff --git a/test/integration/garbagecollector/garbage_collector_test.go 
b/test/integration/garbagecollector/garbage_collector_test.go index 6f3e97f79df..f78060b5ea8 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -28,7 +28,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -698,7 +698,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) { if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) { _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -766,7 +766,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) { if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) { _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -843,7 +843,7 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) { } if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { _, err := podClient.Get(pod.Name, metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("Failed waiting for pod %q to be deleted", pod.Name) } @@ -950,7 +950,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) { // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", owner.GetName()) } @@ -960,7 +960,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", dependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", dependent.GetName(), err) } } @@ -1028,7 +1028,7 @@ func TestMixedRelationships(t *testing.T) { // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(customOwner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", customOwner.GetName()) } @@ -1038,7 +1038,7 @@ func TestMixedRelationships(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", coreDependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", coreDependent.GetName(), err) } } @@ -1052,7 +1052,7 @@ func TestMixedRelationships(t *testing.T) { // Ensure the owner is deleted. 
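
Several garbage-collector hunks here and below reduce "wait until deleted" to a poll whose condition is simply apierrors.IsNotFound(err). A reusable sketch of that shape; waitGone and the getter closure are illustrative, not from this patch:

package e2esketch

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitGone polls get until it returns 404 NotFound. As in the tests, a nil
// error (object still exists) and a non-404 error both count as "not gone
// yet", so the poll simply times out if the object never disappears.
func waitGone(get func() error, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		return apierrors.IsNotFound(get()), nil
	})
}

In these tests the getter is a closure such as func() error { _, err := configMapClient.Get(name, metav1.GetOptions{}); return err }.
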
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := configMapClient.Get(coreOwner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", coreOwner.GetName()) } @@ -1062,7 +1062,7 @@ func TestMixedRelationships(t *testing.T) { if err == nil { t.Fatalf("expected dependent %q to be deleted", customDependent.GetName()) } else { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Fatalf("unexpected error getting dependent %q: %v", customDependent.GetName(), err) } } @@ -1123,7 +1123,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner %q to be deleted", owner.GetName()) } @@ -1131,7 +1131,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Ensure the dependent is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { _, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{}) - return errors.IsNotFound(err), nil + return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for dependent %q (owned by %q) to be deleted", dependent.GetName(), owner.GetName()) } diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index 1b6e25b4874..883f6dd0df3 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -20,7 +20,7 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -78,7 +78,7 @@ func createNodes(apiURL string, config *Config) error { for i := 0; i < config.NumNodes; i++ { var err error for j := 0; j < maxCreateRetries; j++ { - if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) { + if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && apierrors.IsServerTimeout(err) { klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) time.Sleep(retryDelay) continue diff --git a/test/integration/master/audit_dynamic_test.go b/test/integration/master/audit_dynamic_test.go index 81bdc22979b..c7b27a6bd1c 100644 --- a/test/integration/master/audit_dynamic_test.go +++ b/test/integration/master/audit_dynamic_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" auditinternal "k8s.io/apiserver/pkg/apis/audit" @@ -231,7 +231,7 @@ func sinkHealth(t *testing.T, kubeclient kubernetes.Interface, servers ...*utils // corresponding expected audit event func simpleOp(name string, kubeclient kubernetes.Interface) ([]utils.AuditEvent, error) { _, err := kubeclient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return nil, err } diff --git 
diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go
index ec89466fdcd..f9f018f6a6b 100644
--- a/test/integration/master/synthetic_master_test.go
+++ b/test/integration/master/synthetic_master_test.go
@@ -36,7 +36,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/authentication/group"
@@ -102,7 +102,7 @@ func TestKubernetesService(t *testing.T) {
 	defer closeFn()
 	coreClient := clientset.NewForConfigOrDie(config.GenericConfig.LoopbackClientConfig)
 	err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
-		if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil && errors.IsNotFound(err) {
+		if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) {
 			return false, nil
 		} else if err != nil {
 			return false, err
@@ -682,10 +682,10 @@ func TestServiceAlloc(t *testing.T) {
 	// Wait until the default "kubernetes" service is created.
 	if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
 		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
-		if err != nil && !errors.IsNotFound(err) {
+		if err != nil && !apierrors.IsNotFound(err) {
 			return false, err
 		}
-		return !errors.IsNotFound(err), nil
+		return !apierrors.IsNotFound(err), nil
 	}); err != nil {
 		t.Fatalf("creating kubernetes service timed out")
 	}
@@ -864,7 +864,7 @@ func TestUpdateNodeObjects(t *testing.T) {
 				n.Status.Conditions = nil
 			}
 			if _, err := c.Nodes().UpdateStatus(n); err != nil {
-				if !errors.IsConflict(err) {
+				if !apierrors.IsConflict(err) {
 					fmt.Printf("[%d] error after %d: %v\n", node, i, err)
 					break
 				}
diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go
index a549629f9e7..fd3f7ebf9d9 100644
--- a/test/integration/replicaset/replicaset_test.go
+++ b/test/integration/replicaset/replicaset_test.go
@@ -26,7 +26,7 @@ import (
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -681,7 +681,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
 		if err != nil {
 			// If the pod is not found, it means the RS picks the pod for deletion (it is extra)
 			// Verify there is only one pod in namespace and it has ControllerRef to the RS
-			if errors.IsNotFound(err) {
+			if apierrors.IsNotFound(err) {
 				pods := getPods(t, podClient, labelMap())
 				if len(pods.Items) != 1 {
 					return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items))
diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go
index 214ad6be9a2..ba50d8749ef 100644
--- a/test/integration/replicationcontroller/replicationcontroller_test.go
+++ b/test/integration/replicationcontroller/replicationcontroller_test.go
@@ -24,7 +24,7 @@ import (
 	"time"
 
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" @@ -654,7 +654,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { if err != nil { // If the pod is not found, it means the RC picks the pod for deletion (it is extra) // Verify there is only one pod in namespace and it has ControllerRef to the RC - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 1 { return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items)) diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go index 95f20d10bc3..0d57e533840 100644 --- a/test/integration/scheduler/predicates_test.go +++ b/test/integration/scheduler/predicates_test.go @@ -22,7 +22,7 @@ import ( "time" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -835,7 +835,7 @@ func TestInterPodAffinity(t *testing.T) { } testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod) if err != nil { - if !(test.errorType == "invalidPod" && errors.IsInvalid(err)) { + if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) } } @@ -1017,7 +1017,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { } } testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) - if err != nil && !errors.IsInvalid(err) { + if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/priorities_test.go b/test/integration/scheduler/priorities_test.go index 149de57c23e..05d4e6705ee 100644 --- a/test/integration/scheduler/priorities_test.go +++ b/test/integration/scheduler/priorities_test.go @@ -22,7 +22,7 @@ import ( "testing" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -346,7 +346,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { } } testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) - if err != nil && !errors.IsInvalid(err) { + if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 68f44c89735..42f1bf479c1 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -26,7 +26,7 @@ import ( v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -283,7 +283,7 @@ func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key stri switch { case err == nil && passFunc(n): return true, nil - case errors.IsNotFound(err): + case apierrors.IsNotFound(err): nodes = append(nodes, nil) case err != nil: t.Errorf("Unexpected error: %v", err) @@ -557,7 +557,7 @@ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) func podDeleted(c 
 func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 	return func() (bool, error) {
 		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
-		if errors.IsNotFound(err) {
+		if apierrors.IsNotFound(err) {
 			return true, nil
 		}
 		if pod.DeletionTimestamp != nil {
@@ -737,7 +737,7 @@ func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Po
 func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 	for _, p := range pods {
 		err := cs.CoreV1().Pods(p.Namespace).Delete(p.Name, metav1.NewDeleteOptions(0))
-		if err != nil && !errors.IsNotFound(err) {
+		if err != nil && !apierrors.IsNotFound(err) {
 			t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
 		}
 	}
diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go
index bf29cff7d1e..980ff0cd64b 100644
--- a/test/integration/serviceaccount/service_account_test.go
+++ b/test/integration/serviceaccount/service_account_test.go
@@ -32,7 +32,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -207,7 +207,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
 
 	// Create "my" namespace
 	_, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})
-	if err != nil && !errors.IsAlreadyExists(err) {
+	if err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
@@ -290,13 +290,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
 
 	// Create "my" namespace
 	_, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}})
-	if err != nil && !errors.IsAlreadyExists(err) {
+	if err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
 	// Create "other" namespace
 	_, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}})
-	if err != nil && !errors.IsAlreadyExists(err) {
+	if err != nil && !apierrors.IsAlreadyExists(err) {
 		t.Fatalf("could not create namespace: %v", err)
 	}
 
@@ -496,7 +496,7 @@ func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWai
 	var err error
 	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
 		user, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
-		if errors.IsNotFound(err) {
+		if apierrors.IsNotFound(err) {
 			return false, nil
 		}
 		if err != nil {
@@ -513,7 +513,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 
 	findToken := func() (bool, error) {
 		user, err := c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
-		if errors.IsNotFound(err) {
+		if apierrors.IsNotFound(err) {
 			return false, nil
 		}
 		if err != nil {
@@ -522,7 +522,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st
 
 		for _, ref := range user.Secrets {
 			secret, err := c.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{})
-			if errors.IsNotFound(err) {
+			if apierrors.IsNotFound(err) {
 				continue
 			}
 			if err != nil {
@@ -586,8 +586,8 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string
 
 	for _, op := range readOps {
 		err := op()
-		unauthorizedError := errors.IsUnauthorized(err)
-		forbiddenError := errors.IsForbidden(err)
+		unauthorizedError := apierrors.IsUnauthorized(err)
+		forbiddenError := apierrors.IsForbidden(err)
 
 		switch {
 		case !authenticated && !unauthorizedError:
@@ -603,8 +603,8 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string
 
 	for _, op := range writeOps {
 		err := op()
-		unauthorizedError := errors.IsUnauthorized(err)
-		forbiddenError := errors.IsForbidden(err)
+		unauthorizedError := apierrors.IsUnauthorized(err)
+		forbiddenError := apierrors.IsForbidden(err)
 
 		switch {
 		case !authenticated && !unauthorizedError:
diff --git a/test/integration/utils.go b/test/integration/utils.go
index 5a0a6d51b29..e1f6e0092f1 100644
--- a/test/integration/utils.go
+++ b/test/integration/utils.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"google.golang.org/grpc"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/storage/storagebackend"
@@ -62,7 +62,7 @@ func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, in
 		if err == nil {
 			return false, nil
 		}
-		if errors.IsNotFound(err) {
+		if apierrors.IsNotFound(err) {
 			return true, nil
 		}
 		return false, err
diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go
index 0f9c8d16b7d..083e792e44e 100644
--- a/test/soak/serve_hostnames/serve_hostnames.go
+++ b/test/soak/serve_hostnames/serve_hostnames.go
@@ -30,7 +30,7 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -127,7 +127,7 @@ func main() {
 	// wait until the namespace disappears
 	for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
 		if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
-			if errors.IsNotFound(err) {
+			if apierrors.IsNotFound(err) {
 				return
 			}
 		}
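
The rename is purely mechanical: every call site keeps its behavior, and only the import alias changes from "errors" or "apierrs" to the single alias "apierrors". A minimal sketch of the resulting convention, with a hypothetical helper name not taken from this diff:

	package example

	import (
		"time"

		apierrors "k8s.io/apimachinery/pkg/api/errors"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/util/wait"
		clientset "k8s.io/client-go/kubernetes"
	)

	// waitForPodGone is a hypothetical helper illustrating the unified alias:
	// API error predicates are always reached via "apierrors", which leaves
	// the bare name "errors" free for the standard library package.
	func waitForPodGone(c clientset.Interface, ns, name string) error {
		return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
			_, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return true, nil // pod fully deleted
			}
			// A nil error means the pod still exists, so keep polling;
			// any other error aborts the wait.
			return false, err
		})
	}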