unify the alias of API errors under pkg and staging

danielqsj 2019-11-12 16:26:59 +08:00
parent fc738cbb1d
commit 5bc0e26c19
23 changed files with 116 additions and 116 deletions
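Illustration only (not part of this diff): a minimal sketch of the convention the commit converges on, where every call site imports k8s.io/apimachinery/pkg/api/errors under the single alias apierrors instead of the previous mix of apierrs, kapierrors, k8serr, kubeerr, and kerrors. The package wrapper, resource, and name below are arbitrary examples, not code from the repository.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build a NotFound status error and inspect it through the unified
	// apierrors alias; the resource and object name are placeholders.
	err := apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, "example")
	if apierrors.IsNotFound(err) {
		fmt.Println("not found, safe to create:", err)
	}
}

A single alias keeps error handling greppable and consistent across pkg/ and staging/, which is the point of this rename-only change.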

View File

@ -22,7 +22,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@ -177,7 +177,7 @@ func (c *Publisher) syncNamespace(ns string) error {
cm, err := c.cmLister.ConfigMaps(ns).Get(RootCACertConfigMapName)
switch {
case apierrs.IsNotFound(err):
case apierrors.IsNotFound(err):
_, err := c.client.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: RootCACertConfigMapName,

View File

@ -21,7 +21,7 @@ import (
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -188,7 +188,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
}()
ns, err := c.nsLister.Get(key)
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
@ -204,7 +204,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); {
case err == nil:
continue
case apierrs.IsNotFound(err):
case apierrors.IsNotFound(err):
case err != nil:
return err
}
@ -212,9 +212,9 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
// TODO eliminate this once the fake client can handle creation without NS
sa.Namespace = ns.Name
if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrors.IsAlreadyExists(err) {
// we can safely ignore terminating namespace errors
if !apierrs.HasStatusCause(err, v1.NamespaceTerminatingCause) {
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
createFailures = append(createFailures, err)
}
}

View File

@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
@ -423,7 +423,7 @@ func TestProvisionSync(t *testing.T) {
// Inject errors to simulate crashed API server during
// kubeclient.PersistentVolumes.Create()
{Verb: "create", Resource: "persistentvolumes", Error: errors.New("Mock creation error1")},
{Verb: "create", Resource: "persistentvolumes", Error: apierrs.NewAlreadyExists(api.Resource("persistentvolumes"), "")},
{Verb: "create", Resource: "persistentvolumes", Error: apierrors.NewAlreadyExists(api.Resource("persistentvolumes"), "")},
},
wrapTestWithPluginCalls(
nil, // recycle calls

View File

@ -24,7 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
@ -534,16 +534,16 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
// updated to Released state when PVC does not exist.
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
obj, err = ctrl.claimLister.PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name)
if err != nil && !apierrs.IsNotFound(err) {
if err != nil && !apierrors.IsNotFound(err) {
return err
}
found = !apierrs.IsNotFound(err)
found = !apierrors.IsNotFound(err)
if !found {
obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil && !apierrs.IsNotFound(err) {
if err != nil && !apierrors.IsNotFound(err) {
return err
}
found = !apierrs.IsNotFound(err)
found = !apierrors.IsNotFound(err)
}
}
}
@ -1391,7 +1391,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil && !apierrs.IsNotFound(err) {
if err != nil && !apierrors.IsNotFound(err) {
klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err)
return pluginName, err
}
@ -1489,7 +1489,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
var newVol *v1.PersistentVolume
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrors.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))

View File

@ -24,7 +24,7 @@ import (
"sync"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -223,7 +223,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, volume.DeepCopy(), nil
}
klog.V(4).Infof("GetVolume: volume %s not found", name)
return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name)
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("get", "persistentvolumeclaims"):
name := action.(core.GetAction).GetName()
@ -233,7 +233,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, claim.DeepCopy(), nil
}
klog.V(4).Infof("GetClaim: claim %s not found", name)
return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name)
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("delete", "persistentvolumes"):
name := action.(core.DeleteAction).GetName()

View File

@ -21,7 +21,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -150,7 +150,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
}()
pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
klog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName)
return nil
}

View File

@ -21,7 +21,7 @@ import (
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
@ -127,7 +127,7 @@ func (c *Controller) processPV(pvName string) error {
}()
pv, err := c.pvLister.Get(pvName)
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
klog.V(4).Infof("PV %s not found, ignoring", pvName)
return nil
}

View File

@ -20,7 +20,7 @@ import (
"context"
"fmt"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authorization/authorizer"
@ -50,17 +50,17 @@ func (r *REST) New() runtime.Object {
func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
localSubjectAccessReview, ok := obj.(*authorizationapi.LocalSubjectAccessReview)
if !ok {
return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj))
return nil, apierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj))
}
if errs := authorizationvalidation.ValidateLocalSubjectAccessReview(localSubjectAccessReview); len(errs) > 0 {
return nil, kapierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs)
return nil, apierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs)
}
namespace := genericapirequest.NamespaceValue(ctx)
if len(namespace) == 0 {
return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
return nil, apierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
}
if namespace != localSubjectAccessReview.Namespace {
return nil, kapierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace))
return nil, apierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace))
}
if createValidation != nil {

View File

@ -20,7 +20,7 @@ import (
"context"
"fmt"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authorization/authorizer"
@ -49,10 +49,10 @@ func (r *REST) New() runtime.Object {
func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview)
if !ok {
return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj))
return nil, apierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj))
}
if errs := authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview); len(errs) > 0 {
return nil, kapierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs)
return nil, apierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs)
}
if createValidation != nil {

View File

@ -22,7 +22,7 @@ import (
"fmt"
"sync"
k8serr "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/registry/generic"
@ -209,10 +209,10 @@ func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
switch {
case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
if snapshot.ResourceVersion != existing.ResourceVersion {
return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
}
case len(existing.ResourceVersion) != 0:
return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
}
last = snapshot.ResourceVersion
return snapshot, nil

View File

@ -30,7 +30,7 @@ import (
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
@ -108,7 +108,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
_, err = c.k8s.StorageV1().VolumeAttachments().Create(attachment)
alreadyExist := false
if err != nil {
if !apierrs.IsAlreadyExists(err) {
if !apierrors.IsAlreadyExists(err) {
return "", errors.New(log("attacher.Attach failed: %v", err))
}
alreadyExist = true
@ -388,7 +388,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
}
if err := c.k8s.StorageV1().VolumeAttachments().Delete(attachID, nil); err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
// object deleted or never existed, done
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
return nil
@ -415,7 +415,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
//object deleted or never existed, done
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return nil

View File

@ -28,7 +28,7 @@ import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -83,7 +83,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R
for i := 0; i < 100; i++ {
attach, err = client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
<-ticker.C
continue
}
@ -225,7 +225,7 @@ func TestAttacherAttach(t *testing.T) {
status.AttachError = &storage.VolumeError{
Message: "attacher error",
}
errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status()
errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status()
fakeWatcher.Error(&errStatus)
} else {
status.Attached = true
@ -921,7 +921,7 @@ func TestAttacherDetach(t *testing.T) {
reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
// return Forbidden to all DELETE requests
if action.Matches("delete", "volumeattachments") {
return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
return true, nil, apierrors.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
}
return false, nil, nil
},
@ -971,7 +971,7 @@ func TestAttacherDetach(t *testing.T) {
csiAttacher.waitSleepTime = 100 * time.Millisecond
go func() {
if watchError {
errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status()
errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status()
fakeWatcher.Error(&errStatus)
return
}
@ -986,7 +986,7 @@ func TestAttacherDetach(t *testing.T) {
}
attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
if !apierrors.IsNotFound(err) {
t.Fatalf("unexpected err: %v", err)
}
} else {

View File

@ -30,7 +30,7 @@ import (
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
@ -304,7 +304,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
if err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName))
return nil, nil
}

View File

@ -30,7 +30,7 @@ import (
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -742,7 +742,7 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
}
csiDriver, err := p.csiDriverLister.Get(driver)
if err != nil {
if apierrs.IsNotFound(err) {
if apierrors.IsNotFound(err) {
// Don't skip attach if CSIDriver does not exist
return false, nil
}
@ -779,7 +779,7 @@ func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storag
}
c, err := p.csiDriverLister.Get(driver)
if err != nil && !apierrs.IsNotFound(err) {
if err != nil && !apierrors.IsNotFound(err) {
// Some internal error.
return err
}

View File

@ -41,7 +41,7 @@ import (
fuzzer "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
@ -2802,7 +2802,7 @@ func TestGetNamespaceSelfLink(t *testing.T) {
func TestGetMissing(t *testing.T) {
storage := map[string]rest.Storage{}
simpleStorage := SimpleRESTStorage{
errors: map[string]error{"get": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
errors: map[string]error{"get": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
}
storage["simple"] = &simpleStorage
handler := handle(storage)
@ -2822,7 +2822,7 @@ func TestGetMissing(t *testing.T) {
func TestGetRetryAfter(t *testing.T) {
storage := map[string]rest.Storage{}
simpleStorage := SimpleRESTStorage{
errors: map[string]error{"get": apierrs.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)},
errors: map[string]error{"get": apierrors.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)},
}
storage["simple"] = &simpleStorage
handler := handle(storage)
@ -2925,7 +2925,7 @@ func TestConnectResponderError(t *testing.T) {
connectStorage := &ConnecterRESTStorage{}
connectStorage.handlerFunc = func() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
connectStorage.receivedResponder.Error(apierrs.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated")))
connectStorage.receivedResponder.Error(apierrors.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated")))
})
}
storage := map[string]rest.Storage{
@ -3271,7 +3271,7 @@ func TestDeleteMissing(t *testing.T) {
storage := map[string]rest.Storage{}
ID := "id"
simpleStorage := SimpleRESTStorage{
errors: map[string]error{"delete": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
errors: map[string]error{"delete": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
}
storage["simple"] = &simpleStorage
handler := handle(storage)
@ -3543,7 +3543,7 @@ func TestUpdateMissing(t *testing.T) {
storage := map[string]rest.Storage{}
ID := "id"
simpleStorage := SimpleRESTStorage{
errors: map[string]error{"update": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
errors: map[string]error{"update": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
}
storage["simple"] = &simpleStorage
handler := handle(storage)
@ -3581,7 +3581,7 @@ func TestCreateNotFound(t *testing.T) {
"simple": &SimpleRESTStorage{
// storage.Create can fail with not found error in theory.
// See http://pr.k8s.io/486#discussion_r15037092.
errors: map[string]error{"create": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
errors: map[string]error{"create": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
},
})
server := httptest.NewServer(handler)
@ -4217,7 +4217,7 @@ func expectAPIStatus(t *testing.T, method, url string, data []byte, code int) *m
func TestDelayReturnsError(t *testing.T) {
storage := SimpleRESTStorage{
injectedFunction: func(obj runtime.Object) (runtime.Object, error) {
return nil, apierrs.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar")
return nil, apierrors.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar")
},
}
handler := handle(map[string]rest.Storage{"foo": &storage})

View File

@ -24,7 +24,7 @@ import (
"sync"
"time"
kubeerr "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
@ -220,13 +220,13 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string,
key := NamespaceKeyRootFunc(ctx, prefix)
ns, ok := genericapirequest.NamespaceFrom(ctx)
if !ok || len(ns) == 0 {
return "", kubeerr.NewBadRequest("Namespace parameter required.")
return "", apierrors.NewBadRequest("Namespace parameter required.")
}
if len(name) == 0 {
return "", kubeerr.NewBadRequest("Name parameter required.")
return "", apierrors.NewBadRequest("Name parameter required.")
}
if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
}
key = key + "/" + name
return key, nil
@ -236,10 +236,10 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string,
// to a resource relative to the given prefix without a namespace.
func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {
if len(name) == 0 {
return "", kubeerr.NewBadRequest("Name parameter required.")
return "", apierrors.NewBadRequest("Name parameter required.")
}
if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
}
key := prefix + "/" + name
return key, nil
@ -363,7 +363,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation
if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {
err = storeerr.InterpretCreateError(err, qualifiedResource, name)
err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
if !kubeerr.IsAlreadyExists(err) {
if !apierrors.IsAlreadyExists(err) {
return nil, err
}
if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
@ -374,7 +374,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation
return nil, err
}
if accessor.GetDeletionTimestamp() != nil {
msg := &err.(*kubeerr.StatusError).ErrStatus.Message
msg := &err.(*apierrors.StatusError).ErrStatus.Message
*msg = fmt.Sprintf("object is being deleted: %s", *msg)
}
return nil, err
@ -493,7 +493,7 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj
}
if version == 0 {
if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate {
return nil, nil, kubeerr.NewNotFound(qualifiedResource, name)
return nil, nil, apierrors.NewNotFound(qualifiedResource, name)
}
creating = true
creatingObj = obj
@ -533,10 +533,10 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj
// leave the Kind field empty. See the discussion in #18526.
qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}
fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), resourceVersion, "must be specified for an update")}
return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList)
}
if resourceVersion != version {
return nil, nil, kubeerr.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
}
}
if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
@ -916,7 +916,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V
// check if obj has pending finalizers
accessor, err := meta.Accessor(obj)
if err != nil {
return nil, false, kubeerr.NewInternalError(err)
return nil, false, apierrors.NewInternalError(err)
}
pendingFinalizers := len(accessor.GetFinalizers()) != 0
var ignoreNotFound bool
@ -1038,7 +1038,7 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
errs <- err
return
}
if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !kubeerr.IsNotFound(err) {
if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) {
klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err)
errs <- err
return

View File

@ -25,7 +25,7 @@ import (
"strings"
"sync"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
@ -332,10 +332,10 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) {
func transformErrorToEvent(err error) *watch.Event {
err = interpretWatchError(err)
if _, ok := err.(apierrs.APIStatus); !ok {
err = apierrs.NewInternalError(err)
if _, ok := err.(apierrors.APIStatus); !ok {
err = apierrors.NewInternalError(err)
}
status := err.(apierrs.APIStatus).Status()
status := err.(apierrors.APIStatus).Status()
return &watch.Event{
Type: watch.Error,
Object: &status,

View File

@ -26,7 +26,7 @@ import (
"sync"
"time"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -375,7 +375,7 @@ loop:
break loop
}
if event.Type == watch.Error {
return apierrs.FromObject(event.Object)
return apierrors.FromObject(event.Object)
}
if r.expectedType != nil {
if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
@ -479,9 +479,9 @@ func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
}
func isExpiredError(err error) bool {
// In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and
// apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
// and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
// In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
// apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
// and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
return apierrs.IsResourceExpired(err) || apierrs.IsGone(err)
return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
}

View File

@ -26,7 +26,7 @@ import (
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@ -520,7 +520,7 @@ func TestReflectorExpiredExactResourceVersion(t *testing.T) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}, Items: pods[0:4]}, nil
case "10":
// When watch cache is disabled, if the exact ResourceVersion requested is not available, a "Expired" error is returned.
return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.")
return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.")
case "":
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil
default:
@ -584,7 +584,7 @@ func TestReflectorFullListIfExpired(t *testing.T) {
return &v1.PodList{ListMeta: metav1.ListMeta{Continue: "C1", ResourceVersion: "11"}, Items: pods[0:4]}, nil
// second page of the above list
case rvContinueLimit("", "C1", 4):
return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.")
return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.")
// rv=10 unlimited list
case rvContinueLimit("10", "", 0):
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil

View File

@ -34,7 +34,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kubeerr "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@ -651,7 +651,7 @@ func TestApplyRetry(t *testing.T) {
case p == pathRC && m == "PATCH":
if firstPatch {
firstPatch = false
statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
bodyBytes, _ := json.Marshal(statusErr)
bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes))
return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil
@ -1278,7 +1278,7 @@ func TestForceApply(t *testing.T) {
case strings.HasSuffix(p, pathRC) && m == "PATCH":
counts["patch"]++
if counts["patch"] <= 6 {
statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
bodyBytes, _ := json.Marshal(statusErr)
bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes))
return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil

View File

@ -27,7 +27,7 @@ import (
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -483,7 +483,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e
Do()
if o.IgnoreNotFound {
r.IgnoreErrors(kapierrors.IsNotFound)
r.IgnoreErrors(apierrors.IsNotFound)
}
if err := r.Err(); err != nil {
return err

View File

@ -29,7 +29,7 @@ import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
kerrors "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -60,10 +60,10 @@ type debugError interface {
// source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource.
func AddSourceToErr(verb string, source string, err error) error {
if source != "" {
if statusError, ok := err.(kerrors.APIStatus); ok {
if statusError, ok := err.(apierrors.APIStatus); ok {
status := statusError.Status()
status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
return &kerrors.StatusError{ErrStatus: status}
return &apierrors.StatusError{ErrStatus: status}
}
return fmt.Errorf("error when %s %q: %v", verb, source, err)
}
@ -129,8 +129,8 @@ func checkErr(err error, handleErr func(string, int)) {
switch {
case err == ErrExit:
handleErr("", DefaultErrorExitCode)
case kerrors.IsInvalid(err):
details := err.(*kerrors.StatusError).Status().Details
case apierrors.IsInvalid(err):
details := err.(*apierrors.StatusError).Status().Details
s := "The request is invalid"
if details == nil {
handleErr(s, DefaultErrorExitCode)
@ -202,7 +202,7 @@ func StandardErrorMessage(err error) (string, bool) {
if debugErr, ok := err.(debugError); ok {
klog.V(4).Infof(debugErr.DebugError())
}
status, isStatus := err.(kerrors.APIStatus)
status, isStatus := err.(apierrors.APIStatus)
switch {
case isStatus:
switch s := status.Status(); {
@ -213,7 +213,7 @@ func StandardErrorMessage(err error) (string, bool) {
default:
return fmt.Sprintf("Error from server: %s", err.Error()), true
}
case kerrors.IsUnexpectedObjectError(err):
case apierrors.IsUnexpectedObjectError(err):
return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true
}
switch t := err.(type) {
@ -259,7 +259,7 @@ func MultilineError(prefix string, err error) string {
// Returns true if a case exists to handle the error type, or false otherwise.
func PrintErrorWithCauses(err error, errOut io.Writer) bool {
switch t := err.(type) {
case *kerrors.StatusError:
case *apierrors.StatusError:
errorDetails := t.Status().Details
if errorDetails != nil {
fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name)

View File

@ -23,7 +23,7 @@ import (
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/apimachinery/pkg/apis/testapigroup/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -58,8 +58,8 @@ var (
)
func TestReplicationControllerScaleRetry(t *testing.T) {
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError)
@ -94,8 +94,8 @@ func TestReplicationControllerScaleRetry(t *testing.T) {
}
func TestReplicationControllerScaleInvalid(t *testing.T) {
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch"}
scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError)
@ -168,8 +168,8 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
}
func TestDeploymentScaleRetry(t *testing.T) {
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
@ -226,8 +226,8 @@ func TestDeploymentScale(t *testing.T) {
func TestDeploymentScaleInvalid(t *testing.T) {
scaleClientExpectedAction := []string{"patch"}
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@ -299,8 +299,8 @@ func TestStatefulSetScale(t *testing.T) {
func TestStatefulSetScaleRetry(t *testing.T) {
scaleClientExpectedAction := []string{"patch", "get"}
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@ -335,8 +335,8 @@ func TestStatefulSetScaleRetry(t *testing.T) {
func TestStatefulSetScaleInvalid(t *testing.T) {
scaleClientExpectedAction := []string{"patch"}
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@ -407,8 +407,8 @@ func TestReplicaSetScale(t *testing.T) {
}
func TestReplicaSetScaleRetry(t *testing.T) {
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
@ -443,8 +443,8 @@ func TestReplicaSetScaleRetry(t *testing.T) {
}
func TestReplicaSetScaleInvalid(t *testing.T) {
verbsOnError := map[string]*kerrors.StatusError{
"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
verbsOnError := map[string]*apierrors.StatusError{
"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch"}
scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
@ -688,12 +688,12 @@ func TestGenericScale(t *testing.T) {
}
}
func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient {
shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) {
func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*apierrors.StatusError) *fakescale.FakeScaleClient {
shouldReturnAnError := func(verb string) (*apierrors.StatusError, bool) {
if anError, anErrorExists := errorsOnVerb[verb]; anErrorExists {
return anError, true
}
return &kerrors.StatusError{}, false
return &apierrors.StatusError{}, false
}
newReplicas := int32(replicas)
scaleClient := &fakescale.FakeScaleClient{}