Merge pull request #85123 from danielqsj/apierrs
Unify aliases for "k8s.io/apimachinery/pkg/api/errors"
Commit: 4158e7c1de
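The diff below is mechanical: every import of k8s.io/apimachinery/pkg/api/errors that used an ad-hoc alias (apierrs, kapierrors, kerrors, kubeerr, k8serr) is switched to the single alias apierrors, and the call sites are renamed to match. As a minimal sketch of the pattern (the package and helper names here are illustrative and not taken from any file in this PR):

```go
package example

import (
	// After this PR the errors package is always aliased as apierrors;
	// before, the same import appeared as apierrs, kapierrors, kerrors,
	// kubeerr, or k8serr depending on the file.
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// ignoreNotFound is an illustrative helper: a call site that previously read
// apierrs.IsNotFound(err) now reads apierrors.IsNotFound(err).
func ignoreNotFound(err error) error {
	if apierrors.IsNotFound(err) {
		return nil
	}
	return err
}

// notFound shows the constructor side of the same rename, the kind of error
// the tests in this diff build when they need a NotFound response.
func notFound(resource, name string) error {
	return apierrors.NewNotFound(schema.GroupResource{Resource: resource}, name)
}
```

Only the alias changes; the behavior of the helpers (IsNotFound, NewConflict, NewBadRequest, and so on) is untouched.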
@@ -22,7 +22,7 @@ import (
 "time"
 
 v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -177,7 +177,7 @@ func (c *Publisher) syncNamespace(ns string) error {
 
 cm, err := c.cmLister.ConfigMaps(ns).Get(RootCACertConfigMapName)
 switch {
-case apierrs.IsNotFound(err):
+case apierrors.IsNotFound(err):
 _, err := c.client.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: RootCACertConfigMapName,

@@ -21,7 +21,7 @@ import (
 "time"
 
 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -188,7 +188,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
 }()
 
 ns, err := c.nsLister.Get(key)
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return nil
 }
 if err != nil {
@@ -204,7 +204,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
 switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); {
 case err == nil:
 continue
-case apierrs.IsNotFound(err):
+case apierrors.IsNotFound(err):
 case err != nil:
 return err
 }
@@ -212,9 +212,9 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
 // TODO eliminate this once the fake client can handle creation without NS
 sa.Namespace = ns.Name
 
-if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
+if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrors.IsAlreadyExists(err) {
 // we can safely ignore terminating namespace errors
-if !apierrs.HasStatusCause(err, v1.NamespaceTerminatingCause) {
+if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
 createFailures = append(createFailures, err)
 }
 }

@@ -22,7 +22,7 @@ import (
 
 v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/cache"
@@ -423,7 +423,7 @@ func TestProvisionSync(t *testing.T) {
 // Inject errors to simulate crashed API server during
 // kubeclient.PersistentVolumes.Create()
 {Verb: "create", Resource: "persistentvolumes", Error: errors.New("Mock creation error1")},
-{Verb: "create", Resource: "persistentvolumes", Error: apierrs.NewAlreadyExists(api.Resource("persistentvolumes"), "")},
+{Verb: "create", Resource: "persistentvolumes", Error: apierrors.NewAlreadyExists(api.Resource("persistentvolumes"), "")},
 },
 wrapTestWithPluginCalls(
 nil, // recycle calls

@@ -24,7 +24,7 @@ import (
 
 v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -534,16 +534,16 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
 // updated to Released state when PVC does not exist.
 if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
 obj, err = ctrl.claimLister.PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 return err
 }
-found = !apierrs.IsNotFound(err)
+found = !apierrors.IsNotFound(err)
 if !found {
 obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{})
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 return err
 }
-found = !apierrs.IsNotFound(err)
+found = !apierrors.IsNotFound(err)
 }
 }
 }
@@ -1391,7 +1391,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 
 pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
 volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err)
 return pluginName, err
 }
@@ -1489,7 +1489,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
 klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
 var newVol *v1.PersistentVolume
-if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
+if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrors.IsAlreadyExists(err) {
 // Save succeeded.
 if err != nil {
 klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))

@@ -24,7 +24,7 @@ import (
 "sync"
 
 v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -223,7 +223,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 return true, volume.DeepCopy(), nil
 }
 klog.V(4).Infof("GetVolume: volume %s not found", name)
-return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name)
+return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
 
 case action.Matches("get", "persistentvolumeclaims"):
 name := action.(core.GetAction).GetName()
@@ -233,7 +233,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 return true, claim.DeepCopy(), nil
 }
 klog.V(4).Infof("GetClaim: claim %s not found", name)
-return true, nil, apierrs.NewNotFound(action.GetResource().GroupResource(), name)
+return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
 
 case action.Matches("delete", "persistentvolumes"):
 name := action.(core.DeleteAction).GetName()

@@ -21,7 +21,7 @@ import (
 "time"
 
 v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -150,7 +150,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
 }()
 
 pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 klog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName)
 return nil
 }

@@ -21,7 +21,7 @@ import (
 "time"
 
 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 coreinformers "k8s.io/client-go/informers/core/v1"
@@ -127,7 +127,7 @@ func (c *Controller) processPV(pvName string) error {
 }()
 
 pv, err := c.pvLister.Get(pvName)
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 klog.V(4).Infof("PV %s not found, ignoring", pvName)
 return nil
 }

@@ -20,7 +20,7 @@ import (
 "context"
 "fmt"
 
-kapierrors "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apiserver/pkg/authorization/authorizer"
@@ -50,17 +50,17 @@ func (r *REST) New() runtime.Object {
 func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
 localSubjectAccessReview, ok := obj.(*authorizationapi.LocalSubjectAccessReview)
 if !ok {
-return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj))
+return nil, apierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj))
 }
 if errs := authorizationvalidation.ValidateLocalSubjectAccessReview(localSubjectAccessReview); len(errs) > 0 {
-return nil, kapierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs)
+return nil, apierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs)
 }
 namespace := genericapirequest.NamespaceValue(ctx)
 if len(namespace) == 0 {
-return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
+return nil, apierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
 }
 if namespace != localSubjectAccessReview.Namespace {
-return nil, kapierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace))
+return nil, apierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace))
 }
 
 if createValidation != nil {

@@ -20,7 +20,7 @@ import (
 "context"
 "fmt"
 
-kapierrors "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apiserver/pkg/authorization/authorizer"
@@ -49,10 +49,10 @@ func (r *REST) New() runtime.Object {
 func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
 subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview)
 if !ok {
-return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj))
+return nil, apierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj))
 }
 if errs := authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview); len(errs) > 0 {
-return nil, kapierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs)
+return nil, apierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs)
 }
 
 if createValidation != nil {

@@ -22,7 +22,7 @@ import (
 "fmt"
 "sync"
 
-k8serr "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apiserver/pkg/registry/generic"
@@ -209,10 +209,10 @@ func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
 switch {
 case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
 if snapshot.ResourceVersion != existing.ResourceVersion {
-return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
+return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
 }
 case len(existing.ResourceVersion) != 0:
-return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
+return nil, apierrors.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
 }
 last = snapshot.ResourceVersion
 return snapshot, nil

@@ -30,7 +30,7 @@ import (
 
 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/watch"
@@ -108,7 +108,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string
 _, err = c.k8s.StorageV1().VolumeAttachments().Create(attachment)
 alreadyExist := false
 if err != nil {
-if !apierrs.IsAlreadyExists(err) {
+if !apierrors.IsAlreadyExists(err) {
 return "", errors.New(log("attacher.Attach failed: %v", err))
 }
 alreadyExist = true
@@ -388,7 +388,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
 }
 
 if err := c.k8s.StorageV1().VolumeAttachments().Delete(attachID, nil); err != nil {
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 // object deleted or never existed, done
 klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
 return nil
@@ -415,7 +415,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
 klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
 attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{})
 if err != nil {
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 //object deleted or never existed, done
 klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
 return nil

@@ -28,7 +28,7 @@ import (
 
 v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -83,7 +83,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R
 for i := 0; i < 100; i++ {
 attach, err = client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{})
 if err != nil {
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 <-ticker.C
 continue
 }
@@ -225,7 +225,7 @@ func TestAttacherAttach(t *testing.T) {
 status.AttachError = &storage.VolumeError{
 Message: "attacher error",
 }
-errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status()
+errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status()
 fakeWatcher.Error(&errStatus)
 } else {
 status.Attached = true
@@ -921,7 +921,7 @@ func TestAttacherDetach(t *testing.T) {
 reactor: func(action core.Action) (handled bool, ret runtime.Object, err error) {
 // return Forbidden to all DELETE requests
 if action.Matches("delete", "volumeattachments") {
-return true, nil, apierrs.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
+return true, nil, apierrors.NewForbidden(action.GetResource().GroupResource(), action.GetNamespace(), fmt.Errorf("mock error"))
 }
 return false, nil, nil
 },
@@ -971,7 +971,7 @@ func TestAttacherDetach(t *testing.T) {
 csiAttacher.waitSleepTime = 100 * time.Millisecond
 go func() {
 if watchError {
-errStatus := apierrs.NewInternalError(fmt.Errorf("we got an error")).Status()
+errStatus := apierrors.NewInternalError(fmt.Errorf("we got an error")).Status()
 fakeWatcher.Error(&errStatus)
 return
 }
@@ -986,7 +986,7 @@ func TestAttacherDetach(t *testing.T) {
 }
 attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{})
 if err != nil {
-if !apierrs.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
 t.Fatalf("unexpected err: %v", err)
 }
 } else {

@@ -30,7 +30,7 @@ import (
 
 api "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1beta1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/types"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/client-go/kubernetes"
@@ -304,7 +304,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
 
 csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
 if err != nil {
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName))
 return nil, nil
 }

@@ -30,7 +30,7 @@ import (
 
 api "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1beta1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -742,7 +742,7 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
 }
 csiDriver, err := p.csiDriverLister.Get(driver)
 if err != nil {
-if apierrs.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 // Don't skip attach if CSIDriver does not exist
 return false, nil
 }
@@ -779,7 +779,7 @@ func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storag
 }
 
 c, err := p.csiDriverLister.Get(driver)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 // Some internal error.
 return err
 }

@@ -41,7 +41,7 @@ import (
 
 fuzzer "k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
 metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
@@ -2802,7 +2802,7 @@ func TestGetNamespaceSelfLink(t *testing.T) {
 func TestGetMissing(t *testing.T) {
 storage := map[string]rest.Storage{}
 simpleStorage := SimpleRESTStorage{
-errors: map[string]error{"get": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
+errors: map[string]error{"get": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
 }
 storage["simple"] = &simpleStorage
 handler := handle(storage)
@@ -2822,7 +2822,7 @@ func TestGetMissing(t *testing.T) {
 func TestGetRetryAfter(t *testing.T) {
 storage := map[string]rest.Storage{}
 simpleStorage := SimpleRESTStorage{
-errors: map[string]error{"get": apierrs.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)},
+errors: map[string]error{"get": apierrors.NewServerTimeout(schema.GroupResource{Resource: "simples"}, "id", 2)},
 }
 storage["simple"] = &simpleStorage
 handler := handle(storage)
@@ -2925,7 +2925,7 @@ func TestConnectResponderError(t *testing.T) {
 connectStorage := &ConnecterRESTStorage{}
 connectStorage.handlerFunc = func() http.Handler {
 return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-connectStorage.receivedResponder.Error(apierrs.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated")))
+connectStorage.receivedResponder.Error(apierrors.NewForbidden(schema.GroupResource{Resource: "simples"}, itemID, errors.New("you are terminated")))
 })
 }
 storage := map[string]rest.Storage{
@@ -3271,7 +3271,7 @@ func TestDeleteMissing(t *testing.T) {
 storage := map[string]rest.Storage{}
 ID := "id"
 simpleStorage := SimpleRESTStorage{
-errors: map[string]error{"delete": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
+errors: map[string]error{"delete": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
 }
 storage["simple"] = &simpleStorage
 handler := handle(storage)
@@ -3543,7 +3543,7 @@ func TestUpdateMissing(t *testing.T) {
 storage := map[string]rest.Storage{}
 ID := "id"
 simpleStorage := SimpleRESTStorage{
-errors: map[string]error{"update": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
+errors: map[string]error{"update": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, ID)},
 }
 storage["simple"] = &simpleStorage
 handler := handle(storage)
@@ -3581,7 +3581,7 @@ func TestCreateNotFound(t *testing.T) {
 "simple": &SimpleRESTStorage{
 // storage.Create can fail with not found error in theory.
 // See http://pr.k8s.io/486#discussion_r15037092.
-errors: map[string]error{"create": apierrs.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
+errors: map[string]error{"create": apierrors.NewNotFound(schema.GroupResource{Resource: "simples"}, "id")},
 },
 })
 server := httptest.NewServer(handler)
@@ -4217,7 +4217,7 @@ func expectAPIStatus(t *testing.T, method, url string, data []byte, code int) *m
 func TestDelayReturnsError(t *testing.T) {
 storage := SimpleRESTStorage{
 injectedFunction: func(obj runtime.Object) (runtime.Object, error) {
-return nil, apierrs.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar")
+return nil, apierrors.NewAlreadyExists(schema.GroupResource{Resource: "foos"}, "bar")
 },
 }
 handler := handle(map[string]rest.Storage{"foo": &storage})

@@ -24,7 +24,7 @@ import (
 "sync"
 "time"
 
-kubeerr "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 "k8s.io/apimachinery/pkg/api/validation/path"
 metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
@@ -220,13 +220,13 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string,
 key := NamespaceKeyRootFunc(ctx, prefix)
 ns, ok := genericapirequest.NamespaceFrom(ctx)
 if !ok || len(ns) == 0 {
-return "", kubeerr.NewBadRequest("Namespace parameter required.")
+return "", apierrors.NewBadRequest("Namespace parameter required.")
 }
 if len(name) == 0 {
-return "", kubeerr.NewBadRequest("Name parameter required.")
+return "", apierrors.NewBadRequest("Name parameter required.")
 }
 if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
-return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
+return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
 }
 key = key + "/" + name
 return key, nil
@@ -236,10 +236,10 @@ func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string,
 // to a resource relative to the given prefix without a namespace.
 func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) {
 if len(name) == 0 {
-return "", kubeerr.NewBadRequest("Name parameter required.")
+return "", apierrors.NewBadRequest("Name parameter required.")
 }
 if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
-return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
+return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
 }
 key := prefix + "/" + name
 return key, nil
@@ -363,7 +363,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation
 if err := e.Storage.Create(ctx, key, obj, out, ttl, dryrun.IsDryRun(options.DryRun)); err != nil {
 err = storeerr.InterpretCreateError(err, qualifiedResource, name)
 err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
-if !kubeerr.IsAlreadyExists(err) {
+if !apierrors.IsAlreadyExists(err) {
 return nil, err
 }
 if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
@@ -374,7 +374,7 @@ func (e *Store) Create(ctx context.Context, obj runtime.Object, createValidation
 return nil, err
 }
 if accessor.GetDeletionTimestamp() != nil {
-msg := &err.(*kubeerr.StatusError).ErrStatus.Message
+msg := &err.(*apierrors.StatusError).ErrStatus.Message
 *msg = fmt.Sprintf("object is being deleted: %s", *msg)
 }
 return nil, err
@@ -493,7 +493,7 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj
 }
 if version == 0 {
 if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate {
-return nil, nil, kubeerr.NewNotFound(qualifiedResource, name)
+return nil, nil, apierrors.NewNotFound(qualifiedResource, name)
 }
 creating = true
 creatingObj = obj
@@ -533,10 +533,10 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj
 // leave the Kind field empty. See the discussion in #18526.
 qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}
 fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), resourceVersion, "must be specified for an update")}
-return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
+return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList)
 }
 if resourceVersion != version {
-return nil, nil, kubeerr.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
+return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
 }
 }
 if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
@@ -916,7 +916,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V
 // check if obj has pending finalizers
 accessor, err := meta.Accessor(obj)
 if err != nil {
-return nil, false, kubeerr.NewInternalError(err)
+return nil, false, apierrors.NewInternalError(err)
 }
 pendingFinalizers := len(accessor.GetFinalizers()) != 0
 var ignoreNotFound bool
@@ -933,7 +933,7 @@ func (e *Store) Delete(ctx context.Context, name string, deleteValidation rest.V
 if err == nil && deleteImmediately && preconditions.ResourceVersion != nil {
 accessor, err = meta.Accessor(out)
 if err != nil {
-return out, false, kubeerr.NewInternalError(err)
+return out, false, apierrors.NewInternalError(err)
 }
 resourceVersion := accessor.GetResourceVersion()
 preconditions.ResourceVersion = &resourceVersion
@@ -1038,7 +1038,7 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
 errs <- err
 return
 }
-if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !kubeerr.IsNotFound(err) {
+if _, _, err := e.Delete(ctx, accessor.GetName(), deleteValidation, options); err != nil && !apierrors.IsNotFound(err) {
 klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err)
 errs <- err
 return

@@ -25,7 +25,7 @@ import (
 "strings"
 "sync"
 
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/apiserver/pkg/storage"
@@ -332,10 +332,10 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) {
 
 func transformErrorToEvent(err error) *watch.Event {
 err = interpretWatchError(err)
-if _, ok := err.(apierrs.APIStatus); !ok {
-err = apierrs.NewInternalError(err)
+if _, ok := err.(apierrors.APIStatus); !ok {
+err = apierrors.NewInternalError(err)
 }
-status := err.(apierrs.APIStatus).Status()
+status := err.(apierrors.APIStatus).Status()
 return &watch.Event{
 Type: watch.Error,
 Object: &status,

@@ -26,7 +26,7 @@ import (
 "sync"
 "time"
 
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -375,7 +375,7 @@ loop:
 break loop
 }
 if event.Type == watch.Error {
-return apierrs.FromObject(event.Object)
+return apierrors.FromObject(event.Object)
 }
 if r.expectedType != nil {
 if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
@@ -479,9 +479,9 @@ func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
 }
 
 func isExpiredError(err error) bool {
-// In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and
-// apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
-// and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
+// In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
+// apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
+// and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone
 // check when we fully drop support for Kubernetes 1.17 servers from reflectors.
-return apierrs.IsResourceExpired(err) || apierrs.IsGone(err)
+return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
 }

@@ -26,7 +26,7 @@ import (
 "time"
 
 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"
@@ -520,7 +520,7 @@ func TestReflectorExpiredExactResourceVersion(t *testing.T) {
 return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}, Items: pods[0:4]}, nil
 case "10":
 // When watch cache is disabled, if the exact ResourceVersion requested is not available, a "Expired" error is returned.
-return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.")
+return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.")
 case "":
 return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil
 default:
@@ -584,7 +584,7 @@ func TestReflectorFullListIfExpired(t *testing.T) {
 return &v1.PodList{ListMeta: metav1.ListMeta{Continue: "C1", ResourceVersion: "11"}, Items: pods[0:4]}, nil
 // second page of the above list
 case rvContinueLimit("", "C1", 4):
-return nil, apierrs.NewResourceExpired("The resourceVersion for the provided watch is too old.")
+return nil, apierrors.NewResourceExpired("The resourceVersion for the provided watch is too old.")
 // rv=10 unlimited list
 case rvContinueLimit("10", "", 0):
 return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "11"}, Items: pods[0:8]}, nil

@@ -34,7 +34,7 @@ import (
 
 appsv1 "k8s.io/api/apps/v1"
 corev1 "k8s.io/api/core/v1"
-kubeerr "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"
@@ -651,7 +651,7 @@ func TestApplyRetry(t *testing.T) {
 case p == pathRC && m == "PATCH":
 if firstPatch {
 firstPatch = false
-statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
+statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
 bodyBytes, _ := json.Marshal(statusErr)
 bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes))
 return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil
@@ -1278,7 +1278,7 @@ func TestForceApply(t *testing.T) {
 case strings.HasSuffix(p, pathRC) && m == "PATCH":
 counts["patch"]++
 if counts["patch"] <= 6 {
-statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
+statusErr := apierrors.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first"))
 bodyBytes, _ := json.Marshal(statusErr)
 bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes))
 return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader(), Body: bodyErr}, nil

@@ -27,7 +27,7 @@ import (
 "github.com/spf13/cobra"
 
 corev1 "k8s.io/api/core/v1"
-kapierrors "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -483,7 +483,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e
 Do()
 
 if o.IgnoreNotFound {
-r.IgnoreErrors(kapierrors.IsNotFound)
+r.IgnoreErrors(apierrors.IsNotFound)
 }
 if err := r.Err(); err != nil {
 return err

@@ -29,7 +29,7 @@ import (
 jsonpatch "github.com/evanphx/json-patch"
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
-kerrors "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -60,10 +60,10 @@ type debugError interface {
 // source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource.
 func AddSourceToErr(verb string, source string, err error) error {
 if source != "" {
-if statusError, ok := err.(kerrors.APIStatus); ok {
+if statusError, ok := err.(apierrors.APIStatus); ok {
 status := statusError.Status()
 status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
-return &kerrors.StatusError{ErrStatus: status}
+return &apierrors.StatusError{ErrStatus: status}
 }
 return fmt.Errorf("error when %s %q: %v", verb, source, err)
 }
@@ -129,8 +129,8 @@ func checkErr(err error, handleErr func(string, int)) {
 switch {
 case err == ErrExit:
 handleErr("", DefaultErrorExitCode)
-case kerrors.IsInvalid(err):
-details := err.(*kerrors.StatusError).Status().Details
+case apierrors.IsInvalid(err):
+details := err.(*apierrors.StatusError).Status().Details
 s := "The request is invalid"
 if details == nil {
 handleErr(s, DefaultErrorExitCode)
@@ -202,7 +202,7 @@ func StandardErrorMessage(err error) (string, bool) {
 if debugErr, ok := err.(debugError); ok {
 klog.V(4).Infof(debugErr.DebugError())
 }
-status, isStatus := err.(kerrors.APIStatus)
+status, isStatus := err.(apierrors.APIStatus)
 switch {
 case isStatus:
 switch s := status.Status(); {
@@ -213,7 +213,7 @@ func StandardErrorMessage(err error) (string, bool) {
 default:
 return fmt.Sprintf("Error from server: %s", err.Error()), true
 }
-case kerrors.IsUnexpectedObjectError(err):
+case apierrors.IsUnexpectedObjectError(err):
 return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true
 }
 switch t := err.(type) {
@@ -259,7 +259,7 @@ func MultilineError(prefix string, err error) string {
 // Returns true if a case exists to handle the error type, or false otherwise.
 func PrintErrorWithCauses(err error, errOut io.Writer) bool {
 switch t := err.(type) {
-case *kerrors.StatusError:
+case *apierrors.StatusError:
 errorDetails := t.Status().Details
 if errorDetails != nil {
 fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name)
@@ -23,7 +23,7 @@ import (
"time"

autoscalingv1 "k8s.io/api/autoscaling/v1"
-kerrors "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/apimachinery/pkg/apis/testapigroup/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -58,8 +58,8 @@ var (
)

func TestReplicationControllerScaleRetry(t *testing.T) {
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError)
@@ -94,8 +94,8 @@ func TestReplicationControllerScaleRetry(t *testing.T) {
}

func TestReplicationControllerScaleInvalid(t *testing.T) {
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch"}
scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError)
@@ -168,8 +168,8 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
}

func TestDeploymentScaleRetry(t *testing.T) {
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
@@ -226,8 +226,8 @@ func TestDeploymentScale(t *testing.T) {

func TestDeploymentScaleInvalid(t *testing.T) {
scaleClientExpectedAction := []string{"patch"}
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@@ -299,8 +299,8 @@ func TestStatefulSetScale(t *testing.T) {

func TestStatefulSetScaleRetry(t *testing.T) {
scaleClientExpectedAction := []string{"patch", "get"}
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@@ -335,8 +335,8 @@ func TestStatefulSetScaleRetry(t *testing.T) {

func TestStatefulSetScaleInvalid(t *testing.T) {
scaleClientExpectedAction := []string{"patch"}
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError)
scaler := NewScaler(scaleClient)
@@ -407,8 +407,8 @@ func TestReplicaSetScale(t *testing.T) {
}

func TestReplicaSetScaleRetry(t *testing.T) {
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewConflict(api.Resource("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewConflict(api.Resource("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch", "get"}
scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
@@ -443,8 +443,8 @@ func TestReplicaSetScaleRetry(t *testing.T) {
}

func TestReplicaSetScaleInvalid(t *testing.T) {
-verbsOnError := map[string]*kerrors.StatusError{
-"patch": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
+verbsOnError := map[string]*apierrors.StatusError{
+"patch": apierrors.NewInvalid(api.Kind("Status"), "foo", nil),
}
scaleClientExpectedAction := []string{"patch"}
scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
@@ -688,12 +688,12 @@ func TestGenericScale(t *testing.T) {
}
}

-func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient {
-shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) {
+func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*apierrors.StatusError) *fakescale.FakeScaleClient {
+shouldReturnAnError := func(verb string) (*apierrors.StatusError, bool) {
if anError, anErrorExists := errorsOnVerb[verb]; anErrorExists {
return anError, true
}
-return &kerrors.StatusError{}, false
+return &apierrors.StatusError{}, false
}
newReplicas := int32(replicas)
scaleClient := &fakescale.FakeScaleClient{}
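The scaler tests above drive retry behaviour with per-verb StatusError values. A hedged sketch of how such errors can be built with the unified alias, using plain schema types instead of the test's api helpers:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// A retryable 409, like the one returned for the "patch" verb in the retry tests.
	conflict := apierrors.NewConflict(schema.GroupResource{Resource: "statuses"}, "foo", nil)
	fmt.Println(apierrors.IsConflict(conflict)) // true

	// A non-retryable 422, like the one used in the *Invalid tests.
	invalid := apierrors.NewInvalid(schema.GroupKind{Kind: "Status"}, "foo", nil)
	fmt.Println(apierrors.IsInvalid(invalid)) // true
}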
@@ -28,7 +28,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -367,7 +367,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
request.SetHeader("Accept", "application/json")
_, err := request.DoRaw()
if err != nil {
-status, ok := err.(*apierrs.StatusError)
+status, ok := err.(*apierrors.StatusError)
if !ok {
return false, err
}
@@ -27,7 +27,7 @@ import (
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/features"
@@ -153,11 +153,11 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
-if err != nil && !errors.IsResourceExpired(err) {
+if err != nil && !apierrors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
-status, ok := err.(errors.APIStatus)
+status, ok := err.(apierrors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
}
@@ -26,7 +26,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -236,7 +236,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
},
},
})
-if err != nil && errors.IsAlreadyExists(err) {
+if err != nil && apierrors.IsAlreadyExists(err) {
framework.Logf("role binding %s already exists", roleBindingCRDName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
@@ -28,7 +28,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -667,7 +667,7 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Logf("")
return false, nil
}
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
@@ -769,7 +769,7 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Logf("")
return false, nil
}
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
@@ -882,7 +882,7 @@ var _ = SIGDescribe("Garbage collector", func() {
definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped)
defer func() {
err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient)
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
@@ -951,7 +951,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// Ensure the dependent is deleted.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
-return errors.IsNotFound(err), nil
+return apierrors.IsNotFound(err), nil
}); err != nil {
framework.Logf("owner: %#v", persistedOwner)
framework.Logf("dependent: %#v", persistedDependent)
@@ -963,7 +963,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err == nil {
framework.Failf("expected owner resource %q to be deleted", ownerName)
} else {
-if !errors.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
framework.Failf("unexpected error getting owner resource %q: %v", ownerName, err)
}
}
@@ -985,7 +985,7 @@ var _ = SIGDescribe("Garbage collector", func() {
definition := apiextensionstestserver.NewRandomNameV1CustomResourceDefinition(apiextensionsv1.ClusterScoped)
defer func() {
err = apiextensionstestserver.DeleteV1CustomResourceDefinition(definition, apiExtensionClient)
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
}()
@@ -1056,7 +1056,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err == nil {
return false, nil
}
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
return false, fmt.Errorf("failed to get owner: %v", err)
}
return true, nil
@@ -23,7 +23,7 @@ import (
"time"

"k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
@@ -121,7 +121,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
-if err != nil && errors.IsNotFound(err) {
+if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
return false, nil
@@ -178,7 +178,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
-if err != nil && errors.IsNotFound(err) {
+if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
return false, nil
@@ -23,7 +23,7 @@ import (
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"

-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
@@ -63,16 +63,16 @@ var _ = SIGDescribe("client-go should negotiate", func() {
case watch.Added, watch.Modified:
// this is allowed
case watch.Error:
-err := errors.FromObject(evt.Object)
-// In Kubernetes 1.17 and earlier, the api server returns both apierrs.StatusReasonExpired and
-// apierrs.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
-// and always returns apierrs.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
+err := apierrors.FromObject(evt.Object)
+// In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
+// apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
+// and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrs.IsGone
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
-if errors.IsGone(err) || errors.IsResourceExpired(err) {
+if apierrors.IsGone(err) || apierrors.IsResourceExpired(err) {
// this is allowed, since the kubernetes object could be very old
break
}
-if errors.IsUnexpectedObjectError(err) {
+if apierrors.IsUnexpectedObjectError(err) {
g.Fail(fmt.Sprintf("unexpected object, wanted v1.Status: %#v", evt.Object))
}
g.Fail(fmt.Sprintf("unexpected error: %#v", evt.Object))
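A hedged, standalone sketch of the watch-error handling exercised above: recovering an error from a watch.Error event and tolerating the 410-style expiry reasons. The metav1.Status literal stands in for what an expired watch would actually deliver:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// Simulate the object an expired watch would put into a watch.Error event.
	status := &metav1.Status{
		Status: metav1.StatusFailure,
		Code:   410,
		Reason: metav1.StatusReasonExpired,
	}
	evt := watch.Event{Type: watch.Error, Object: status}

	// FromObject turns the event payload back into a normal error value.
	err := apierrors.FromObject(evt.Object)
	if apierrors.IsGone(err) || apierrors.IsResourceExpired(err) {
		fmt.Println("resource version expired; restart the watch from a fresh list")
		return
	}
	fmt.Printf("unexpected watch error: %v\n", err)
}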
@@ -24,7 +24,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -886,7 +886,7 @@ var _ = SIGDescribe("ResourceQuota", func() {

ginkgo.By("Verifying the deleted ResourceQuota")
_, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
-framework.ExpectEqual(errors.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})

@@ -1076,7 +1076,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1115,7 +1115,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1160,7 +1160,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1206,10 +1206,10 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {

ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("2")
@@ -1261,7 +1261,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1295,7 +1295,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1334,7 +1334,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {

_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)

hard := v1.ResourceList{}
hard[v1.ResourcePods] = resource.MustParse("1")
@@ -1714,7 +1714,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s
resourceQuota.Spec.Hard[resourceName] = current
_, err = c.CoreV1().ResourceQuotas(ns).Update(resourceQuota)
// ignoring conflicts since someone else may already updated it.
-if errors.IsConflict(err) {
+if apierrors.IsConflict(err) {
return false, nil
}
return false, err
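The quota update above swallows 409s and retries. A minimal sketch of that shape under the new alias, with the read-modify-write step stubbed out rather than taken from the test:

package main

import (
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// tryUpdate stands in for a read-modify-write against the API server.
var tryUpdate = func() error { return nil }

func main() {
	err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		if err := tryUpdate(); err != nil {
			// A conflict means someone else updated the object first; poll again.
			if apierrors.IsConflict(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	fmt.Println("update finished:", err)
}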
@@ -27,7 +27,7 @@ import (

authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/client-go/util/workqueue"
@@ -164,7 +164,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
}
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io").Body(sar).Do().Into(table)
framework.ExpectError(err, "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
-framework.ExpectEqual(err.(errors.APIStatus).Status().Code, int32(406))
+framework.ExpectEqual(err.(apierrors.APIStatus).Status().Code, int32(406))
})
})

@@ -30,7 +30,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
@@ -739,7 +739,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
},
},
})
-if err != nil && errors.IsAlreadyExists(err) {
+if err != nil && apierrors.IsAlreadyExists(err) {
framework.Logf("role binding %s already exists", roleBindingName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
@@ -1120,7 +1120,7 @@ func testWebhook(f *framework.Framework) {
framework.Failf("expect error %q, got %q", "deadline", err.Error())
}
// ensure the pod was not actually created
-if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !errors.IsNotFound(err) {
+if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) {
framework.Failf("expect notfound error looking for rejected pod, got %v", err)
}

@@ -1296,7 +1296,7 @@ func testFailClosedWebhook(f *framework.Framework) {
}
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
-if !errors.IsInternalError(err) {
+if !apierrors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
}
}
@@ -1661,7 +1661,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig
return true, nil
}
// Only retry update on conflict
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
return false, err
}
return false, nil
@@ -1683,7 +1683,7 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u
return true, nil
}
// Only retry update on conflict
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
return false, err
}
return false, nil
@@ -26,7 +26,7 @@ import (
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@@ -214,7 +214,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Ensuring job was deleted")
_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectError(err)
-framework.ExpectEqual(errors.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)

ginkgo.By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
@@ -29,7 +29,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
@@ -206,7 +206,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
framework.ExpectError(err)
-framework.ExpectEqual(errors.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
@@ -615,7 +615,7 @@ func testIterativeDeployments(f *framework.Framework) {
name := podList.Items[p].Name
framework.Logf("%02d: deleting deployment pod %q", i, name)
err := c.CoreV1().Pods(ns).Delete(name, nil)
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
}
@@ -21,7 +21,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/test/e2e/framework"
@@ -162,7 +162,7 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Ensuring job was deleted")
_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
-framework.ExpectEqual(errors.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)
})

/*
@@ -21,7 +21,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -287,7 +287,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the RC
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil
}
framework.ExpectNoError(err)
@@ -323,7 +323,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {

pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
-if err != nil && errors.IsConflict(err) {
+if err != nil && apierrors.IsConflict(err) {
return false, nil
}
if err != nil {
@@ -22,7 +22,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -289,7 +289,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the ReplicaSet
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil
}
framework.ExpectNoError(err)
@@ -315,7 +315,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {

pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
-if err != nil && errors.IsConflict(err) {
+if err != nil && apierrors.IsConflict(err) {
return false, nil
}
if err != nil {
@@ -25,7 +25,7 @@ import (

auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -111,7 +111,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
// get pod ip
err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
framework.Logf("waiting for audit-proxy pod to be present")
return false, nil
} else if err != nil {
@@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
@@ -122,7 +122,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {

func expectForbidden(err error) {
framework.ExpectError(err, "should be forbidden")
-framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error")
+framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
}

func testPrivilegedPods(tester func(pod *v1.Pod)) {
@@ -31,7 +31,7 @@ import (
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
@@ -1476,7 +1476,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
if err == nil {
return nil
}
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
@@ -1517,7 +1517,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
if err == nil {
return nil
}
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
@@ -1692,7 +1692,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
if err == nil {
break
}
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
return err
}
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
@@ -1936,7 +1936,7 @@ func createPriorityClasses(f *framework.Framework) func() {
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
}

return func() {
@@ -20,7 +20,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -67,7 +67,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
if err == nil {
framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
-} else if !apierrs.IsNotFound(err) {
+} else if !apierrors.IsNotFound(err) {
framework.Failf("failed to get node %q err: %q", nodeToDelete.Name, err)
}

@@ -21,7 +21,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -105,7 +105,7 @@ func (cc *ConformanceContainer) Present() (bool, error) {
if err == nil {
return true, nil
}
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
@@ -23,7 +23,7 @@ import (

coordinationv1 "k8s.io/api/coordination/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
@@ -154,6 +154,6 @@ var _ = framework.KubeDescribe("Lease", func() {
framework.ExpectNoError(err, "deleting Lease failed")

_, err = leaseClient.Get(name, metav1.GetOptions{})
-framework.ExpectEqual(errors.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
@@ -21,8 +21,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
@@ -72,7 +71,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
ginkgo.By("Waiting for the RuntimeClass to disappear")
framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
_, err := rcClient.Get(rcName, metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil // done
}
if err != nil {
@@ -124,7 +123,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
} else {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectError(err, "should be forbidden")
-framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error")
+framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
}
}

@@ -41,7 +41,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -522,7 +522,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
framework.DescribeIng(j.Ingress.Namespace)
return
}
-if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
+if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
framework.Failf("failed to update ingress %s/%s: %v", ns, name, err)
}
}
@@ -21,7 +21,7 @@ import (

batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@@ -91,7 +91,7 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
return true, nil
}
return false, err
@ -21,7 +21,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
@ -157,7 +157,7 @@ func TestCheckReadyForTests(t *testing.T) {
|
|||||||
expectedErr: "Forced error",
|
expectedErr: "Forced error",
|
||||||
}, {
|
}, {
|
||||||
desc: "Retryable errors from node list are reported but still return false",
|
desc: "Retryable errors from node list are reported but still return false",
|
||||||
nodeListErr: apierrs.NewTimeoutError("Retryable error", 10),
|
nodeListErr: apierrors.NewTimeoutError("Retryable error", 10),
|
||||||
expected: false,
|
expected: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -23,7 +23,7 @@ import (
|
|||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
)
|
)
|
||||||
@ -55,7 +55,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
|
|||||||
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||||
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
|
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return nil // assume pod was already deleted
|
return nil // assume pod was already deleted
|
||||||
}
|
}
|
||||||
return fmt.Errorf("pod Delete API error: %v", err)
|
return fmt.Errorf("pod Delete API error: %v", err)
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
@ -213,7 +213,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
|
|||||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||||
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -387,7 +387,7 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
|
|||||||
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
|
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
|
||||||
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
return wait.PollImmediate(poll, timeout, func() (bool, error) {
|
||||||
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return true, nil // done
|
return true, nil // done
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -23,7 +23,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
@ -115,7 +115,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update updates the pod object. It retries if there is a conflict, throw out error if
|
// Update updates the pod object. It retries if there is a conflict, throw out error if
|
||||||
// there is any other errors. name is the pod name, updateFn is the function updating the
|
// there is any other apierrors. name is the pod name, updateFn is the function updating the
|
||||||
// pod object.
|
// pod object.
|
||||||
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||||
@ -129,7 +129,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
|||||||
Logf("Successfully updated pod %q", name)
|
Logf("Successfully updated pod %q", name)
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
if errors.IsConflict(err) {
|
if apierrors.IsConflict(err) {
|
||||||
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
@ -147,7 +147,7 @@ func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeo
|
|||||||
// disappear before the timeout, it will fail the test.
|
// disappear before the timeout, it will fail the test.
|
||||||
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) {
|
func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options *metav1.DeleteOptions, timeout time.Duration) {
|
||||||
err := c.Delete(name, options)
|
err := c.Delete(name, options)
|
||||||
if err != nil && !errors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
Failf("Failed to delete pod %q: %v", name, err)
|
Failf("Failed to delete pod %q: %v", name, err)
|
||||||
}
|
}
|
||||||
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
||||||
|
@ -23,7 +23,7 @@ import (
|
|||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||||
rbacv1 "k8s.io/api/rbac/v1"
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||||
@ -111,7 +111,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
|
|||||||
privilegedPSPOnce.Do(func() {
|
privilegedPSPOnce.Do(func() {
|
||||||
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(
|
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(
|
||||||
podSecurityPolicyPrivileged, metav1.GetOptions{})
|
podSecurityPolicyPrivileged, metav1.GetOptions{})
|
||||||
if !apierrs.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
// Privileged PSP was already created.
|
// Privileged PSP was already created.
|
||||||
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
|
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
|
||||||
return
|
return
|
||||||
@ -119,7 +119,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
|
|||||||
|
|
||||||
psp := privilegedPSP(podSecurityPolicyPrivileged)
|
psp := privilegedPSP(podSecurityPolicyPrivileged)
|
||||||
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
|
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
|
||||||
if !apierrs.IsAlreadyExists(err) {
|
if !apierrors.IsAlreadyExists(err) {
|
||||||
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
|
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -134,7 +134,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
|
|||||||
Verbs: []string{"use"},
|
Verbs: []string{"use"},
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
if !apierrs.IsAlreadyExists(err) {
|
if !apierrors.IsAlreadyExists(err) {
|
||||||
ExpectNoError(err, "Failed to create PSP role")
|
ExpectNoError(err, "Failed to create PSP role")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -22,7 +22,7 @@ import (
|
|||||||
|
|
||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
@ -186,7 +186,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
|
|||||||
if c != nil && len(pvName) > 0 {
|
if c != nil && len(pvName) > 0 {
|
||||||
framework.Logf("Deleting PersistentVolume %q", pvName)
|
framework.Logf("Deleting PersistentVolume %q", pvName)
|
||||||
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
|
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
|
||||||
if err != nil && !apierrs.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return fmt.Errorf("PV Delete API error: %v", err)
|
return fmt.Errorf("PV Delete API error: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -198,7 +198,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
|
|||||||
if c != nil && len(pvcName) > 0 {
|
if c != nil && len(pvcName) > 0 {
|
||||||
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
|
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
|
||||||
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
|
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
|
||||||
if err != nil && !apierrs.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return fmt.Errorf("PVC Delete API error: %v", err)
|
return fmt.Errorf("PVC Delete API error: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -275,10 +275,10 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
|
|||||||
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
|
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if !apierrs.IsNotFound(err) {
|
} else if !apierrors.IsNotFound(err) {
|
||||||
return fmt.Errorf("PVC Get API error: %v", err)
|
return fmt.Errorf("PVC Get API error: %v", err)
|
||||||
}
|
}
|
||||||
// delete pvckey from map even if apierrs.IsNotFound above is true and thus the
|
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
|
||||||
// claim was not actually deleted here
|
// claim was not actually deleted here
|
||||||
delete(claims, pvcKey)
|
delete(claims, pvcKey)
|
||||||
deletedPVCs++
|
deletedPVCs++
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
@ -446,7 +446,7 @@ func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return j.sanityCheckService(result, service.Spec.Type)
|
return j.sanityCheckService(result, service.Spec.Type)
|
||||||
}
|
}
|
||||||
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
|
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
|
||||||
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
|
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
restclient "k8s.io/client-go/rest"
|
restclient "k8s.io/client-go/rest"
|
||||||
@ -73,7 +73,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update
|
|||||||
|
|
||||||
service, err = c.CoreV1().Services(namespace).Update(service)
|
service, err = c.CoreV1().Services(namespace).Update(service)
|
||||||
|
|
||||||
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
|
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
|
||||||
return service, err
|
return service, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
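The UpdateService hunk above only renames the alias inside an existing conflict-retry loop. For reference, a rough sketch of that retry pattern under the new alias (illustrative only; updateServiceWithRetries, the fixed retry count, and the surrounding wiring are assumptions, not code from this commit):

// Illustrative sketch only; the helper name and retry count are assumptions.
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// updateServiceWithRetries re-reads the Service and re-applies the mutation
// whenever the apiserver reports a conflict or a server timeout.
func updateServiceWithRetries(c clientset.Interface, ns, name string, mutate func(*v1.Service)) (*v1.Service, error) {
	for i := 0; i < 3; i++ {
		svc, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return nil, fmt.Errorf("failed to get Service %q: %v", name, err)
		}
		mutate(svc)
		updated, err := c.CoreV1().Services(ns).Update(svc)
		if err == nil {
			return updated, nil
		}
		// Only conflicts and server timeouts are retried; anything else is fatal.
		if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
			return nil, fmt.Errorf("failed to update Service %q: %v", name, err)
		}
	}
	return nil, fmt.Errorf("too many conflicts updating Service %q", name)
}
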
@ -19,7 +19,7 @@ package framework
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||||
@ -65,7 +65,7 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
|
|||||||
_, err := resourceClient.List(metav1.ListOptions{})
|
_, err := resourceClient.List(metav1.ListOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// not all resources support list, so we ignore those
|
// not all resources support list, so we ignore those
|
||||||
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
|
if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
|
||||||
skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err)
|
skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err)
|
||||||
}
|
}
|
||||||
Failf("Unexpected error getting %v: %v", gvr, err)
|
Failf("Unexpected error getting %v: %v", gvr, err)
|
||||||
|
@ -24,7 +24,7 @@ import (
|
|||||||
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
@ -256,7 +256,7 @@ func update(c clientset.Interface, ns, name string, update func(ss *appsv1.State
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return ss
|
return ss
|
||||||
}
|
}
|
||||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
|
||||||
e2efwk.Failf("failed to update statefulset %q: %v", name, err)
|
e2efwk.Failf("failed to update statefulset %q: %v", name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -46,7 +46,7 @@ import (
|
|||||||
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
@ -346,7 +346,7 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll,
|
|||||||
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
|
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
Logf("PersistentVolume %s was removed", pvName)
|
Logf("PersistentVolume %s was removed", pvName)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -365,7 +365,7 @@ func findAvailableNamespaceName(baseName string, c clientset.Interface) (string,
|
|||||||
// Already taken
|
// Already taken
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
Logf("Unexpected error while getting namespace: %v", err)
|
Logf("Unexpected error while getting namespace: %v", err)
|
||||||
@ -470,7 +470,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
|
|||||||
case err == nil:
|
case err == nil:
|
||||||
Logf("Service %s in namespace %s found.", name, namespace)
|
Logf("Service %s in namespace %s found.", name, namespace)
|
||||||
return exist, nil
|
return exist, nil
|
||||||
case apierrs.IsNotFound(err):
|
case apierrors.IsNotFound(err):
|
||||||
Logf("Service %s in namespace %s disappeared.", name, namespace)
|
Logf("Service %s in namespace %s disappeared.", name, namespace)
|
||||||
return !exist, nil
|
return !exist, nil
|
||||||
case !testutils.IsRetryableAPIError(err):
|
case !testutils.IsRetryableAPIError(err):
|
||||||
@ -1190,7 +1190,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
|
|||||||
|
|
||||||
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
|
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
Logf("%v %s not found: %v", kind, name, err)
|
Logf("%v %s not found: %v", kind, name, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -46,7 +46,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
@ -372,7 +372,7 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
|
|||||||
serverPod, err := podClient.Create(serverPod)
|
serverPod, err := podClient.Create(serverPod)
|
||||||
// ok if the server pod already exists. TODO: make this controllable by callers
|
// ok if the server pod already exists. TODO: make this controllable by callers
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsAlreadyExists(err) {
|
if apierrors.IsAlreadyExists(err) {
|
||||||
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
|
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
|
||||||
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
|
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
|
||||||
serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
|
serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
|
||||||
|
@ -45,7 +45,7 @@ import (
|
|||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
rbacv1 "k8s.io/api/rbac/v1"
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/meta"
|
"k8s.io/apimachinery/pkg/api/meta"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
@ -1325,9 +1325,9 @@ metadata:
|
|||||||
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
|
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
|
||||||
|
|
||||||
// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
|
// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
|
||||||
if apierrs.IsNotFound(err) ||
|
if apierrors.IsNotFound(err) ||
|
||||||
apierrs.IsUnauthorized(err) ||
|
apierrors.IsUnauthorized(err) ||
|
||||||
apierrs.IsServerTimeout(err) {
|
apierrors.IsServerTimeout(err) {
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
@ -1969,7 +1969,7 @@ metadata:
|
|||||||
ginkgo.By("verifying the job " + jobName + " was deleted")
|
ginkgo.By("verifying the job " + jobName + " was deleted")
|
||||||
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
||||||
framework.ExpectError(err)
|
framework.ExpectError(err)
|
||||||
framework.ExpectEqual(apierrs.IsNotFound(err), true)
|
framework.ExpectEqual(apierrors.IsNotFound(err), true)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -2660,7 +2660,7 @@ func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
|
|||||||
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
|
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
|
||||||
switch event.Type {
|
switch event.Type {
|
||||||
case watch.Deleted:
|
case watch.Deleted:
|
||||||
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
|
return false, apierrors.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
|
||||||
}
|
}
|
||||||
switch rc := event.Object.(type) {
|
switch rc := event.Object.(type) {
|
||||||
case *v1.ReplicationController:
|
case *v1.ReplicationController:
|
||||||
|
@ -22,8 +22,8 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
@ -139,7 +139,7 @@ func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID strin
|
|||||||
|
|
||||||
return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
|
return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
|
||||||
_, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
|
_, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -150,7 +150,7 @@ func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID strin
|
|||||||
func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID string, t time.Duration) error {
|
func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID string, t time.Duration) error {
|
||||||
err := wait.Poll(framework.Poll, t, func() (bool, error) {
|
err := wait.Poll(framework.Poll, t, func() (bool, error) {
|
||||||
secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
|
secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return true, errors.New("secret not exists")
|
return true, errors.New("secret not exists")
|
||||||
}
|
}
|
||||||
if secret != nil {
|
if secret != nil {
|
||||||
|
@ -18,7 +18,7 @@ package network
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
"k8s.io/apimachinery/pkg/util/uuid"
|
"k8s.io/apimachinery/pkg/util/uuid"
|
||||||
@ -118,7 +118,7 @@ func (t *TestFixture) Cleanup() []error {
|
|||||||
// First, resize the RC to 0.
|
// First, resize the RC to 0.
|
||||||
old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{})
|
old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@ -126,7 +126,7 @@ func (t *TestFixture) Cleanup() []error {
|
|||||||
x := int32(0)
|
x := int32(0)
|
||||||
old.Spec.Replicas = &x
|
old.Spec.Replicas = &x
|
||||||
if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil {
|
if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil {
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@ -139,7 +139,7 @@ func (t *TestFixture) Cleanup() []error {
|
|||||||
// TODO(mikedanese): Wait.
|
// TODO(mikedanese): Wait.
|
||||||
// Then, delete the RC altogether.
|
// Then, delete the RC altogether.
|
||||||
if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil {
|
if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -149,7 +149,7 @@ func (t *TestFixture) Cleanup() []error {
|
|||||||
ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace)
|
ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace)
|
||||||
err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
|
err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
errs = append(errs, err)
|
errs = append(errs, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -28,7 +28,7 @@ import (
|
|||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
rbacv1 "k8s.io/api/rbac/v1"
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/apimachinery/pkg/util/uuid"
|
"k8s.io/apimachinery/pkg/util/uuid"
|
||||||
@ -792,7 +792,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
|
|||||||
}
|
}
|
||||||
ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
|
ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
|
||||||
err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
|
err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||||
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
|
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !apierrors.IsNotFound(err) {
|
||||||
framework.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
|
framework.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/intstr"
|
"k8s.io/apimachinery/pkg/util/intstr"
|
||||||
"k8s.io/apimachinery/pkg/util/net"
|
"k8s.io/apimachinery/pkg/util/net"
|
||||||
@ -226,7 +226,7 @@ var _ = SIGDescribe("Proxy", func() {
|
|||||||
body, status, d, err := doProxy(f, path, i)
|
body, status, d, err := doProxy(f, path, i)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if serr, ok := err.(*errors.StatusError); ok {
|
if serr, ok := err.(*apierrors.StatusError); ok {
|
||||||
recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v",
|
recordError(fmt.Sprintf("%v (%v; %v): path %v gave status error: %+v",
|
||||||
i, status, d, path, serr.Status()))
|
i, status, d, path, serr.Status()))
|
||||||
} else {
|
} else {
|
||||||
@ -322,7 +322,7 @@ func waitForEndpoint(c clientset.Interface, ns, name string) error {
|
|||||||
registerTimeout := time.Minute
|
registerTimeout := time.Minute
|
||||||
for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) {
|
for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) {
|
||||||
endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
|
endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
|
framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -21,7 +21,7 @@ import (
|
|||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
nodev1beta1 "k8s.io/api/node/v1beta1"
|
nodev1beta1 "k8s.io/api/node/v1beta1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
|
runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
@ -55,7 +55,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
|
|||||||
}
|
}
|
||||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||||
framework.ExpectError(err, "should be forbidden")
|
framework.ExpectError(err, "should be forbidden")
|
||||||
framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error")
|
framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
|
||||||
})
|
})
|
||||||
|
|
||||||
ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ", func() {
|
ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ", func() {
|
||||||
|
@ -26,7 +26,7 @@ import (
|
|||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
@ -79,7 +79,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
|||||||
var err error
|
var err error
|
||||||
for _, pair := range priorityPairs {
|
for _, pair := range priorityPairs {
|
||||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
|
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
|
||||||
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
|
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
e2enode.WaitForTotalHealthy(cs, time.Minute)
|
e2enode.WaitForTotalHealthy(cs, time.Minute)
|
||||||
@ -143,7 +143,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
|||||||
})
|
})
|
||||||
// Make sure that the lowest priority pod is deleted.
|
// Make sure that the lowest priority pod is deleted.
|
||||||
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
|
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
|
||||||
podDeleted := (err != nil && errors.IsNotFound(err)) ||
|
podDeleted := (err != nil && apierrors.IsNotFound(err)) ||
|
||||||
(err == nil && preemptedPod.DeletionTimestamp != nil)
|
(err == nil && preemptedPod.DeletionTimestamp != nil)
|
||||||
framework.ExpectEqual(podDeleted, true)
|
framework.ExpectEqual(podDeleted, true)
|
||||||
// Other pods (mid priority ones) should be present.
|
// Other pods (mid priority ones) should be present.
|
||||||
@ -198,7 +198,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
|||||||
// Clean-up the critical pod
|
// Clean-up the critical pod
|
||||||
// Always run cleanup to make sure the pod is properly cleaned up.
|
// Always run cleanup to make sure the pod is properly cleaned up.
|
||||||
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
|
err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0))
|
||||||
if err != nil && !errors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
|
framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@ -212,7 +212,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
|||||||
})
|
})
|
||||||
// Make sure that the lowest priority pod is deleted.
|
// Make sure that the lowest priority pod is deleted.
|
||||||
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
|
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
|
||||||
podDeleted := (err != nil && errors.IsNotFound(err)) ||
|
podDeleted := (err != nil && apierrors.IsNotFound(err)) ||
|
||||||
(err == nil && preemptedPod.DeletionTimestamp != nil)
|
(err == nil && preemptedPod.DeletionTimestamp != nil)
|
||||||
framework.ExpectEqual(podDeleted, true)
|
framework.ExpectEqual(podDeleted, true)
|
||||||
// Other pods (mid priority ones) should be present.
|
// Other pods (mid priority ones) should be present.
|
||||||
@ -301,9 +301,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
|
|||||||
_, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
|
_, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
|
framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
|
||||||
framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
|
framework.Logf("Reason: %v. Msg: %v", apierrors.ReasonForError(err), err)
|
||||||
}
|
}
|
||||||
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
|
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -28,7 +28,7 @@ import (
|
|||||||
_ "github.com/stretchr/testify/assert"
|
_ "github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/uuid"
|
"k8s.io/apimachinery/pkg/util/uuid"
|
||||||
@ -84,7 +84,7 @@ func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods
|
|||||||
node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData)
|
node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData)
|
||||||
_, err = c.CoreV1().Nodes().Update(node)
|
_, err = c.CoreV1().Nodes().Update(node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !apierrs.IsConflict(err) {
|
if !apierrors.IsConflict(err) {
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
} else {
|
} else {
|
||||||
framework.Logf("Conflict when trying to add/update avoidPods %v to %v with error %v", avoidPods, nodeName, err)
|
framework.Logf("Conflict when trying to add/update avoidPods %v to %v with error %v", avoidPods, nodeName, err)
|
||||||
@ -113,7 +113,7 @@ func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) {
|
|||||||
delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
|
delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
|
||||||
_, err = c.CoreV1().Nodes().Update(node)
|
_, err = c.CoreV1().Nodes().Update(node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !apierrs.IsConflict(err) {
|
if !apierrors.IsConflict(err) {
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
} else {
|
} else {
|
||||||
framework.Logf("Conflict when trying to remove avoidPods to %v", nodeName)
|
framework.Logf("Conflict when trying to remove avoidPods to %v", nodeName)
|
||||||
|
@ -23,7 +23,7 @@ import (
|
|||||||
|
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
|
settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/watch"
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
@ -73,7 +73,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip)
|
_, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip)
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
|
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
|
||||||
}
|
}
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
@ -191,7 +191,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip)
|
_, err := createPodPreset(f.ClientSet, f.Namespace.Name, pip)
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
|
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
|
||||||
}
|
}
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
storagev1 "k8s.io/api/storage/v1"
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||||
@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
|
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
|
||||||
_, err = m.cs.StorageV1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
|
_, err = m.cs.StorageV1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
if !test.disableAttach {
|
if !test.disableAttach {
|
||||||
framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
|
framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
|
||||||
}
|
}
|
||||||
@ -618,7 +618,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte
|
|||||||
|
|
||||||
waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
|
waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
|
||||||
csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{})
|
csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{})
|
||||||
if err != nil && !errors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
attachLimit = getVolumeLimitFromCSINode(csiNode, driverName)
|
attachLimit = getVolumeLimitFromCSINode(csiNode, driverName)
|
||||||
@ -809,7 +809,7 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error {
|
|||||||
framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
|
framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
|
||||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
|
for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
|
||||||
_, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
|
_, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{})
|
||||||
if !errors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ import (
|
|||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
storagev1 "k8s.io/api/storage/v1"
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
@ -492,7 +492,7 @@ func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c
|
|||||||
|
|
||||||
return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) {
|
return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) {
|
||||||
csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{})
|
csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{})
|
||||||
if err != nil && !errors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
for _, driver := range csiNode.Spec.Drivers {
|
for _, driver := range csiNode.Spec.Drivers {
|
||||||
|
@ -46,7 +46,7 @@ import (
|
|||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
rbacv1 "k8s.io/api/rbac/v1"
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
storagev1 "k8s.io/api/storage/v1"
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
@ -319,7 +319,7 @@ func (v *glusterVolume) DeleteVolume() {
|
|||||||
framework.Logf("Deleting Gluster endpoints %q...", name)
|
framework.Logf("Deleting Gluster endpoints %q...", name)
|
||||||
err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
|
err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
framework.Failf("Gluster delete endpoints failed: %v", err)
|
framework.Failf("Gluster delete endpoints failed: %v", err)
|
||||||
}
|
}
|
||||||
framework.Logf("Gluster endpoints %q not found, assuming deleted", name)
|
framework.Logf("Gluster endpoints %q not found, assuming deleted", name)
|
||||||
|
@ -25,7 +25,7 @@ import (
|
|||||||
|
|
||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||||
@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
|
|||||||
testFlexVolume(driverInstallAs, config, f)
|
testFlexVolume(driverInstallAs, config, f)
|
||||||
|
|
||||||
ginkgo.By("waiting for flex client pod to terminate")
|
ginkgo.By("waiting for flex client pod to terminate")
|
||||||
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
|
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrors.IsNotFound(err) {
|
||||||
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
|
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -213,7 +213,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
|
|||||||
testFlexVolume(driverInstallAs, config, f)
|
testFlexVolume(driverInstallAs, config, f)
|
||||||
|
|
||||||
ginkgo.By("waiting for flex client pod to terminate")
|
ginkgo.By("waiting for flex client pod to terminate")
|
||||||
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
|
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrors.IsNotFound(err) {
|
||||||
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
|
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -22,8 +22,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/kubernetes/pkg/util/slice"
|
"k8s.io/kubernetes/pkg/util/slice"
|
||||||
@ -41,7 +41,7 @@ func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcNa
|
|||||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
|
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
|
||||||
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
|
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrs.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
|
framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -30,11 +30,10 @@ import (
|
|||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
storagev1 "k8s.io/api/storage/v1"
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||||
apierrors "k8s.io/apimachinery/pkg/util/errors"
|
|
||||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
@ -317,7 +316,7 @@ func (r *VolumeResource) CleanupResource() error {
|
|||||||
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
|
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return apierrors.NewAggregate(cleanUpErrs)
|
return utilerrors.NewAggregate(cleanUpErrs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func createPVCPV(
|
func createPVCPV(
|
||||||
@ -409,7 +408,7 @@ func isDelayedBinding(sc *storagev1.StorageClass) bool {
|
|||||||
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
|
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
|
||||||
func deleteStorageClass(cs clientset.Interface, className string) error {
|
func deleteStorageClass(cs clientset.Interface, className string) error {
|
||||||
err := cs.StorageV1().StorageClasses().Delete(className, nil)
|
err := cs.StorageV1().StorageClasses().Delete(className, nil)
|
||||||
if err != nil && !apierrs.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@@ -25,7 +25,7 @@ import (
 "github.com/onsi/gomega"

 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -370,7 +370,7 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
 // Pod was created, feature supported.
 StopPod(c, pod)
 return true, nil
-case errors.IsInvalid(err):
+case apierrors.IsInvalid(err):
 // "Invalid" because it uses a feature that isn't supported.
 return false, nil
 default:
@@ -25,7 +25,7 @@ import (

 v1 "k8s.io/api/core/v1"
 storagev1 "k8s.io/api/storage/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -247,7 +247,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 _, err = client.StorageV1().StorageClasses().Create(class)
 // The "should provision storage with snapshot data source" test already has created the class.
 // TODO: make class creation optional and remove the IsAlreadyExists exception
-framework.ExpectEqual(err == nil || apierrs.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 defer func() {
@@ -263,7 +263,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
 // typically this claim has already been deleted
 err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 }
 }()
@@ -670,13 +670,13 @@ func prepareSnapshotDataSourceForProvisioning(
 cleanupFunc := func() {
 framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
 err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
 }

 framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
 err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
 }

@@ -718,7 +718,7 @@ func preparePVCDataSourceForProvisioning(
 cleanupFunc := func() {
 framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name)
 err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(sourcePVC.Name, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err)
 }
 }
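The "err == nil || apierrors.IsAlreadyExists(err)" assertion above is the usual way to make object creation idempotent across test runs. A small hedged sketch of the same get-or-create pattern for a StorageClass (the ensureStorageClass name is illustrative only):

package example

import (
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// ensureStorageClass creates the class if needed, treats AlreadyExists as
// success, and then reads back whatever is stored in the cluster.
func ensureStorageClass(cs clientset.Interface, class *storagev1.StorageClass) (*storagev1.StorageClass, error) {
	if _, err := cs.StorageV1().StorageClasses().Create(class); err != nil && !apierrors.IsAlreadyExists(err) {
		return nil, err
	}
	return cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
}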
@@ -23,7 +23,7 @@ import (
 "github.com/onsi/ginkgo"

 v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/client-go/dynamic"
@@ -143,7 +143,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
 // typically this claim has already been deleted
 err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
 }
 }()
@@ -182,7 +182,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
 // typically this snapshot has already been deleted
 err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
 }
 }()
@@ -26,7 +26,7 @@ import (

 v1 "k8s.io/api/core/v1"
 storagev1 "k8s.io/api/storage/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -246,7 +246,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
 if err == nil {
 existing++
 } else {
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 pvNames.Delete(pvName)
 } else {
 framework.Logf("Failed to get PV %s: %s", pvName, err)
@@ -29,7 +29,7 @@ import (
 rbacv1 "k8s.io/api/rbac/v1"
 storagev1 "k8s.io/api/storage/v1"
 storagev1beta1 "k8s.io/api/storage/v1beta1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -155,7 +155,7 @@ func CreateItems(f *framework.Framework, items ...interface{}) (func(), error) {
 // command line flags, because they would also start to apply
 // to non-namespaced items.
 for _, destructor := range destructors {
-if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
+if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
 framework.Logf("deleting failed: %s", err)
 }
 }
@@ -30,7 +30,7 @@ import (

 v1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -581,7 +581,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
 roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
 err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
 _, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
-return apierrs.IsNotFound(err), nil
+return apierrors.IsNotFound(err), nil
 })
 framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

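wait.Poll plus apierrors.IsNotFound is the standard way these tests block until an object is actually gone. A condensed sketch, assuming the pre-1.18 client signatures used throughout this diff (waitForRoleBindingDeleted is a hypothetical name):

package example

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForRoleBindingDeleted polls until Get returns NotFound; any other
// outcome keeps the poll going until the timeout expires.
func waitForRoleBindingDeleted(cs clientset.Interface, ns, name string) error {
	return wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
		_, err := cs.RbacV1().RoleBindings(ns).Get(name, metav1.GetOptions{})
		return apierrors.IsNotFound(err), nil
	})
}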
@@ -33,7 +33,7 @@ import (
 rbacv1 "k8s.io/api/rbac/v1"
 storagev1 "k8s.io/api/storage/v1"
 storagev1beta1 "k8s.io/api/storage/v1beta1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/types"
@@ -810,7 +810,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 defer func() {
 framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
 err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 }
 }()
@@ -1039,7 +1039,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
 // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
 func deleteStorageClass(c clientset.Interface, className string) {
 err := c.StorageV1().StorageClasses().Delete(className, nil)
-if err != nil && !apierrs.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.ExpectNoError(err)
 }
 }
@@ -22,7 +22,7 @@ import (

 "github.com/onsi/ginkgo"
 v1 "k8s.io/api/core/v1"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -246,7 +246,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
 ginkgo.By("delete pvc")
 framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-if !apierrs.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
 framework.ExpectNoError(err)
 }
 }
@@ -21,7 +21,7 @@ import (

 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
-apierrs "k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 for _, sspod := range ssPodsBeforeScaleDown.Items {
 _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
 if err != nil {
-framework.ExpectEqual(apierrs.IsNotFound(err), true)
+framework.ExpectEqual(apierrors.IsNotFound(err), true)
 for _, volumespec := range sspod.Spec.Volumes {
 if volumespec.PersistentVolumeClaim != nil {
 vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@@ -22,7 +22,7 @@ import (
 "github.com/onsi/ginkgo"

 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/pkg/kubelet/sysctl"
@@ -60,7 +60,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u

 ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
 pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 framework.ExpectNoError(err)
 }
 if err == nil {
@@ -28,7 +28,7 @@ import (
 "strings"

 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/watch"
@@ -159,7 +159,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.
 _, err = watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
 switch e.Type {
 case watch.Deleted:
-return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
+return false, apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
 }
 switch t := e.Object.(type) {
 case *v1.Pod:
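apierrors.NewNotFound above manufactures the same *StatusError the API server would return, which is what lets callers further up the stack keep using apierrors.IsNotFound on it. A standalone illustration (not code from this change):

package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// notFoundExample builds a NotFound status error for a pod and shows that the
// predicate recognizes it just like a real server response.
func notFoundExample(podName string) error {
	err := apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, podName)
	if !apierrors.IsNotFound(err) {
		return fmt.Errorf("expected NotFound, got: %v", err)
	}
	return err
}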
@@ -21,7 +21,7 @@ import (

 v1 "k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 kubeapi "k8s.io/kubernetes/pkg/apis/core"
@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 })

 _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)

 // Create pods, starting with non-critical so that the critical preempts the other pods.
 f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -25,7 +25,7 @@ import (

 v1 "k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
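All of the predicates touched in this diff ultimately look at the metav1.StatusReason carried by the error, so a switch over apierrors.ReasonForError is an equivalent way to branch when several outcomes are acceptable. A hedged sketch (classify is an illustrative name, not part of this change):

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// classify maps an API error to a coarse outcome the way the tests above do
// with individual IsXxx helpers.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case apierrors.IsAlreadyExists(err):
		return "already exists (fine for idempotent create)"
	case apierrors.IsNotFound(err):
		return "not found (fine for idempotent delete)"
	case apierrors.ReasonForError(err) == metav1.StatusReasonForbidden:
		return "forbidden"
	default:
		return "unexpected: " + string(apierrors.ReasonForError(err))
	}
}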
@@ -25,7 +25,7 @@ import (

 v1 "k8s.io/api/core/v1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -177,7 +177,7 @@ func deleteStaticPod(dir, name, namespace string) error {

 func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
 _, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return nil
 }
 return goerrors.New("pod not disappear")
@@ -40,7 +40,7 @@ import (
 extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 policyv1beta1 "k8s.io/api/policy/v1beta1"
 apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"
@@ -678,7 +678,7 @@ func testResourceDelete(c *testContext) {
 // wait for the item to be gone
 err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
 obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return true, nil
 }
 if err == nil {
@@ -753,7 +753,7 @@ func testResourceDelete(c *testContext) {
 // wait for the item to be gone
 err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
 obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return true, nil
 }
 if err == nil {
@@ -801,7 +801,7 @@ func testResourceDeletecollection(c *testContext) {
 // wait for the item to be gone
 err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
 obj, err := c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return true, nil
 }
 if err == nil {
@@ -943,7 +943,7 @@ func testNamespaceDelete(c *testContext) {
 }
 // verify namespace is gone
 obj, err = c.client.Resource(c.gvr).Namespace(obj.GetNamespace()).Get(obj.GetName(), metav1.GetOptions{})
-if err == nil || !errors.IsNotFound(err) {
+if err == nil || !apierrors.IsNotFound(err) {
 c.t.Errorf("expected namespace to be gone, got %#v, %v", obj, err)
 }
 }
@@ -1048,7 +1048,7 @@ func testPodBindingEviction(c *testContext) {
 forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
 defer func() {
 err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(pod.GetName(), forceDelete)
-if err != nil && !errors.IsNotFound(err) {
+if err != nil && !apierrors.IsNotFound(err) {
 c.t.Error(err)
 return
 }
@@ -1414,7 +1414,7 @@ func createOrGetResource(client dynamic.Interface, gvr schema.GroupVersionResour
 if err == nil {
 return obj, nil
 }
-if !errors.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
 return nil, err
 }
 return client.Resource(gvr).Namespace(ns).Create(stubObj, metav1.CreateOptions{})
@@ -25,7 +25,7 @@ import (
 apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 "k8s.io/apiextensions-apiserver/test/integration/fixtures"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/types"
 genericfeatures "k8s.io/apiserver/pkg/features"
@@ -108,7 +108,7 @@ spec:
 if err == nil {
 t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
@@ -299,7 +299,7 @@ spec:
 if err == nil {
 t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
@@ -339,7 +339,7 @@ spec:
 if err == nil {
 t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
 }
-status, ok = err.(*errors.StatusError)
+status, ok = err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
@@ -504,7 +504,7 @@ spec:
 if err == nil {
 t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
@@ -698,7 +698,7 @@ spec:
 if err == nil {
 t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
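The err.(*apierrors.StatusError) assertions above exist because the conflict details (which manager owns which field) live in the structured Status, not in the error string. A small sketch of reading them, under the assumption that err came back from a server-side apply request; this is illustrative, not code from this change:

package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// describeConflicts lists the causes attached to an apply conflict error.
func describeConflicts(err error) ([]string, error) {
	status, ok := err.(*apierrors.StatusError)
	if !ok {
		return nil, fmt.Errorf("not an API status error: %v", err)
	}
	details := status.Status().Details
	if details == nil {
		return nil, nil
	}
	var out []string
	for _, cause := range details.Causes {
		out = append(out, fmt.Sprintf("%s: %s (%s)", cause.Type, cause.Message, cause.Field))
	}
	return out, nil
}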
@@ -28,7 +28,7 @@ import (
 "time"

 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -276,7 +276,7 @@ func TestCreateOnApplyFailsWithUID(t *testing.T) {
 }`)).
 Do().
 Get()
-if !errors.IsConflict(err) {
+if !apierrors.IsConflict(err) {
 t.Fatalf("Expected conflict error but got: %v", err)
 }
 }
@@ -348,7 +348,7 @@ func TestApplyUpdateApplyConflictForced(t *testing.T) {
 if err == nil {
 t.Fatalf("Expecting to get conflicts when applying object")
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get conflicts as API error")
 }
@@ -849,7 +849,7 @@ func TestApplyFailsWithVersionMismatch(t *testing.T) {
 if err == nil {
 t.Fatalf("Expecting to get version mismatch when applying object")
 }
-status, ok := err.(*errors.StatusError)
+status, ok := err.(*apierrors.StatusError)
 if !ok {
 t.Fatalf("Expecting to get version mismatch as API error")
 }
@@ -25,7 +25,7 @@ import (
 "testing"
 "time"

-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apiserver/pkg/server/dynamiccertificates"
@@ -146,7 +146,7 @@ MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps=
 func waitForConfigMapCAContent(t *testing.T, kubeClient kubernetes.Interface, key, content string, count int) func() (bool, error) {
 return func() (bool, error) {
 clusterAuthInfo, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get("extension-apiserver-authentication", metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return false, nil
 }
 if err != nil {
@@ -22,7 +22,7 @@ import (
 "testing"

 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
@@ -60,7 +60,7 @@ func TestMaxJSONPatchOperations(t *testing.T) {
 if err == nil {
 t.Fatalf("unexpected no error")
 }
-if !errors.IsRequestEntityTooLargeError(err) {
+if !apierrors.IsRequestEntityTooLargeError(err) {
 t.Errorf("expected requested entity too large err, got %v", err)
 }
 if !strings.Contains(err.Error(), "The allowed maximum operations in a JSON patch is") {
@@ -22,7 +22,6 @@ import (
 "testing"

 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -46,7 +45,7 @@ func TestMaxResourceSize(t *testing.T) {
 if err == nil {
 t.Fatalf("unexpected no error")
 }
-if !errors.IsRequestEntityTooLargeError(err) {
+if !apierrors.IsRequestEntityTooLargeError(err) {
 t.Errorf("expected requested entity too large err, got %v", err)

 }
@@ -69,7 +68,7 @@ func TestMaxResourceSize(t *testing.T) {
 if err == nil {
 t.Fatalf("unexpected no error")
 }
-if !errors.IsRequestEntityTooLargeError(err) {
+if !apierrors.IsRequestEntityTooLargeError(err) {
 t.Errorf("expected requested entity too large err, got %v", err)

 }
@@ -80,7 +79,7 @@ func TestMaxResourceSize(t *testing.T) {
 if err == nil {
 t.Fatalf("unexpected no error")
 }
-if !errors.IsRequestEntityTooLargeError(err) {
+if !apierrors.IsRequestEntityTooLargeError(err) {
 t.Errorf("expected requested entity too large err, got %v", err)

 }
@@ -89,7 +88,7 @@ func TestMaxResourceSize(t *testing.T) {
 patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`)
 err = rest.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
 Body(patchBody).Do().Error()
-if err != nil && !errors.IsBadRequest(err) {
+if err != nil && !apierrors.IsBadRequest(err) {
 t.Errorf("expected success or bad request err, got %v", err)
 }
 })
@@ -105,7 +104,7 @@ func TestMaxResourceSize(t *testing.T) {
 patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
 err = rest.Patch(types.MergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
 Body(patchBody).Do().Error()
-if err != nil && !errors.IsBadRequest(err) {
+if err != nil && !apierrors.IsBadRequest(err) {
 t.Errorf("expected success or bad request err, got %v", err)
 }
 })
@@ -121,7 +120,7 @@ func TestMaxResourceSize(t *testing.T) {
 patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
 err = rest.Patch(types.StrategicMergePatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
 Body(patchBody).Do().Error()
-if err != nil && !errors.IsBadRequest(err) {
+if err != nil && !apierrors.IsBadRequest(err) {
 t.Errorf("expected success or bad request err, got %v", err)
 }
 })
@@ -137,7 +136,7 @@ func TestMaxResourceSize(t *testing.T) {
 patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
 err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
 Body(patchBody).Do().Error()
-if err != nil && !errors.IsBadRequest(err) {
+if err != nil && !apierrors.IsBadRequest(err) {
 t.Errorf("expected success or bad request err, got %#v", err)
 }
 })
@@ -155,7 +154,7 @@ func TestMaxResourceSize(t *testing.T) {
 if err == nil {
 t.Fatalf("unexpected no error")
 }
-if !errors.IsRequestEntityTooLargeError(err) {
+if !apierrors.IsRequestEntityTooLargeError(err) {
 t.Errorf("expected requested entity too large err, got %v", err)

 }
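IsRequestEntityTooLargeError and IsBadRequest are plain predicates over the response status, so they can be exercised without a live server by building the corresponding errors with the package's own constructors. A hedged, test-style sketch (not part of this change):

package example

import (
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// TestSizePredicates checks that the constructors and predicates in
// k8s.io/apimachinery/pkg/api/errors agree with each other.
func TestSizePredicates(t *testing.T) {
	tooLarge := apierrors.NewRequestEntityTooLargeError("limit is 3145728")
	if !apierrors.IsRequestEntityTooLargeError(tooLarge) {
		t.Errorf("expected RequestEntityTooLarge to be detected, got %v", tooLarge)
	}
	badReq := apierrors.NewBadRequest("the request is invalid")
	if !apierrors.IsBadRequest(badReq) {
		t.Errorf("expected BadRequest to be detected, got %v", badReq)
	}
}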
@@ -25,7 +25,7 @@ import (
 "github.com/google/uuid"

 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -86,7 +86,7 @@ func TestPatchConflicts(t *testing.T) {
 Do().
 Get()

-if errors.IsConflict(err) {
+if apierrors.IsConflict(err) {
 t.Logf("tolerated conflict error patching %s: %v", "secrets", err)
 return
 }
@@ -28,7 +28,7 @@ import (
 policy "k8s.io/api/policy/v1beta1"
 storagev1 "k8s.io/api/storage/v1"
 apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -638,14 +638,14 @@ func expect(t *testing.T, f func() error, wantErr func(error) bool) (timeout boo

 func expectForbidden(t *testing.T, f func() error) {
 t.Helper()
-if ok, err := expect(t, f, errors.IsForbidden); !ok {
+if ok, err := expect(t, f, apierrors.IsForbidden); !ok {
 t.Errorf("Expected forbidden error, got %v", err)
 }
 }

 func expectNotFound(t *testing.T, f func() error) {
 t.Helper()
-if ok, err := expect(t, f, errors.IsNotFound); !ok {
+if ok, err := expect(t, f, apierrors.IsNotFound); !ok {
 t.Errorf("Expected notfound error, got %v", err)
 }
 }
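expectForbidden and expectNotFound above work because every apierrors.IsXxx helper has the same func(error) bool shape, so the expectation can be passed around as a value. A condensed sketch of that pattern outside the test framework (names are illustrative, not from this change):

package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// expectError runs f and verifies the returned error satisfies the predicate.
// Any apierrors.IsXxx helper can be passed as wantErr, since they all share
// the func(error) bool shape.
func expectError(f func() error, wantErr func(error) bool, desc string) error {
	if err := f(); !wantErr(err) {
		return fmt.Errorf("expected %s error, got %v", desc, err)
	}
	return nil
}

// expectNotFound is the specialization used by the tests above.
func expectNotFound(f func() error) error {
	return expectError(f, apierrors.IsNotFound, "notfound")
}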
@@ -25,7 +25,7 @@ import (

 apps "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/intstr"
@@ -310,7 +310,7 @@ func validateDaemonSetPodsAndMarkReady(
 func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
 return func() (bool, error) {
 pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return false, nil
 }
 if err != nil {
@@ -21,7 +21,7 @@ import (

 v1 "k8s.io/api/core/v1"
 apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -55,7 +55,7 @@ func DryRunCreateTest(t *testing.T, rsc dynamic.ResourceInterface, obj *unstruct
 obj.GroupVersionKind())
 }

-if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !errors.IsNotFound(err) {
+if _, err := rsc.Get(obj.GetName(), metav1.GetOptions{}); !apierrors.IsNotFound(err) {
 t.Fatalf("object shouldn't exist: %v", err)
 }
 }
@@ -92,7 +92,7 @@ func getReplicasOrFail(t *testing.T, obj *unstructured.Unstructured) int64 {

 func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
 obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return
 }
 if err != nil {
@@ -119,7 +119,7 @@ func DryRunScalePatchTest(t *testing.T, rsc dynamic.ResourceInterface, name stri

 func DryRunScaleUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string) {
 obj, err := rsc.Get(name, metav1.GetOptions{}, "scale")
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return
 }
 if err != nil {
@@ -156,7 +156,7 @@ func DryRunUpdateTest(t *testing.T, rsc dynamic.ResourceInterface, name string)
 }
 obj.SetAnnotations(map[string]string{"update": "true"})
 obj, err = rsc.Update(obj, metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
-if err == nil || !errors.IsConflict(err) {
+if err == nil || !apierrors.IsConflict(err) {
 break
 }
 }
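The dry-run loop above retries by hand when apierrors.IsConflict fires. For ordinary (non-dry-run) updates, client-go ships a ready-made helper with the same semantics; a hedged sketch of using it, with the pre-1.18 client signatures and a hypothetical annotatePod helper, not something introduced by this PR:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// annotatePod re-reads and updates the pod until the write stops conflicting.
// retry.RetryOnConflict checks IsConflict internally to decide whether to try
// again with a fresh read.
func annotatePod(cs clientset.Interface, ns, name, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if pod.Annotations == nil {
			pod.Annotations = map[string]string{}
		}
		pod.Annotations[key] = value
		_, err = cs.CoreV1().Pods(ns).Update(pod)
		return err
	})
}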
@@ -28,7 +28,7 @@ import (

 "k8s.io/api/core/v1"
 "k8s.io/api/policy/v1beta1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 "k8s.io/apimachinery/pkg/util/intstr"
@@ -114,9 +114,9 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
 e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction)
 switch {
-case errors.IsTooManyRequests(e):
+case apierrors.IsTooManyRequests(e):
 return false, nil
-case errors.IsConflict(e):
+case apierrors.IsConflict(e):
 return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
 case e == nil:
 return true, nil
@@ -132,7 +132,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {

 _, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{})
 switch {
-case errors.IsNotFound(err):
+case apierrors.IsNotFound(err):
 atomic.AddUint32(&numberPodsEvicted, 1)
 // pod was evicted and deleted so return from goroutine immediately
 return
@@ -222,9 +222,9 @@ func TestTerminalPodEviction(t *testing.T) {
 err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
 e := clientSet.PolicyV1beta1().Evictions(ns.Name).Evict(eviction)
 switch {
-case errors.IsTooManyRequests(e):
+case apierrors.IsTooManyRequests(e):
 return false, nil
-case errors.IsConflict(e):
+case apierrors.IsConflict(e):
 return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e)
 case e == nil:
 return true, nil
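A 429 (IsTooManyRequests) on an eviction means the PodDisruptionBudget currently forbids it, which is why the loops above poll instead of failing immediately. A compact sketch of the same retry, with hypothetical names and the policy/v1beta1 eviction API used in this diff:

package example

import (
	"time"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// evictWithRetry keeps submitting the eviction while the PDB rejects it with
// 429 TooManyRequests; any other error aborts the poll.
func evictWithRetry(cs clientset.Interface, ns, podName string) error {
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: ns},
	}
	return wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) {
		err := cs.PolicyV1beta1().Evictions(ns).Evict(eviction)
		switch {
		case err == nil:
			return true, nil
		case apierrors.IsTooManyRequests(err):
			return false, nil // disruption budget exhausted, try again
		default:
			return false, err
		}
	})
}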
@@ -24,7 +24,7 @@ import (
 "time"

 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -119,7 +119,7 @@ func TestClusterScopedOwners(t *testing.T) {
 if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
 _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{})
 switch {
-case errors.IsNotFound(err):
+case apierrors.IsNotFound(err):
 return true, nil
 case err != nil:
 return false, err
@@ -28,7 +28,7 @@ import (
 apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
-"k8s.io/apimachinery/pkg/api/errors"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -698,7 +698,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
 if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
 _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
 if err != nil {
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return true, nil
 }
 return false, err
@@ -766,7 +766,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
 if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
 _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
 if err != nil {
-if errors.IsNotFound(err) {
+if apierrors.IsNotFound(err) {
 return true, nil
 }
 return false, err
@@ -843,7 +843,7 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) {
 }
 if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
 _, err := podClient.Get(pod.Name, metav1.GetOptions{})
-return errors.IsNotFound(err), nil
+return apierrors.IsNotFound(err), nil
 }); err != nil {
 t.Fatalf("Failed waiting for pod %q to be deleted", pod.Name)
 }
@@ -950,7 +950,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
 // Ensure the owner is deleted.
 if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
 _, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{})
-return errors.IsNotFound(err), nil
+return apierrors.IsNotFound(err), nil
 }); err != nil {
 t.Fatalf("failed waiting for owner resource %q to be deleted", owner.GetName())
 }
@@ -960,7 +960,7 @@ func TestCustomResourceCascadingDeletion(t *testing.T) {
 if err == nil {
 t.Fatalf("expected dependent %q to be deleted", dependent.GetName())
 } else {
-if !errors.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
 t.Fatalf("unexpected error getting dependent %q: %v", dependent.GetName(), err)
 }
 }
@@ -1028,7 +1028,7 @@ func TestMixedRelationships(t *testing.T) {
 // Ensure the owner is deleted.
 if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
 _, err := resourceClient.Get(customOwner.GetName(), metav1.GetOptions{})
-return errors.IsNotFound(err), nil
+return apierrors.IsNotFound(err), nil
 }); err != nil {
 t.Fatalf("failed waiting for owner resource %q to be deleted", customOwner.GetName())
 }
@@ -1038,7 +1038,7 @@ func TestMixedRelationships(t *testing.T) {
 if err == nil {
 t.Fatalf("expected dependent %q to be deleted", coreDependent.GetName())
 } else {
-if !errors.IsNotFound(err) {
+if !apierrors.IsNotFound(err) {
 t.Fatalf("unexpected error getting dependent %q: %v", coreDependent.GetName(), err)
 }
 }
@@ -1052,7 +1052,7 @@ func TestMixedRelationships(t *testing.T) {
 // Ensure the owner is deleted.
 if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
 _, err := configMapClient.Get(coreOwner.GetName(), metav1.GetOptions{})
-return errors.IsNotFound(err), nil
|
return apierrors.IsNotFound(err), nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
t.Fatalf("failed waiting for owner resource %q to be deleted", coreOwner.GetName())
|
t.Fatalf("failed waiting for owner resource %q to be deleted", coreOwner.GetName())
|
||||||
}
|
}
|
||||||
@ -1062,7 +1062,7 @@ func TestMixedRelationships(t *testing.T) {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("expected dependent %q to be deleted", customDependent.GetName())
|
t.Fatalf("expected dependent %q to be deleted", customDependent.GetName())
|
||||||
} else {
|
} else {
|
||||||
if !errors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
t.Fatalf("unexpected error getting dependent %q: %v", customDependent.GetName(), err)
|
t.Fatalf("unexpected error getting dependent %q: %v", customDependent.GetName(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1123,7 +1123,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio
|
|||||||
// Ensure the owner is deleted.
|
// Ensure the owner is deleted.
|
||||||
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
|
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
|
||||||
_, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{})
|
_, err := resourceClient.Get(owner.GetName(), metav1.GetOptions{})
|
||||||
return errors.IsNotFound(err), nil
|
return apierrors.IsNotFound(err), nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
t.Fatalf("failed waiting for owner %q to be deleted", owner.GetName())
|
t.Fatalf("failed waiting for owner %q to be deleted", owner.GetName())
|
||||||
}
|
}
|
||||||
@ -1131,7 +1131,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio
|
|||||||
// Ensure the dependent is deleted.
|
// Ensure the dependent is deleted.
|
||||||
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
|
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
|
||||||
_, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{})
|
_, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{})
|
||||||
return errors.IsNotFound(err), nil
|
return apierrors.IsNotFound(err), nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
t.Fatalf("failed waiting for dependent %q (owned by %q) to be deleted", dependent.GetName(), owner.GetName())
|
t.Fatalf("failed waiting for dependent %q (owned by %q) to be deleted", dependent.GetName(), owner.GetName())
|
||||||
}
|
}
|
||||||
|
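Every hunk in this part of the diff applies the same mechanical change: the bare import of k8s.io/apimachinery/pkg/api/errors gains the apierrors alias, and call sites such as errors.IsNotFound become apierrors.IsNotFound. As a reference for the recurring pattern these tests use, here is a minimal sketch of a poll-until-deleted helper written against the unified alias. The package name, the helper waitForConfigMapDeleted, its parameters, and the timeouts are illustrative only and do not appear in this PR; the client calls follow the pre-context signatures used in the diff itself.

package gcutil // illustrative package name, not part of this PR

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors" // unified alias, as used throughout this PR
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForConfigMapDeleted polls until the named ConfigMap is gone.
// A NotFound error means the delete has completed; any other error aborts the poll.
func waitForConfigMapDeleted(client kubernetes.Interface, ns, name string) error {
	return wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
		_, err := client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{})
		switch {
		case apierrors.IsNotFound(err):
			return true, nil // object is gone; stop polling
		case err != nil:
			return false, err // unexpected error; fail fast
		default:
			return false, nil // still present; keep polling
		}
	})
}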
Some files were not shown because too many files have changed in this diff.