refactor: Move *Options references to metav1

Clayton Coleman 2017-01-24 10:38:21 -05:00
parent 7b1c715496
commit be6d2933df
92 changed files with 240 additions and 244 deletions
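
The pattern across all 92 files is the same: option structs that call sites previously took from an API group's own package (api, apiv1, v1, kapi) now come from the shared k8s.io/apimachinery/pkg/apis/meta/v1 package. A minimal before/after sketch of a typical call site, assuming the generated clientset of this era (the helper name deletePod is hypothetical):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// Before this commit the same call read:
//     client.Core().Pods(ns).Delete(name, &v1.DeleteOptions{})
// Afterwards the options struct comes from metav1, as do GetOptions and ListOptions.
func deletePod(client clientset.Interface, ns, name string) error {
    return client.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{})
}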

View File

@ -50,7 +50,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&APIServiceList{},
&kapi.ListOptions{},
&kapi.DeleteOptions{},
&metav1.DeleteOptions{},
&metav1.GetOptions{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)

View File

@ -295,7 +295,7 @@ func createDummyDeployment(client *clientset.Clientset) {
fmt.Println("[apiclient] Test deployment succeeded")
// TODO: In the future, make sure the ReplicaSet and Pod are garbage collected
if err := client.Extensions().Deployments(metav1.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil {
if err := client.Extensions().Deployments(metav1.NamespaceSystem).Delete("dummy", &metav1.DeleteOptions{}); err != nil {
fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err)
}
}

View File

@ -19,10 +19,11 @@ package coredns
import (
"encoding/json"
"fmt"
"hash/fnv"
etcdc "github.com/coreos/etcd/client"
dnsmsg "github.com/miekg/coredns/middleware/etcd/msg"
"golang.org/x/net/context"
"hash/fnv"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
)

View File

@ -18,9 +18,10 @@ limitations under the License.
package stubs
import (
"strings"
etcd "github.com/coreos/etcd/client"
"golang.org/x/net/context"
"strings"
)
// Compile time check for interface conformance

View File

@ -156,7 +156,7 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{})
return err
})
return configmapcontroller

View File

@ -176,7 +176,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &apiv1.DeleteOptions{})
err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &metav1.DeleteOptions{})
if err != nil {
glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {

View File

@ -197,7 +197,7 @@ func NewDeploymentController(federationClient fedclientset.Interface) *DeploymentController {
},
func(client kubeclientset.Interface, obj runtime.Object) error {
rs := obj.(*extensionsv1.Deployment)
err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &apiv1.DeleteOptions{})
err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
return err
})

View File

@ -252,7 +252,7 @@ func NewIngressController(client federationclientset.Interface) *IngressController {
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{})
err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})
return err
})
@ -281,7 +281,7 @@ func NewIngressController(client federationclientset.Interface) *IngressController {
configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName)
err := client.Core().ConfigMaps(configMap.Namespace).Delete(configMap.Name, &v1.DeleteOptions{})
err := client.Core().ConfigMaps(configMap.Namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
return err
})

View File

@ -157,7 +157,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
},
func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*apiv1.Namespace)
err := client.Core().Namespaces().Delete(namespace.Name, &apiv1.DeleteOptions{})
err := client.Core().Namespaces().Delete(namespace.Name, &metav1.DeleteOptions{})
// IsNotFound error is fine since that means the object is deleted already.
if errors.IsNotFound(err) {
return nil
@ -492,31 +492,31 @@ func (nc *NamespaceController) removeKubernetesFinalizer(namespace *apiv1.Namesp
// Right now there are just 7 types of objects: Deployments, DaemonSets, ReplicaSets, Secrets, Ingresses, Events and Services.
// Temporarily these items are simply deleted one by one to squeeze this code into 1.4.
// TODO: Make it generic (like in the regular namespace controller) and parallel.
err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete service list: %v", err)
}
err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete replicaset list from namespace: %v", err)
}
err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete secret list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete ingresses list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete daemonsets list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete deployments list from namespace: %v", err)
}
err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, metav1.ListOptions{})
err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete events list from namespace: %v", err)
}

View File

@ -201,7 +201,7 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
},
func(client kubeclientset.Interface, obj runtime.Object) error {
rs := obj.(*extensionsv1.ReplicaSet)
err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &apiv1.DeleteOptions{})
err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{})
return err
})

View File

@ -161,7 +161,7 @@ func NewSecretController(client federationclientset.Interface) *SecretController {
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &apiv1.DeleteOptions{})
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{})
return err
})

View File

@ -282,7 +282,7 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
svc := obj.(*v1.Service)
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{})
err := client.Core().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
return err
})

View File

@ -22,10 +22,10 @@ import (
"net/url"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
federationapi "k8s.io/kubernetes/federation/apis/federation"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
@ -139,7 +139,7 @@ func deleteSecret(hostFactory cmdutil.Factory, name, namespace string) error {
if err != nil {
return err
}
return clientset.Core().Secrets(namespace).Delete(name, &api.DeleteOptions{})
return clientset.Core().Secrets(namespace).Delete(name, &metav1.DeleteOptions{})
}
// isNotFound checks if the given error is a NotFound status error.

View File

@ -47,7 +47,7 @@ func OpaqueIntResourceName(name string) ResourceName {
// NewDeleteOptions returns a DeleteOptions indicating the resource should
// be deleted within the specified grace period. Use zero to indicate
// immediate deletion. If you would prefer to use the default grace period,
// use &api.DeleteOptions{} directly.
// use &metav1.DeleteOptions{} directly.
func NewDeleteOptions(grace int64) *DeleteOptions {
return &DeleteOptions{GracePeriodSeconds: &grace}
}
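
metav1 ships the same constructor after the move, so a caller choosing between an explicit and the default grace period looks roughly like this sketch (deleteWithGrace and its arguments are hypothetical; imports as in the first sketch; compare runDelete and ReapResult further down):

// A non-negative grace period goes through NewDeleteOptions; a negative one
// falls back to the server default via an empty DeleteOptions, as the
// comment above recommends.
func deleteWithGrace(client clientset.Interface, ns, name string, gracePeriod int64) error {
    if gracePeriod >= 0 {
        return client.Core().Pods(ns).Delete(name, metav1.NewDeleteOptions(gracePeriod))
    }
    return client.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{})
}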

View File

@ -138,7 +138,7 @@ func (rc *ResourceClient) Get(name string) (*unstructured.Unstructured, error) {
}
// Delete deletes the resource with the specified name.
func (rc *ResourceClient) Delete(name string, opts *v1.DeleteOptions) error {
func (rc *ResourceClient) Delete(name string, opts *metav1.DeleteOptions) error {
return rc.cl.Delete().
NamespaceIfScoped(rc.ns, rc.resource.Namespaced).
Resource(rc.resource.Name).
@ -149,7 +149,7 @@ func (rc *ResourceClient) Delete(name string, opts *v1.DeleteOptions) error {
}
// DeleteCollection deletes a collection of objects.
func (rc *ResourceClient) DeleteCollection(deleteOptions *v1.DeleteOptions, listOptions runtime.Object) error {
func (rc *ResourceClient) DeleteCollection(deleteOptions *metav1.DeleteOptions, listOptions runtime.Object) error {
parameterEncoder := rc.parameterCodec
if parameterEncoder == nil {
parameterEncoder = defaultParameterEncoder

View File

@ -624,8 +624,8 @@ func (gc *GarbageCollector) deleteObject(item objectReference) error {
return err
}
uid := item.UID
preconditions := v1.Preconditions{UID: &uid}
deleteOptions := v1.DeleteOptions{Preconditions: &preconditions}
preconditions := metav1.Preconditions{UID: &uid}
deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions}
return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
}
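
metav1 also provides constructors for preconditioned deletes; the hunk above builds the structs by hand, while later hunks in this commit use the helpers, roughly as in this sketch (deleteExactPod is hypothetical; imports as in the first sketch plus "k8s.io/kubernetes/pkg/api/v1"):

// The UID precondition makes the delete conditional: if the object was
// deleted and recreated under the same name, the stored UID differs and the
// server rejects the request with a conflict rather than deleting the new object.
func deleteExactPod(client clientset.Interface, pod *v1.Pod) error {
    opts := metav1.NewDeleteOptions(0)
    opts.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
    return client.Core().Pods(pod.Namespace).Delete(pod.Name, opts)
}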

View File

@ -178,7 +178,7 @@ func deleteCollection(
// resource deletions generically. It will ensure all resources in the namespace are purged prior to releasing
// namespace itself.
orphanDependents := false
err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &metav1.ListOptions{})
err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&metav1.DeleteOptions{OrphanDependents: &orphanDependents}, &metav1.ListOptions{})
if err == nil {
return true, nil
@ -406,10 +406,10 @@ func syncNamespace(
// if the namespace is already finalized, delete it
if finalized(namespace) {
var opts *v1.DeleteOptions
var opts *metav1.DeleteOptions
uid := namespace.UID
if len(uid) > 0 {
opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
}
err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
if err != nil && !errors.IsNotFound(err) {

View File

@ -122,7 +122,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
}

View File

@ -160,7 +160,7 @@ func (m *FakeNodeHandler) List(opts metav1.ListOptions) (*v1.NodeList, error) {
}
// Delete deletes a Node from the fake store.
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
func (m *FakeNodeHandler) Delete(id string, opt *metav1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
@ -174,7 +174,7 @@ func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
}
// DeleteCollection deletes a collection of Nodes from the fake store.
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts metav1.ListOptions) error {
func (m *FakeNodeHandler) DeleteCollection(opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return nil
}

View File

@ -67,7 +67,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
},
}

View File

@ -351,9 +351,9 @@ func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*retry*/ bool, error) {
}
func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
var opts *v1.DeleteOptions
var opts *metav1.DeleteOptions
if len(uid) > 0 {
opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
}
err := e.client.Core().Secrets(ns).Delete(name, opts)
// NotFound doesn't need a retry (it's already been deleted)
@ -438,7 +438,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
if _, err = serviceAccounts.Update(liveServiceAccount); err != nil {
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &createdToken.UID}}
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
glog.Error(deleteErr) // if we fail, just log it
}

View File

@ -338,7 +338,7 @@ type SimpleRESTStorage struct {
stream *SimpleStream
deleted string
deleteOptions *api.DeleteOptions
deleteOptions *metav1.DeleteOptions
actualNamespace string
namespacePresent bool
@ -439,7 +439,7 @@ func (storage *SimpleRESTStorage) checkContext(ctx request.Context) {
storage.actualNamespace, storage.namespacePresent = request.NamespaceFrom(ctx)
}
func (storage *SimpleRESTStorage) Delete(ctx request.Context, id string, options *api.DeleteOptions) (runtime.Object, error) {
func (storage *SimpleRESTStorage) Delete(ctx request.Context, id string, options *metav1.DeleteOptions) (runtime.Object, error) {
storage.checkContext(ctx)
storage.deleted = id
storage.deleteOptions = options
@ -1942,7 +1942,7 @@ func TestDeleteWithOptions(t *testing.T) {
defer server.Close()
grace := int64(300)
item := &api.DeleteOptions{
item := &metav1.DeleteOptions{
GracePeriodSeconds: &grace,
}
body, err := runtime.Encode(codec, item)
@ -1983,7 +1983,7 @@ func TestDeleteWithOptionsQuery(t *testing.T) {
defer server.Close()
grace := int64(300)
item := &api.DeleteOptions{
item := &metav1.DeleteOptions{
GracePeriodSeconds: &grace,
}
@ -2020,7 +2020,7 @@ func TestDeleteWithOptionsQueryAndBody(t *testing.T) {
defer server.Close()
grace := int64(300)
item := &api.DeleteOptions{
item := &metav1.DeleteOptions{
GracePeriodSeconds: &grace,
}
body, err := runtime.Encode(codec, item)
@ -2086,7 +2086,7 @@ func TestLegacyDeleteIgnoresOptions(t *testing.T) {
server := httptest.NewServer(handler)
defer server.Close()
item := api.NewDeleteOptions(300)
item := metav1.NewDeleteOptions(300)
body, err := runtime.Encode(codec, item)
if err != nil {
t.Fatalf("unexpected error: %v", err)

View File

@ -39,7 +39,6 @@ import (
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/genericapiserver/endpoints/handlers/responsewriters"
"k8s.io/kubernetes/pkg/genericapiserver/registry/rest"
"k8s.io/kubernetes/pkg/util"
@ -811,7 +810,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco
ctx := scope.ContextFunc(req)
ctx = request.WithNamespace(ctx, namespace)
options := &api.DeleteOptions{}
options := &metav1.DeleteOptions{}
if allowsOptions {
body, err := readBody(req.Request)
if err != nil {
@ -935,7 +934,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestSco
}
}
options := &api.DeleteOptions{}
options := &metav1.DeleteOptions{}
if checkBody {
body, err := readBody(req.Request)
if err != nil {

View File

@ -531,7 +531,7 @@ var (
// - options.OrphanDependents,
// - existing finalizers of the object
// - e.DeleteStrategy.DefaultGarbageCollectionPolicy
func shouldUpdateFinalizers(e *Store, accessor metav1.Object, options *api.DeleteOptions) (shouldUpdate bool, newFinalizers []string) {
func shouldUpdateFinalizers(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (shouldUpdate bool, newFinalizers []string) {
shouldOrphan := false
// Get default orphan policy from this REST object type
if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok {
@ -608,7 +608,7 @@ func markAsDeleting(obj runtime.Object) (err error) {
// should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletion(ctx genericapirequest.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
func (e *Store) updateForGracefulDeletion(ctx genericapirequest.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
lastGraceful := int64(0)
out = e.NewFunc()
err = e.Storage.GuaranteedUpdate(
@ -670,7 +670,7 @@ func (e *Store) updateForGracefulDeletion(ctx genericapirequest.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
// should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
lastGraceful := int64(0)
var pendingFinalizers bool
out = e.NewFunc()
@ -750,7 +750,7 @@ func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
}
// Delete removes the item from storage.
func (e *Store) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
func (e *Store) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
key, err := e.KeyFunc(ctx, name)
if err != nil {
return nil, err
@ -762,7 +762,7 @@ func (e *Store) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
}
// support older consumers of delete by treating "nil" as delete immediately
if options == nil {
options = api.NewDeleteOptions(0)
options = metav1.NewDeleteOptions(0)
}
var preconditions storage.Preconditions
if options.Preconditions != nil {
@ -831,7 +831,7 @@ func (e *Store) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
// are removing all objects of a given type) with the current API (it's technically
// possible with storage API, but watch is not delivered correctly then).
// It will be possible to fix it with v3 etcd API.
func (e *Store) DeleteCollection(ctx genericapirequest.Context, options *api.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
func (e *Store) DeleteCollection(ctx genericapirequest.Context, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
listObj, err := e.List(ctx, listOptions)
if err != nil {
return nil, err

View File

@ -54,7 +54,7 @@ type testGracefulStrategy struct {
testRESTStrategy
}
func (t testGracefulStrategy) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *api.DeleteOptions) bool {
func (t testGracefulStrategy) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *metav1.DeleteOptions) bool {
return true
}
@ -322,7 +322,7 @@ func TestStoreCreate(t *testing.T) {
}
// now delete pod with graceful period set
delOpts := &api.DeleteOptions{GracePeriodSeconds: &gracefulPeriod}
delOpts := &metav1.DeleteOptions{GracePeriodSeconds: &gracefulPeriod}
_, err = registry.Delete(testContext, podA.Name, delOpts)
if err != nil {
t.Fatalf("Failed to delete pod gracefully. Unexpected error: %v", err)
@ -646,7 +646,7 @@ func TestGracefulStoreCanDeleteIfExistingGracePeriodZero(t *testing.T) {
registry.DeleteStrategy = testGracefulStrategy{defaultDeleteStrategy}
defer destroyFunc()
graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, testContext, pod, api.NewDeleteOptions(0))
graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, testContext, pod, metav1.NewDeleteOptions(0))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@ -678,7 +678,7 @@ func TestGracefulStoreHandleFinalizers(t *testing.T) {
}
// delete the pod with grace period=0, the pod should still exist because it has a finalizer
_, err = registry.Delete(testContext, podWithFinalizer.Name, api.NewDeleteOptions(0))
_, err = registry.Delete(testContext, podWithFinalizer.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@ -820,9 +820,9 @@ func TestStoreDeleteWithOrphanDependents(t *testing.T) {
}
}
trueVar, falseVar := true, false
orphanOptions := &api.DeleteOptions{OrphanDependents: &trueVar}
nonOrphanOptions := &api.DeleteOptions{OrphanDependents: &falseVar}
nilOrphanOptions := &api.DeleteOptions{}
orphanOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
nonOrphanOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
nilOrphanOptions := &metav1.DeleteOptions{}
// defaultDeleteStrategy doesn't implement rest.GarbageCollectionDeleteStrategy.
defaultDeleteStrategy := &testRESTStrategy{api.Scheme, names.SimpleNameGenerator, true, false, true}
@ -832,7 +832,7 @@ func TestStoreDeleteWithOrphanDependents(t *testing.T) {
testcases := []struct {
pod *api.Pod
options *api.DeleteOptions
options *metav1.DeleteOptions
strategy rest.RESTDeleteStrategy
expectNotFound bool
updatedFinalizers []string

View File

@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/api"
)
// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes
@ -53,7 +52,7 @@ type GarbageCollectionDeleteStrategy interface {
type RESTGracefulDeleteStrategy interface {
// CheckGracefulDelete should return true if the object can be gracefully deleted and set
// any default values on the DeleteOptions.
CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *api.DeleteOptions) bool
CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *metav1.DeleteOptions) bool
}
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object
@ -62,7 +61,7 @@ type RESTGracefulDeleteStrategy interface {
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true. The second place where we set deletionTimestamp is pkg/registry/generic/registry/store.go;
// this function is responsible for setting deletionTimestamp during graceful deletion, the other one for cascading deletions.
func BeforeDelete(strategy RESTDeleteStrategy, ctx genericapirequest.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
func BeforeDelete(strategy RESTDeleteStrategy, ctx genericapirequest.Context, obj runtime.Object, options *metav1.DeleteOptions) (graceful, gracefulPending bool, err error) {
objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
if kerr != nil {
return false, false, kerr

View File

@ -132,7 +132,7 @@ type GracefulDeleter interface {
// returned error value err when the specified resource is not found.
// Delete *may* return the object that was deleted, or a status object indicating additional
// information about deletion.
Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error)
Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, error)
}
// GracefulDeleteAdapter adapts the Deleter interface to GracefulDeleter
@ -141,7 +141,7 @@ type GracefulDeleteAdapter struct {
}
// Delete implements RESTGracefulDeleter in terms of Deleter
func (w GracefulDeleteAdapter) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
func (w GracefulDeleteAdapter) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
return w.Deleter.Delete(ctx, name)
}
@ -153,7 +153,7 @@ type CollectionDeleter interface {
// them or return an invalid request error.
// DeleteCollection may not be atomic - i.e. it may delete some objects and still
// return an error after it. On success, returns a list of deleted objects.
DeleteCollection(ctx genericapirequest.Context, options *api.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error)
DeleteCollection(ctx genericapirequest.Context, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error)
}
// Creater is an object that can create an instance of a RESTful object.
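
On the client side the same pair of structs drives DeleteCollection: DeleteOptions controls how each object is deleted, while ListOptions selects which objects are in scope. A sketch matching the federation namespace controller hunk earlier in this commit (purgeSecrets is hypothetical; imports as in the first sketch):

// Delete every Secret in a namespace with one request; the empty ListOptions
// matches everything, the empty DeleteOptions keeps the default behavior.
func purgeSecrets(client clientset.Interface, ns string) error {
    return client.Core().Secrets(ns).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
}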

View File

@ -763,7 +763,7 @@ func (t *Tester) testDeleteNoGraceful(obj runtime.Object, createFn CreateFunc, g
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(10))
obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(10))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -803,12 +803,12 @@ func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getF
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111"))
obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewPreconditionDeleteOptions("UID1111"))
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000"))
obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewPreconditionDeleteOptions("UID0000"))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -840,7 +840,7 @@ func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, createFn Creat
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.Generation
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, &api.DeleteOptions{})
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -871,7 +871,7 @@ func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, createFn Create
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.Generation
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -902,7 +902,7 @@ func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, createFn CreateFun
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.Generation
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -911,7 +911,7 @@ func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, createFn CreateFun
}
// second delete duration is ignored
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -938,7 +938,7 @@ func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, createFn Create
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.Generation
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -947,7 +947,7 @@ func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, createFn Create
}
// second delete is immediate, resource is deleted
out, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(0))
out, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -997,7 +997,7 @@ func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFu
bigGrace = 2 * expectedGrace
}
objectMeta := t.getObjectMetaOrFail(foo)
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(bigGrace))
_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(bigGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1009,7 +1009,7 @@ func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFu
deletionTimestamp := *objectMeta.DeletionTimestamp
// second delete duration is ignored
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}

View File

@ -499,9 +499,9 @@ func runDelete(namespace, name string, mapping *meta.RESTMapping, c resource.RES
}
return resource.NewHelper(c, mapping).Delete(namespace, name)
}
var options *api.DeleteOptions
var options *metav1.DeleteOptions
if gracePeriod >= 0 {
options = api.NewDeleteOptions(int64(gracePeriod))
options = metav1.NewDeleteOptions(int64(gracePeriod))
}
if err := r.Stop(namespace, name, 2*time.Minute, options); err != nil {
return err

View File

@ -25,9 +25,9 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@ -255,9 +255,9 @@ func ReapResult(r *resource.Result, f cmdutil.Factory, out io.Writer, isDefaultD
}
return cmdutil.AddSourceToErr("reaping", info.Source, err)
}
var options *api.DeleteOptions
var options *metav1.DeleteOptions
if gracePeriod >= 0 {
options = api.NewDeleteOptions(int64(gracePeriod))
options = metav1.NewDeleteOptions(int64(gracePeriod))
}
if err := reaper.Stop(info.Namespace, info.Name, timeout, options); err != nil {
return cmdutil.AddSourceToErr("stopping", info.Source, err)

View File

@ -137,11 +137,11 @@ func TestDeleteObject(t *testing.T) {
type fakeReaper struct {
namespace, name string
timeout time.Duration
deleteOptions *api.DeleteOptions
deleteOptions *metav1.DeleteOptions
err error
}
func (r *fakeReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (r *fakeReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
r.namespace, r.name = namespace, name
r.timeout = timeout
r.deleteOptions = gracePeriod

View File

@ -418,7 +418,7 @@ func (o *DrainOptions) getPodsForDeletion() (pods []api.Pod, err error) {
}
func (o *DrainOptions) deletePod(pod api.Pod) error {
deleteOptions := &api.DeleteOptions{}
deleteOptions := &metav1.DeleteOptions{}
if o.GracePeriodSeconds >= 0 {
gracePeriodSeconds := int64(o.GracePeriodSeconds)
deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
@ -427,7 +427,7 @@ func (o *DrainOptions) deletePod(pod api.Pod) error {
}
func (o *DrainOptions) evictPod(pod api.Pod, policyGroupVersion string) error {
deleteOptions := &api.DeleteOptions{}
deleteOptions := &metav1.DeleteOptions{}
if o.GracePeriodSeconds >= 0 {
gracePeriodSeconds := int64(o.GracePeriodSeconds)
deleteOptions.GracePeriodSeconds = &gracePeriodSeconds

View File

@ -544,7 +544,7 @@ func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationContro
rc.ResourceVersion = ""
// First delete the oldName RC and orphan its pods.
trueVar := true
err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{OrphanDependents: &trueVar})
if err != nil && !errors.IsNotFound(err) {
return err
}

View File

@ -51,7 +51,7 @@ const (
// gracePeriod is time given to an API object for it to delete itself cleanly,
// e.g., pod shutdown. It may or may not be supported by the API object.
type Reaper interface {
Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error
Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error
}
type NoSuchReaperError struct {
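
Callers drive a Reaper the same way; a sketch mirroring the e2e Job test near the end of this commit (stopJob is hypothetical, and the kubectl, batchinternal, and internalclientset import paths are assumed from this era's tree):

// ReaperFor selects the reaper for a kind; Stop winds the object down
// before deleting it with the given options.
func stopJob(c internalclientset.Interface, ns, name string) error {
    reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), c)
    if err != nil {
        return err
    }
    return reaper.Stop(ns, name, time.Minute, metav1.NewDeleteOptions(0))
}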
@ -157,7 +157,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac
return matchingRCs, nil
}
func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
rc := reaper.client.ReplicationControllers(namespace)
scaler := &ReplicationControllerScaler{reaper.client}
ctrl, err := rc.Get(name, metav1.GetOptions{})
@ -215,7 +215,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
}
}
falseVar := false
deleteOptions := &api.DeleteOptions{OrphanDependents: &falseVar}
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
return rc.Delete(name, deleteOptions)
}
@ -226,7 +226,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten
return overlappingRSs, exactMatchRSs, nil
}
func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
rsc := reaper.client.ReplicaSets(namespace)
scaler := &ReplicaSetScaler{reaper.client}
rs, err := rsc.Get(name, metav1.GetOptions{})
@ -286,11 +286,11 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
}
falseVar := false
deleteOptions := &api.DeleteOptions{OrphanDependents: &falseVar}
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
return rsc.Delete(name, deleteOptions)
}
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
ds, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return err
@ -325,7 +325,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
return reaper.client.DaemonSets(namespace).Delete(name, nil)
}
func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
statefulsets := reaper.client.StatefulSets(namespace)
scaler := &StatefulSetScaler{reaper.client}
ps, err := statefulsets.Get(name, metav1.GetOptions{})
@ -369,7 +369,7 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
return statefulsets.Delete(name, nil)
}
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
jobs := reaper.client.Jobs(namespace)
pods := reaper.podClient.Pods(namespace)
scaler := &JobScaler{reaper.client}
@ -412,7 +412,7 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
return jobs.Delete(name, nil)
}
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
deployments := reaper.dClient.Deployments(namespace)
replicaSets := reaper.rsClient.ReplicaSets(namespace)
rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout}
@ -468,7 +468,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
// Delete deployment at the end.
// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
var falseVar = false
nonOrphanOption := api.DeleteOptions{OrphanDependents: &falseVar}
nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
return deployments.Delete(name, &nonOrphanOption)
}
@ -494,7 +494,7 @@ func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name stri
return deployment, err
}
func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
pods := reaper.client.Pods(namespace)
_, err := pods.Get(name, metav1.GetOptions{})
if err != nil {
@ -503,7 +503,7 @@ func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
return pods.Delete(name, gracePeriod)
}
func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error {
services := reaper.client.Services(namespace)
_, err := services.Get(name, metav1.GetOptions{})
if err != nil {

View File

@ -535,7 +535,7 @@ type noDeleteService struct {
coreclient.ServiceInterface
}
func (c *noDeleteService) Delete(service string, o *api.DeleteOptions) error {
func (c *noDeleteService) Delete(service string, o *metav1.DeleteOptions) error {
return fmt.Errorf("I'm afraid I can't do that, Dave")
}

View File

@ -19,6 +19,7 @@ package pod
import (
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -83,7 +84,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
}
glog.V(2).Infof("Deleting a mirror pod %q", podFullName)
// TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager
if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
}
return nil

View File

@ -440,9 +440,9 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
return
}
deleteOptions := v1.NewDeleteOptions(0)
deleteOptions := metav1.NewDeleteOptions(0)
// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(pod.UID))
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
if err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions); err == nil {
glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
m.deletePodStatus(uid)

View File

@ -79,7 +79,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *FinalizeREST) {
}
// Delete enforces life-cycle rules for namespace termination
func (r *REST) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
func (r *REST) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
nsObj, err := r.Get(ctx, name, &metav1.GetOptions{})
if err != nil {
return nil, err
@ -89,10 +89,10 @@ func (r *REST) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
// Ensure we have a UID precondition
if options == nil {
options = api.NewDeleteOptions(0)
options = metav1.NewDeleteOptions(0)
}
if options.Preconditions == nil {
options.Preconditions = &api.Preconditions{}
options.Preconditions = &metav1.Preconditions{}
}
if options.Preconditions.UID == nil {
options.Preconditions.UID = &namespace.UID

View File

@ -189,7 +189,7 @@ func TestIgnoreDeleteNotFound(t *testing.T) {
// delete object with grace period 0, storage will return NotFound, but the
// registry shouldn't get any error since we ignore the NotFound error.
zero := int64(0)
opt := &api.DeleteOptions{GracePeriodSeconds: &zero}
opt := &metav1.DeleteOptions{GracePeriodSeconds: &zero}
obj, err := registry.Delete(testContext, pod.Name, opt)
if err != nil {
t.Fatalf("Unexpected error: %v", err)

View File

@ -102,7 +102,7 @@ func (podStrategy) AllowUnconditionalUpdate() bool {
// CheckGracefulDelete allows a pod to be gracefully deleted. It updates the DeleteOptions to
// reflect the desired grace value.
func (podStrategy) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *api.DeleteOptions) bool {
func (podStrategy) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *metav1.DeleteOptions) bool {
if options == nil {
return false
}
@ -135,7 +135,7 @@ type podStrategyWithoutGraceful struct {
}
// CheckGracefulDelete prohibits graceful deletion.
func (podStrategyWithoutGraceful) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *api.DeleteOptions) bool {
func (podStrategyWithoutGraceful) CheckGracefulDelete(ctx genericapirequest.Context, obj runtime.Object, options *metav1.DeleteOptions) bool {
return false
}

View File

@ -210,7 +210,7 @@ func TestCheckGracefulDelete(t *testing.T) {
},
}
for _, tc := range tcs {
out := &api.DeleteOptions{GracePeriodSeconds: &defaultGracePeriod}
out := &metav1.DeleteOptions{GracePeriodSeconds: &defaultGracePeriod}
Strategy.CheckGracefulDelete(genericapirequest.NewContext(), tc.in, out)
if out.GracePeriodSeconds == nil {
t.Errorf("out grace period was nil but supposed to be %v", tc.gracePeriod)

View File

@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -54,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
defer f.PodClient().Delete(synthLoggerPodName, &metav1.DeleteOptions{})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

View File

@ -23,8 +23,8 @@ import (
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
It("should check that logs from containers are ingested in GCL", func() {
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
defer f.PodClient().Delete(synthLoggerPodName, &metav1.DeleteOptions{})
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

View File

@ -316,7 +316,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &v1.DeleteOptions{})
err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))

View File

@ -353,7 +353,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)

View File

@ -207,7 +207,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30))
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice")

View File

@ -308,7 +308,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &v1.DeleteOptions{})
err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))

View File

@ -27,7 +27,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
@ -205,7 +204,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring job was deleted")

View File

@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/v1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
@ -185,7 +184,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalcl
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensuring deployment %s was deleted", deploymentName)

View File

@ -238,7 +238,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
@ -267,7 +267,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)

View File

@ -308,7 +308,7 @@ func (t *dnsConfigMapTest) createUtilPod() {
func (t *dnsConfigMapTest) deleteUtilPod() {
podClient := t.c.Core().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.utilPod.Name, v1.NewDeleteOptions(0)); err != nil {
if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}

View File

@ -129,7 +129,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0)); err != nil {
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
}
}()
@ -223,7 +223,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod")
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
By("Cleaning up the git server svc")

View File

@ -117,7 +117,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
framework.Logf("apiserver has recovered")

View File

@ -558,7 +558,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)

View File

@ -113,7 +113,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
// DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
err := c.Delete(name, options)
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)

View File

@ -2116,7 +2116,7 @@ func (f *Framework) MatchContainerOutput(
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
podClient.DeleteSync(createdPod.Name, &v1.DeleteOptions{}, podNoLongerRunningTimeout)
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, podNoLongerRunningTimeout)
}()
// Wait for client pod to complete.
@ -2732,7 +2732,7 @@ func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, n
}
}
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *v1.DeleteOptions) error {
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error {
switch kind {
case api.Kind("ReplicationController"):
return c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
@ -2884,7 +2884,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
defer ps.Stop()
startTime := time.Now()
falseVar := false
deleteOption := &v1.DeleteOptions{OrphanDependents: &falseVar}
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
err = deleteResource(c, kind, ns, name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)

View File

@ -32,14 +32,14 @@ import (
. "github.com/onsi/ginkgo"
)
func getOrphanOptions() *v1.DeleteOptions {
func getOrphanOptions() *metav1.DeleteOptions {
var trueVar = true
return &v1.DeleteOptions{OrphanDependents: &trueVar}
return &metav1.DeleteOptions{OrphanDependents: &trueVar}
}
func getNonOrphanOptions() *v1.DeleteOptions {
func getNonOrphanOptions() *metav1.DeleteOptions {
var falseVar = false
return &v1.DeleteOptions{OrphanDependents: &falseVar}
return &metav1.DeleteOptions{OrphanDependents: &falseVar}
}
var zero = int64(0)
@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
}
By("delete the rc")
deleteOptions := getNonOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
@ -249,7 +249,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
}
By("delete the rc")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
@@ -308,8 +308,8 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
By("delete the rc")
deleteOptions := &v1.DeleteOptions{}
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
deleteOptions := &metav1.DeleteOptions{}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
@@ -356,7 +356,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
By("delete the deployment")
deleteOptions := getNonOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}
@@ -405,7 +405,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
By("delete the deployment")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}

View File

@@ -170,7 +170,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, v1.NewDeleteOptions(30)); err != nil {
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}

View File

@@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
@@ -178,7 +177,7 @@ var _ = framework.KubeDescribe("Job", func() {
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring job was deleted")

View File

@@ -376,13 +376,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
framework.Logf("Node Problem Detector logs:\n %s", log)
}
By("Delete the node problem detector")
c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(0))
c.Core().Pods(ns).Delete(name, metav1.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear")
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
By("Delete the config map")
c.Core().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events")
Expect(c.Core().Events(eventNamespace).DeleteCollection(v1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
Expect(c.Core().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.Core().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
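
DeleteCollection, used for the event cleanup above, takes two option arguments with distinct jobs: the DeleteOptions control how each matched object is deleted, the ListOptions control which objects match. Schematically (eventListOptions is the selector the test builds earlier):

	// Force-delete (grace period 0) every event matched by the selector.
	err := c.Core().Events(eventNamespace).DeleteCollection(
		metav1.NewDeleteOptions(0), // how: immediate deletion
		eventListOptions)           // which: the label/field selector
	Expect(err).To(Succeed())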

View File

@@ -93,8 +93,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(host1Pod.Name, metav1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod")
// Delete pod with 0 grace period
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -157,8 +157,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, &v1.DeleteOptions{})
podClient.Delete(host1Pod.Name, &v1.DeleteOptions{})
podClient.Delete(host0Pod.Name, &metav1.DeleteOptions{})
podClient.Delete(host1Pod.Name, &metav1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -179,7 +179,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod")
// Delete pod with default grace period 30s
framework.ExpectNoError(podClient.Delete(host0Pod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, &metav1.DeleteOptions{}), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, &v1.DeleteOptions{}), "Failed to delete host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, &metav1.DeleteOptions{}), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -221,9 +221,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0))
podClient.Delete(rwPod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(host0ROPod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(host1ROPod.Name, metav1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with 0 grace period
framework.ExpectNoError(podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(rwPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -248,10 +248,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -273,9 +273,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
podClient.Delete(rwPod.Name, &v1.DeleteOptions{})
podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{})
podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{})
podClient.Delete(rwPod.Name, &metav1.DeleteOptions{})
podClient.Delete(host0ROPod.Name, &metav1.DeleteOptions{})
podClient.Delete(host1ROPod.Name, &metav1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -284,7 +284,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with default grace period 30s
framework.ExpectNoError(podClient.Delete(rwPod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(rwPod.Name, &metav1.DeleteOptions{}), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -300,10 +300,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &metav1.DeleteOptions{}), "Failed to delete host0ROPod")
By("deleting host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &metav1.DeleteOptions{}), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -324,7 +324,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
if host0Pod != nil {
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
}
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}()
@@ -356,7 +356,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
By("Test completed successfully, waiting for PD to safely detach")
@@ -379,7 +379,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
if host0Pod != nil {
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
}
detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
@@ -415,7 +415,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
By("Test completed successfully, waiting for PD to safely detach")
@@ -439,7 +439,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
defer func() {
By("Cleaning up PD-RW test env")
podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}()
@@ -521,10 +521,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* should exist */))
By("deleting api object of host0")
framework.ExpectNoError(nodeClient.Delete(string(host0Name), v1.NewDeleteOptions(0)), "Unable to delete host0")
framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0")
By("deleting host0pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Unable to delete host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")
// The disk should be detached from host0 on its deletion
By("Waiting for pd to detach from host0")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name), "Timed out waiting for detach pd")

View File

@@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
@@ -410,7 +410,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
@@ -449,7 +449,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
@@ -497,7 +497,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
@@ -527,7 +527,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")

View File

@@ -802,7 +802,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName

View File

@@ -1145,7 +1145,7 @@ var _ = framework.KubeDescribe("Services", func() {
} else {
for _, pod := range pods.Items {
var gracePeriodSeconds int64 = 0
err := podClient.Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
if err != nil {
framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
}

View File

@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
}
By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
@@ -772,7 +772,7 @@ func (s *statefulSetTester) saturate(ss *apps.StatefulSet) {
func (s *statefulSetTester) deleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) {
name := getPodNameAtIndex(index, ss)
noGrace := int64(0)
if err := s.c.Core().Pods(ss.Namespace).Delete(name, &v1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
if err := s.c.Core().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
framework.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
}
}

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
@@ -163,7 +162,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() {
// Need to manually do the serialization because otherwise the
// Content-Type header is set to protobuf, the thirdparty codec in
// the API server side only accepts JSON.
deleteOptionsData, err := json.Marshal(v1.NewDeleteOptions(10))
deleteOptionsData, err := json.Marshal(metav1.NewDeleteOptions(10))
framework.ExpectNoError(err)
if _, err := f.ClientSet.Core().RESTClient().Delete().
AbsPath("/apis/company.com/v1/namespaces/default/foos/foo").
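
Assuming the usual omitempty JSON tags on metav1.DeleteOptions, the hand-marshaled body is minimal, which is exactly why this detour around the protobuf-negotiating client works:

	body, _ := json.Marshal(metav1.NewDeleteOptions(10))
	// body == []byte(`{"gracePeriodSeconds":10}`)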

View File

@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun
daemonset := createDaemonSetOrFail(f.FederationClientset, nsName)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting daemonset %q in namespace %q", daemonset.Name, nsName))
err := f.FederationClientset.Extensions().DaemonSets(nsName).Delete(daemonset.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Extensions().DaemonSets(nsName).Delete(daemonset.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting daemonset %q in namespace %q", daemonset.Name, nsName)
}()
// wait for daemonset shards being created
@@ -190,7 +190,7 @@ func createDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
func deleteDaemonSetOrFail(clientset *fedclientset.Clientset, nsName string, daemonsetName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting daemonset %q in namespace %q", daemonsetName, nsName))
err := clientset.Extensions().DaemonSets(nsName).Delete(daemonsetName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Extensions().DaemonSets(nsName).Delete(daemonsetName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting daemonset %q in namespace %q", daemonsetName, nsName)
// Wait for the daemonset to be deleted.

View File

@@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
deployment := createDeploymentOrFail(f.FederationClientset, nsName)
By(fmt.Sprintf("Creation of deployment %q in namespace %q succeeded. Deleting deployment.", deployment.Name, nsName))
// Cleanup
err := f.FederationClientset.Extensions().Deployments(nsName).Delete(deployment.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Extensions().Deployments(nsName).Delete(deployment.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting deployment %q in namespace %q", deployment.Name, deployment.Namespace)
By(fmt.Sprintf("Deletion of deployment %q in namespace %q succeeded.", deployment.Name, nsName))
})
@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
dep.Spec.Replicas = &replicas
f.FederationClientset.Deployments(nsName).Update(dep)
waitForDeploymentOrFail(f.FederationClientset, nsName, dep.Name, clusters)
f.FederationClientset.Deployments(nsName).Delete(dep.Name, &v1.DeleteOptions{})
f.FederationClientset.Deployments(nsName).Delete(dep.Name, &metav1.DeleteOptions{})
}()
waitForDeploymentOrFail(f.FederationClientset, nsName, dep.Name, clusters)
@@ -261,7 +261,7 @@ func updateDeploymentOrFail(clientset *fedclientset.Clientset, namespace string)
func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, deploymentName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting deployment %q in namespace %q", deploymentName, nsName))
err := clientset.Extensions().Deployments(nsName).Delete(deploymentName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Extensions().Deployments(nsName).Delete(deploymentName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting deployment %q in namespace %q", deploymentName, nsName)
// Wait for the deployment to be deleted.

View File

@@ -72,7 +72,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
ingress := createIngressOrFail(f.FederationClientset, nsName)
By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded. Deleting ingress.", ingress.Name, nsName))
// Cleanup
err := f.FederationClientset.Extensions().Ingresses(nsName).Delete(ingress.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Extensions().Ingresses(nsName).Delete(ingress.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ingress.Namespace)
By(fmt.Sprintf("Deletion of ingress %q in namespace %q succeeded.", ingress.Name, nsName))
})
@@ -205,7 +205,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
// Deletes all Ingresses in the given namespace name.
func deleteAllIngressesOrFail(clientset *fedclientset.Clientset, nsName string) {
orphanDependents := false
err := clientset.Extensions().Ingresses(nsName).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, metav1.ListOptions{})
err := clientset.Extensions().Ingresses(nsName).DeleteCollection(&metav1.DeleteOptions{OrphanDependents: &orphanDependents}, metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error in deleting ingresses in namespace: %s", nsName))
}
@@ -329,7 +329,7 @@ func deleteIngressOrFail(clientset *fedclientset.Clientset, namespace string, in
if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteIngressOrFail: clientset: %v, namespace: %v, ingress: %v", clientset, namespace, ingressName))
}
err := clientset.Ingresses(namespace).Delete(ingressName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Ingresses(namespace).Delete(ingressName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
// Wait for the ingress to be deleted.
err = wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
@@ -349,7 +349,7 @@ func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Cli
if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteClusterIngressOrFail: cluster: %q, clientset: %v, namespace: %v, ingress: %v", clusterName, clientset, namespace, ingressName))
}
err := clientset.Ingresses(namespace).Delete(ingressName, v1.NewDeleteOptions(0))
err := clientset.Ingresses(namespace).Delete(ingressName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
}

View File

@@ -197,7 +197,7 @@ func createNamespace(nsClient clientset.NamespaceInterface) string {
return ns.Name
}
func deleteAllTestNamespaces(orphanDependents *bool, lister func(metav1.ListOptions) (*api_v1.NamespaceList, error), deleter func(string, *api_v1.DeleteOptions) error) {
func deleteAllTestNamespaces(orphanDependents *bool, lister func(metav1.ListOptions) (*api_v1.NamespaceList, error), deleter func(string, *metav1.DeleteOptions) error) {
list, err := lister(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get all namespaes: %v", err)
@@ -206,7 +206,7 @@ func deleteAllTestNamespaces(orphanDependents *bool, lister func(metav1.ListOpti
for _, namespace := range list.Items {
if strings.HasPrefix(namespace.Name, namespacePrefix) {
By(fmt.Sprintf("Deleting ns: %s, found by listing", namespace.Name))
err := deleter(namespace.Name, &api_v1.DeleteOptions{OrphanDependents: orphanDependents})
err := deleter(namespace.Name, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
if err != nil {
framework.Failf("Failed to set %s for deletion: %v", namespace.Name, err)
}
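
The function-typed lister and deleter parameters are what let this single helper clean up namespaces both in the federation control plane and in each member cluster; Go method values satisfy the signatures directly. A hypothetical call site (the clientset accessors here are assumptions, not quoted from the test):

	orphan := false
	deleteAllTestNamespaces(&orphan,
		f.FederationClientset.Core().Namespaces().List,   // lister
		f.FederationClientset.Core().Namespaces().Delete) // deleter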

View File

@@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu
replicaset := createReplicaSetOrFail(f.FederationClientset, nsName)
By(fmt.Sprintf("Creation of replicaset %q in namespace %q succeeded. Deleting replicaset.", replicaset.Name, nsName))
// Cleanup
err := f.FederationClientset.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting replicaset %q in namespace %q", replicaset.Name, replicaset.Namespace)
By(fmt.Sprintf("Deletion of replicaset %q in namespace %q succeeded.", replicaset.Name, nsName))
})
@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", fu
rs.Spec.Replicas = &replicas
f.FederationClientset.ReplicaSets(nsName).Update(rs)
waitForReplicaSetOrFail(f.FederationClientset, nsName, rs.Name, clusters)
f.FederationClientset.ReplicaSets(nsName).Delete(rs.Name, &v1.DeleteOptions{})
f.FederationClientset.ReplicaSets(nsName).Delete(rs.Name, &metav1.DeleteOptions{})
}()
waitForReplicaSetOrFail(f.FederationClientset, nsName, rs.Name, clusters)
@@ -247,7 +247,7 @@ func createReplicaSetOrFail(clientset *fedclientset.Clientset, namespace string)
func deleteReplicaSetOrFail(clientset *fedclientset.Clientset, nsName string, replicaSetName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting replica set %q in namespace %q", replicaSetName, nsName))
err := clientset.Extensions().ReplicaSets(nsName).Delete(replicaSetName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Extensions().ReplicaSets(nsName).Delete(replicaSetName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting replica set %q in namespace %q", replicaSetName, nsName)
// Wait for the replicaSet to be deleted.

View File

@@ -170,7 +170,7 @@ func createSecretOrFail(clientset *fedclientset.Clientset, nsName string) *v1.Se
func deleteSecretOrFail(clientset *fedclientset.Clientset, nsName string, secretName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting secret %q in namespace %q", secretName, nsName))
err := clientset.Core().Secrets(nsName).Delete(secretName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Core().Secrets(nsName).Delete(secretName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting secret %q in namespace %q", secretName, nsName)
// Wait for the secret to be deleted.

View File

@@ -103,7 +103,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
By(fmt.Sprintf("Creation of service %q in namespace %q succeeded. Deleting service.", service.Name, nsName))
// Cleanup
err := f.FederationClientset.Services(nsName).Delete(service.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
By(fmt.Sprintf("Deletion of service %q in namespace %q succeeded.", service.Name, nsName))
})
@@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
service = createServiceOrFail(f.FederationClientset, nsName, FederatedServiceName)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting service %q in namespace %q", service.Name, nsName))
err := f.FederationClientset.Services(nsName).Delete(service.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Services(nsName).Delete(service.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, nsName)
}()
waitForServiceShardsOrFail(nsName, service, clusters)
@@ -259,7 +259,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
By("Verified that DNS rules are working as expected")
By("Deleting the service to verify that DNS rules still work")
err := f.FederationClientset.Services(nsName).Delete(FederatedServiceName, &v1.DeleteOptions{})
err := f.FederationClientset.Services(nsName).Delete(FederatedServiceName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
// Service is deleted, unset the test block-global service variable.
service = nil

View File

@@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, cluster := range clusterList.Items {
err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
framework.Logf("Deleting %d clusters", len(contexts))
for _, context := range contexts {
framework.Logf("Deleting cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
err := f.FederationClientset.Federation().Clusters().Delete(context.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Federation().Clusters().Delete(context.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in deleting cluster %s: %+v", context.Name, err))
framework.Logf("Successfully deleted cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
}

View File

@@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func()
eventList, err := f.FederationClientset.Core().Events(nsName).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, event := range eventList.Items {
err := f.FederationClientset.Core().Events(nsName).Delete(event.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Core().Events(nsName).Delete(event.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func()
event := createEventOrFail(f.FederationClientset, nsName)
By(fmt.Sprintf("Creation of event %q in namespace %q succeeded. Deleting event.", event.Name, nsName))
// Cleanup
err := f.FederationClientset.Core().Events(nsName).Delete(event.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Core().Events(nsName).Delete(event.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting event %q in namespace %q", event.Name, event.Namespace)
By(fmt.Sprintf("Deletion of event %q in namespace %q succeeded.", event.Name, nsName))
})

View File

@@ -170,7 +170,7 @@ func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework)
for name, c := range clusters {
if c.namespaceCreated {
if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); !errors.IsNotFound(err) {
err := c.Clientset.Core().Namespaces().Delete(nsName, &v1.DeleteOptions{})
err := c.Clientset.Core().Namespaces().Delete(nsName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", nsName, name, err)
}
framework.Logf("Namespace %s deleted in cluster %q", nsName, name)
@@ -181,7 +181,7 @@ func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework)
clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
framework.ExpectNoError(err, "Error listing clusters")
for _, cluster := range clusterList.Items {
err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{})
err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting cluster %q", cluster.Name)
}
}
@@ -291,7 +291,7 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
}
err := clientset.Services(namespace).Delete(serviceName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
// Wait for the service to be deleted.
err = wait.Poll(5*time.Second, 3*wait.ForeverTestTimeout, func() (bool, error) {
@@ -345,7 +345,7 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
func cleanupServiceShard(clientset *kubeclientset.Clientset, clusterName, namespace string, service *v1.Service, timeout time.Duration) error {
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err := clientset.Services(namespace).Delete(service.Name, &v1.DeleteOptions{})
err := clientset.Services(namespace).Delete(service.Name, &metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
// Deletion failed with an error, try again.
framework.Logf("Failed to delete service %q in namespace %q, in cluster %q", service.Name, namespace, clusterName)
@@ -454,7 +454,7 @@ func discoverService(f *fedframework.Framework, name string, exists bool, podNam
By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
defer func() {
By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
err := f.ClientSet.Core().Pods(nsName).Delete(podName, v1.NewDeleteOptions(0))
err := f.ClientSet.Core().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
}()
@@ -506,7 +506,7 @@ The test fails if there are any errors.
func deleteOneBackendPodOrFail(c *cluster) {
pod := c.backendPod
Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, v1.NewDeleteOptions(0))
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
if errors.IsNotFound(err) {
By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist. No need to delete it.", pod.Name, pod.Namespace, c.name))
} else {

View File

@@ -83,7 +83,7 @@ func (f *Framework) deleteFederationNs() {
// First delete the namespace from federation apiserver.
// Also delete the corresponding namespaces from underlying clusters.
orphanDependents := false
if err := clientset.Core().Namespaces().Delete(ns.Name, &v1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
if err := clientset.Core().Namespaces().Delete(ns.Name, &metav1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
framework.Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
}
// Verify that it got deleted.

View File

@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(guaranteedPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(bestEffortPod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
@@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
Expect(f.PodClient().Delete(burstablePod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID))
f.PodClient().Create(pod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)

View File

@@ -63,7 +63,7 @@ func (cc *ConformanceContainer) Create() {
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(cc.podName, v1.NewDeleteOptions(0))
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {

View File

@@ -97,9 +97,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
if !isImageSupported() || !evictionOptionIsSet() { // Skip the AfterEach
return
}
podClient.DeleteSync(busyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(idlePodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(verifyPodName, &v1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
// Wait for 2 container gc loops to ensure that the containers are deleted. The containers
// created in this test consume a lot of disk; we don't want them to trigger disk eviction

View File

@@ -231,7 +231,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
AfterEach(func() {
for _, pod := range test.testPods {
By(fmt.Sprintf("Deleting Pod %v", pod.podName))
f.PodClient().DeleteSync(pod.podName, &v1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, defaultRuntimeRequestTimeoutDuration)
}
By("Making sure all containers get cleaned up")

View File

@@ -270,7 +270,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
By("deleting pods")
for _, spec := range podTestSpecs {
By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
f.PodClient().DeleteSync(spec.pod.Name, &v1.DeleteOptions{}, podDisappearTimeout)
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, podDisappearTimeout)
}
if CurrentGinkgoTestDescription().Failed {

View File

@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
})
It("should be possible to delete", func() {
err := podClient.Delete(podName, &v1.DeleteOptions{})
err := podClient.Delete(podName, &metav1.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})

View File

@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("create the hook check pod")
podClient.Create(podCheckHook)
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, v1.NewDeleteOptions(15), podWaitTimeout)
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("check prestop hook")
Eventually(func() error {

View File

@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
By("delete the mirror pod with grace period 30s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(30))
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated")
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
uid := pod.UID
By("delete the mirror pod with grace period 0s")
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, v1.NewDeleteOptions(0))
err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
Expect(err).ShouldNot(HaveOccurred())
By("wait for the mirror pod to be recreated")

View File

@@ -374,7 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
go func(pod *v1.Pod) {
defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred())
Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),

View File

@@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
gp := int64(1)
f.PodClient().Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &gp})
f.PodClient().Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
if err == nil {
break
}

View File

@@ -68,7 +68,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
}
var gracePeriodSeconds int64 = 30
deleteOption := &v1.DeleteOptions{
deleteOption := &metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriodSeconds,
}
@@ -213,7 +213,7 @@ func newPDB() *v1beta1.PodDisruptionBudget {
}
}
func newEviction(ns, evictionName string, deleteOption *v1.DeleteOptions) *v1beta1.Eviction {
func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction {
return &v1beta1.Eviction{
TypeMeta: metav1.TypeMeta{
APIVersion: "Policy/v1beta1",
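
The hunk ends mid-literal; the constructor plausibly continues by naming the kind, identifying the pod to evict, and attaching the options (a sketch; the ObjectMeta type in particular is an assumption while this migration is in flight):

			Kind: "Eviction",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:      evictionName,
			Namespace: ns,
		},
		DeleteOptions: deleteOption,
	}
}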

View File

@@ -96,7 +96,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &v1.DeleteOptions{}); err != nil {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}

View File

@@ -46,14 +46,14 @@ import (
"k8s.io/kubernetes/test/integration/framework"
)
func getOrphanOptions() *v1.DeleteOptions {
func getOrphanOptions() *metav1.DeleteOptions {
var trueVar = true
return &v1.DeleteOptions{OrphanDependents: &trueVar}
return &metav1.DeleteOptions{OrphanDependents: &trueVar}
}
func getNonOrphanOptions() *v1.DeleteOptions {
func getNonOrphanOptions() *metav1.DeleteOptions {
var falseVar = false
return &v1.DeleteOptions{OrphanDependents: &falseVar}
return &metav1.DeleteOptions{OrphanDependents: &falseVar}
}
const garbageCollectedPodName = "test.pod.1"
@@ -273,7 +273,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
}
}
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *v1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
defer wg.Done()
rcClient := clientSet.Core().ReplicationControllers(namespace)
podClient := clientSet.Core().Pods(namespace)

View File

@@ -282,7 +282,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, v1.NewDeleteOptions(0))
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
@@ -421,11 +421,11 @@ func TestMultiScheduler(t *testing.T) {
}
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
err = clientSet.Core().Pods(ns.Name).Delete(testPod.Name, v1.NewDeleteOptions(0))
err = clientSet.Core().Pods(ns.Name).Delete(testPod.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = clientSet.Core().Pods(ns.Name).Delete(testPodFitsDefault.Name, v1.NewDeleteOptions(0))
err = clientSet.Core().Pods(ns.Name).Delete(testPodFitsDefault.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
@@ -574,7 +574,7 @@ func TestAllocatable(t *testing.T) {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &v1.DeleteOptions{}); err != nil {
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to remove first resource pod: %v", err)
}

View File

@@ -1039,7 +1039,7 @@ func (config *SecretConfig) Run() error {
}
func (config *SecretConfig) Stop() error {
if err := config.Client.Core().Secrets(config.Namespace).Delete(config.Name, &v1.DeleteOptions{}); err != nil {
if err := config.Client.Core().Secrets(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("Error deleting secret: %v", err)
}
config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)