diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index 265fe7e453d..a0b244d3efb 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -431,7 +431,7 @@ func RunDeleteTokens(out io.Writer, client clientset.Interface, tokenIDsOrTokens tokenSecretName := bootstraputil.BootstrapTokenSecretName(tokenID) klog.V(1).Infof("[token] deleting token %q", tokenID) - if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), tokenSecretName, nil); err != nil { + if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), tokenSecretName, metav1.DeleteOptions{}); err != nil { return errors.Wrapf(err, "failed to delete bootstrap token %q", tokenID) } fmt.Fprintf(out, "bootstrap token %q deleted\n", tokenID) diff --git a/pkg/controller/certificates/cleaner/cleaner.go b/pkg/controller/certificates/cleaner/cleaner.go index fd4d19e6a55..b4cd971e19d 100644 --- a/pkg/controller/certificates/cleaner/cleaner.go +++ b/pkg/controller/certificates/cleaner/cleaner.go @@ -109,7 +109,7 @@ func (ccc *CSRCleanerController) handle(csr *capi.CertificateSigningRequest) err return err } if isIssuedPastDeadline(csr) || isDeniedPastDeadline(csr) || isPendingPastDeadline(csr) || isIssuedExpired { - if err := ccc.csrClient.Delete(context.TODO(), csr.Name, nil); err != nil { + if err := ccc.csrClient.Delete(context.TODO(), csr.Name, metav1.DeleteOptions{}); err != nil { return fmt.Errorf("unable to delete CSR %q: %v", csr.Name, err) } } diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go index b9d8421c5e0..9c592f941a7 100644 --- a/pkg/controller/client_builder.go +++ b/pkg/controller/client_builder.go @@ -157,7 +157,7 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro if !valid { klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Namespace, sa.Name) // try to delete the secret containing the invalid token - if err := b.CoreClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + if err := b.CoreClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Namespace, sa.Name, err) } // continue watching for good tokens diff --git a/pkg/controller/cloud/node_lifecycle_controller.go b/pkg/controller/cloud/node_lifecycle_controller.go index 8faea71617c..078b2bf17c2 100644 --- a/pkg/controller/cloud/node_lifecycle_controller.go +++ b/pkg/controller/cloud/node_lifecycle_controller.go @@ -23,6 +23,7 @@ import ( "time" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -190,7 +191,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() { fmt.Sprintf("Deleting node %v because it does not exist in the cloud provider", node.Name), "Node %s event: %s", node.Name, deleteNodeEvent) - if err := c.kubeClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, nil); err != nil { + if err := c.kubeClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil { klog.Errorf("unable to delete node %q: %v", node.Name, err) } } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 772c2a218f7..3b9c583465f 
100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -602,7 +602,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, nil); err != nil && !apierrors.IsNotFound(err) { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go index eea61f94a62..5f9504bdd52 100644 --- a/pkg/controller/cronjob/injection.go +++ b/pkg/controller/cronjob/injection.go @@ -120,7 +120,7 @@ func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1. func (r realJobControl) DeleteJob(namespace string, name string) error { background := metav1.DeletePropagationBackground - return r.KubeClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &background}) + return r.KubeClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &background}) } type fakeJobControl struct { @@ -222,7 +222,7 @@ func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1 } func (r realPodControl) DeletePod(namespace string, name string) error { - return r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil) + return r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) } type fakePodControl struct { diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 7abe32b744f..c3e31ffee35 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -171,7 +171,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps. 
continue } // Clean up - err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, nil) + err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, metav1.DeleteOptions{}) if err != nil { return err } @@ -227,7 +227,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor } } // Remove duplicates - err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, nil) + err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, metav1.DeleteOptions{}) if err != nil { return nil, err } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 89889e41a63..8e50f10fe26 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -458,7 +458,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep continue } klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) - if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, nil); err != nil && !errors.IsNotFound(err) { + if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. return err diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index 8458dcaeb1a..f8d50477394 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -1133,7 +1133,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) { }) // (A) Delete one pod - if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], &metav1.DeleteOptions{}); err != nil { + if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], metav1.DeleteOptions{}); err != nil { t.Fatal(err) } if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil { diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 5fdabd1238b..bc77b3751fc 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -369,7 +369,7 @@ func (e *EndpointController) syncService(key string) error { // service is deleted. However, if we're down at the time when // the service is deleted, we will miss that deletion, so this // doesn't completely solve the problem. See #6877. 
- err = e.client.CoreV1().Endpoints(namespace).Delete(context.TODO(), name, nil) + err = e.client.CoreV1().Endpoints(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return err } diff --git a/pkg/controller/endpointslice/reconciler.go b/pkg/controller/endpointslice/reconciler.go index 4adc7073a3f..9055b52c14a 100644 --- a/pkg/controller/endpointslice/reconciler.go +++ b/pkg/controller/endpointslice/reconciler.go @@ -231,7 +231,7 @@ func (r *reconciler) finalize( } for _, endpointSlice := range slicesToDelete { - err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, &metav1.DeleteOptions{}) + err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{}) if err != nil { errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)) } else { diff --git a/pkg/controller/history/controller_history.go b/pkg/controller/history/controller_history.go index 6bac086c2f7..dabcd399d4d 100644 --- a/pkg/controller/history/controller_history.go +++ b/pkg/controller/history/controller_history.go @@ -289,7 +289,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio } func (rh *realHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error { - return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(context.TODO(), revision.Name, nil) + return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(context.TODO(), revision.Name, metav1.DeleteOptions{}) } type objectForPatch struct { diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index 108d48931b0..873258bd48b 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -109,7 +109,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced } var err error for i := 0; i < retries; i++ { - err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err == nil { break } diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index b3ddd33b73c..0036e220f79 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -349,7 +349,7 @@ func TestGCOrphaned(t *testing.T) { client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) } for _, node := range test.deletedClientNodes { - client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}) + client.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}) } for _, node := range test.addedInformerNodes { nodeInformer.Informer().GetStore().Add(node) diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index ceded6f7710..7e90cd2ca57 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -1203,7 +1203,7 @@ func TestExpectationsOnRecreate(t *testing.T) { t.Fatal("Unexpected item in the queue") } - err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(context.TODO(), oldRS.Name, &metav1.DeleteOptions{}) + err = 
client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(context.TODO(), oldRS.Name, metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index 824671ee6a8..eff625d7068 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -136,7 +136,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod } func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error { - err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, nil) + err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) spc.recordPodEvent("delete", set, pod, err) return err } diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index 13007a47114..89560eaa1e1 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -80,7 +80,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record. klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) - if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { + if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. // There is nothing left to do with this pod. diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index de40a033d53..c67276d5442 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -1227,7 +1227,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume - if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(context.TODO(), volume.Name, nil); err != nil { + if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(context.TODO(), volume.Name, metav1.DeleteOptions{}); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. 
We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go index bbf6f033a72..9c4dda0c73f 100644 --- a/pkg/controller/volume/scheduling/scheduler_binder_test.go +++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go @@ -1813,7 +1813,7 @@ func TestBindPodVolumes(t *testing.T) { delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) { pvc := pvcs[0] // Delete PVC will fail check - if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, &metav1.DeleteOptions{}); err != nil { + if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("failed to delete PVC %q: %v", pvc.Name, err) } }, diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go index 551d27df444..3ead280e658 100644 --- a/pkg/kubelet/pod/mirror_client.go +++ b/pkg/kubelet/pod/mirror_client.go @@ -124,7 +124,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID) } klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid) var GracePeriodSeconds int64 - if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { + if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { // Unfortunately, there's no generic error for failing a precondition if !(apierrors.IsNotFound(err) || apierrors.IsConflict(err)) { // We should return the error here, but historically this routine does diff --git a/pkg/kubemark/controller.go b/pkg/kubemark/controller.go index b8f11643664..d9c95962add 100644 --- a/pkg/kubemark/controller.go +++ b/pkg/kubemark/controller.go @@ -249,7 +249,7 @@ func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup var err error for i := 0; i < numRetries; i++ { err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete(context.TODO(), pod.ObjectMeta.Labels["name"], - &metav1.DeleteOptions{PropagationPolicy: &policy}) + metav1.DeleteOptions{PropagationPolicy: &policy}) if err == nil { klog.Infof("marking node %s for deletion", node) // Mark node for deletion from kubemark cluster. 
@@ -374,7 +374,7 @@ func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, defer kubemarkCluster.nodesToDeleteLock.Unlock() if kubemarkCluster.nodesToDelete[node.Name] { kubemarkCluster.nodesToDelete[node.Name] = false - if err := kubemarkCluster.client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}); err != nil { + if err := kubemarkCluster.client.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil { klog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) } } diff --git a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go index c017f6b2f19..e7322fc5f6c 100644 --- a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go +++ b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go @@ -205,7 +205,7 @@ func writeConfigMap(configMapClient corev1client.ConfigMapsGetter, required *cor // 1. request is so big the generic request catcher finds it // 2. the content is so large that that the server sends a validation error "Too long: must have at most 1048576 characters" if apierrors.IsRequestEntityTooLargeError(err) || (apierrors.IsInvalid(err) && strings.Contains(err.Error(), "Too long")) { - if deleteErr := configMapClient.ConfigMaps(required.Namespace).Delete(context.TODO(), required.Name, nil); deleteErr != nil { + if deleteErr := configMapClient.ConfigMaps(required.Namespace).Delete(context.TODO(), required.Name, metav1.DeleteOptions{}); deleteErr != nil { return deleteErr } return err diff --git a/pkg/master/reconcilers/endpointsadapter.go b/pkg/master/reconcilers/endpointsadapter.go index eb7b8358d77..b3d456059b8 100644 --- a/pkg/master/reconcilers/endpointsadapter.go +++ b/pkg/master/reconcilers/endpointsadapter.go @@ -99,7 +99,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri // required for transition from IP to IPv4 address type. 
if currentEndpointSlice.AddressType != endpointSlice.AddressType { - err = adapter.endpointSliceClient.EndpointSlices(namespace).Delete(context.TODO(), endpointSlice.Name, &metav1.DeleteOptions{}) + err = adapter.endpointSliceClient.EndpointSlices(namespace).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{}) if err != nil { return err } diff --git a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go index 6b136bba4f7..737dab62c8b 100644 --- a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go @@ -106,5 +106,5 @@ func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, er } func (c ClusterRoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error { - return c.Client.Delete(context.TODO(), name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) + return c.Client.Delete(context.TODO(), name, metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) } diff --git a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 70d47921caf..8cf2052c294 100644 --- a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -112,5 +112,5 @@ func (c RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { } func (c RoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error { - return c.Client.RoleBindings(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) + return c.Client.RoleBindings(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index bc5792aaaa6..2b1cc3f8539 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -774,7 +774,7 @@ func (p *podPreemptorImpl) getUpdatedPod(pod *v1.Pod) (*v1.Pod, error) { } func (p *podPreemptorImpl) deletePod(pod *v1.Pod) error { - return p.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{}) + return p.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) } func (p *podPreemptorImpl) setNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error { diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index fc5c3a28bb8..ef06ac8fccd 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -394,7 +394,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { attachID = getAttachmentName(volID, driverName, string(nodeName)) } - if err := c.k8s.StorageV1().VolumeAttachments().Delete(context.TODO(), attachID, nil); err != nil { + if err := c.k8s.StorageV1().VolumeAttachments().Delete(context.TODO(), attachID, metav1.DeleteOptions{}); err != nil { if apierrors.IsNotFound(err) { // object deleted or never existed, done klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 6a5bb15465e..8419039d265 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -840,7 +840,7 @@ func (p *glusterfsVolumeProvisioner) 
CreateVolume(gid int) (r *v1.GlusterfsPersi klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) } klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint) - err = kubeClient.CoreV1().Services(epNamespace).Delete(context.TODO(), epServiceName, nil) + err = kubeClient.CoreV1().Services(epNamespace).Delete(context.TODO(), epServiceName, metav1.DeleteOptions{}) if err != nil && errors.IsNotFound(err) { klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace) err = nil @@ -921,7 +921,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi if kubeClient == nil { return fmt.Errorf("failed to get kube client when deleting endpoint service") } - err = kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), epServiceName, nil) + err = kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), epServiceName, metav1.DeleteOptions{}) if err != nil { return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) } diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go index 0558c1dced4..b1ca71f9035 100644 --- a/pkg/volume/util/recyclerclient/recycler_client.go +++ b/pkg/volume/util/recyclerclient/recycler_client.go @@ -186,7 +186,7 @@ func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { } func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil) + return c.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) } func (c *realRecyclerClient) Event(eventtype, message string) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go index 4bfe23f9854..7b6203597f5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go @@ -504,7 +504,7 @@ func isWatchCachePrimed(crd *apiextensionsv1.CustomResourceDefinition, dynamicCl // DeleteCustomResourceDefinition deletes a CRD and waits until it disappears from discovery. func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{}); err != nil { return err } for _, version := range servedVersions(crd) { @@ -521,7 +521,7 @@ func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefi // DeleteV1CustomResourceDefinition deletes a CRD and waits until it disappears from discovery. 
func DeleteV1CustomResourceDefinition(crd *apiextensionsv1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, metav1.DeleteOptions{}); err != nil { return err } for _, version := range servedV1Versions(crd) { @@ -542,7 +542,7 @@ func DeleteV1CustomResourceDefinitions(deleteListOpts metav1.ListOptions, apiExt if err != nil { return err } - if err = apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().DeleteCollection(context.TODO(), nil, deleteListOpts); err != nil { + if err = apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, deleteListOpts); err != nil { return err } for _, crd := range list.Items { diff --git a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go index eff29931d4b..224dbc12519 100644 --- a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go +++ b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go @@ -156,7 +156,7 @@ func main() { prompt() fmt.Println("Deleting deployment...") deletePolicy := metav1.DeletePropagationForeground - if err := deploymentsClient.Delete(context.TODO(), "demo-deployment", &metav1.DeleteOptions{ + if err := deploymentsClient.Delete(context.TODO(), "demo-deployment", metav1.DeleteOptions{ PropagationPolicy: &deletePolicy, }); err != nil { panic(err) diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go index b8795d421ee..ce0c0b63ea5 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go @@ -241,7 +241,7 @@ func addEvictionSupport(t *testing.T, k *fake.Clientset) { eviction := *action.(ktest.CreateAction).GetObject().(*policyv1beta1.Eviction) // Avoid the lock go func() { - err := k.CoreV1().Pods(eviction.Namespace).Delete(context.TODO(), eviction.Name, &metav1.DeleteOptions{}) + err := k.CoreV1().Pods(eviction.Namespace).Delete(context.TODO(), eviction.Name, metav1.DeleteOptions{}) if err != nil { // Errorf because we can't call Fatalf from another goroutine t.Errorf("failed to delete pod: %s/%s", eviction.Namespace, eviction.Name) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 635517d2a6f..c7c6b27d43f 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -102,16 +102,16 @@ var _ = SIGDescribe("Aggregator", func() { func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) { // delete the APIService first to avoid causing discovery errors - _ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", nil) + _ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", metav1.DeleteOptions{}) - _ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", nil) - _ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", nil) - _ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", nil) - _ = 
client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", nil) - _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", nil) - _ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", nil) - _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", nil) + _ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", metav1.DeleteOptions{}) + _ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", metav1.DeleteOptions{}) + _ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", metav1.DeleteOptions{}) + _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{}) } // TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10 diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 01fcd3f999f..e6acbe27d43 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -209,10 +209,10 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", }) func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, nil) - _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, nil) - _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, nil) + _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, metav1.DeleteOptions{}) } func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) { diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index 3e1d4b69b76..65476988412 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ b/test/e2e/apimachinery/generated_clientset.go @@ -264,7 +264,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("deleting the cronJob") // Use DeletePropagationBackground so the CronJob is really gone when the call returns. 
propagationPolicy := metav1.DeletePropagationBackground - if err := cronJobClient.Delete(context.TODO(), cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { + if err := cronJobClient.Delete(context.TODO(), cronJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { framework.Failf("Failed to delete cronJob: %v", err) } diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 180eb873b07..2289533eb64 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -117,7 +117,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") @@ -174,7 +174,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index a5d0e4cb964..422f4a5ea54 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -112,7 +112,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a Service") - err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, nil) + err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -180,7 +180,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a secret") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -346,7 +346,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a ConfigMap") - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, nil) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -397,7 +397,7 @@ var _ = SIGDescribe("ResourceQuota", func() { // detached. ReplicationControllers default to "orphan", which // is different from most resources. (Why? To preserve a common // workflow from prior to the GC's introduction.) 
- err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, &metav1.DeleteOptions{ + err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, metav1.DeleteOptions{ PropagationPolicy: func() *metav1.DeletionPropagation { p := metav1.DeletePropagationBackground return &p @@ -448,7 +448,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a ReplicaSet") - err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, nil) + err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -497,7 +497,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -554,7 +554,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -588,7 +588,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, nil) + err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Counting existing ResourceQuota") @@ -1662,7 +1662,7 @@ func createResourceQuota(c clientset.Interface, namespace string, resourceQuota // deleteResourceQuota with the specified name func deleteResourceQuota(c clientset.Interface, namespace, name string) error { - return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, nil) + return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) } // countResourceQuota counts the number of ResourceQuota in the specified namespace diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 569347324ea..c75f4babd45 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -110,7 +110,7 @@ var _ = SIGDescribe("Watchers", func() { expectNoEvent(watchB, watch.Modified, testConfigMapA) ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Deleted, nil) 
expectEvent(watchAB, watch.Deleted, nil) @@ -124,7 +124,7 @@ var _ = SIGDescribe("Watchers", func() { expectNoEvent(watchA, watch.Added, testConfigMapB) ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) expectEvent(watchB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) @@ -166,7 +166,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("creating a watch on configmaps from the resource version returned by the first update") @@ -235,7 +235,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed") @@ -310,7 +310,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored") @@ -482,7 +482,7 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) case deleteEvent: idx := rand.Intn(len(existing)) - err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), &metav1.DeleteOptions{}) + err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) existing = append(existing[:idx], existing[idx+1:]...) 
default: diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 8dd47aa6652..4fe6ed8b057 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -423,7 +423,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating validating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil) + err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), hook.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting validating webhook configuration") }() @@ -436,7 +436,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -466,7 +466,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) @@ -483,7 +483,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -518,7 +518,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating mutating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil) + err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), hook.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting mutating webhook configuration") }() @@ -540,7 +540,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -560,7 +560,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, 
metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -610,7 +610,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -622,7 +622,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be denied creation by validating webhook", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of validating webhook configurations") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") @@ -635,7 +635,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) @@ -686,7 +686,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -694,7 +694,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be mutated", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of mutating webhook configurations") ginkgo.By("Creating a configMap that should not be mutated") @@ -704,7 +704,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -909,7 +909,7 @@ func registerWebhook(f *framework.Framework, configName 
string, certCtx *certCon framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -961,7 +961,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -987,7 +987,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -1055,7 +1055,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, ce framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -1181,7 +1181,7 @@ func testWebhook(f *framework.Framework) { }}) framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName) // clean up the namespace - defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, nil) + defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, metav1.DeleteOptions{}) ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") configmap = nonCompliantConfigMap(f) @@ -1274,7 +1274,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -1289,7 +1289,7 @@ func testFailClosedWebhook(f *framework.Framework) { }, }}) framework.ExpectNoError(err, "creating namespace %q", failNamespaceName) - defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, nil) + defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, metav1.DeleteOptions{}) ginkgo.By("create a configmap should be unconditionally rejected by the webhook") configmap := &v1.ConfigMap{ @@ -1360,7 +1360,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + err := 
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } } @@ -1421,7 +1421,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } } @@ -1489,7 +1489,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str ginkgo.By("Deleting the validating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) ginkgo.By("Creating a dummy mutating-webhook-configuration object") @@ -1545,7 +1545,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str ginkgo.By("Deleting the mutating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } @@ -1695,10 +1695,10 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u } func cleanWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceName, nil) - _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentName, nil) - _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretName, nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, nil) + _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) + _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) + _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, metav1.DeleteOptions{}) } func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() { @@ -1748,7 +1748,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string, err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), 
configName, metav1.DeleteOptions{}) } } @@ -1826,7 +1826,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -2058,7 +2058,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string, err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -2188,7 +2188,7 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, metav1.DeleteOptions{}) } } @@ -2212,7 +2212,7 @@ func testSlowWebhookTimeoutNoError(f *framework.Framework) { name := "e2e-test-slow-webhook-configmap" _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.BeNil()) - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil()) } @@ -2422,7 +2422,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { return false, err } // best effort cleanup of markers that are no longer needed - _ = cmClient.Delete(context.TODO(), marker.GetName(), nil) + _ = cmClient.Delete(context.TODO(), marker.GetName(), metav1.DeleteOptions{}) framework.Logf("Waiting for webhook configuration to be ready...") return false, nil }) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 3786a98b717..61722019086 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -364,7 +364,7 @@ func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, func deleteCronJob(c clientset.Interface, ns, name string) error { propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob - return c.BatchV1beta1().CronJobs(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + return c.BatchV1beta1().CronJobs(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) } // Wait for at least given amount of active jobs. 
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index ed345e0950f..a5742a930c9 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -166,7 +166,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] - err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, nil) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index aae984c3e74..b4f3d028637 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -607,7 +607,7 @@ func testIterativeDeployments(f *framework.Framework) { } name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) - err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) + err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index b785218fd61..69ec790be66 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -404,7 +404,7 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam } func deletePDBOrDie(cs kubernetes.Interface, ns string, name string) { - err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns) waitForPdbToBeDeleted(cs, ns, name) } @@ -423,7 +423,7 @@ func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count in func deletePDBCollection(cs kubernetes.Interface, ns string) { ginkgo.By("deleting a collection of PDBs") - err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{}) + err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns) waitForPDBCollectionToBeDeleted(cs, ns) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 4fb21042804..560c8bcbd71 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -1268,7 +1268,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { func deleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet) { name := getStatefulSetPodNameAtIndex(index, ss) noGrace := int64(0) - if err := c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { + if err := c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { framework.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err) } } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 9700a0e4f4a..2fa83866601 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -225,7 +225,7 @@ var _ = 
SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { _, err = f.ClientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to create list deployments") - err = f.ClientSet.AppsV1().Deployments(namespace).Delete(context.TODO(), "audit-deployment", &metav1.DeleteOptions{}) + err = f.ClientSet.AppsV1().Deployments(namespace).Delete(context.TODO(), "audit-deployment", metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete deployments") expectEvents(f, []utils.AuditEvent{ @@ -358,7 +358,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list config maps") - err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-configmap") expectEvents(f, []utils.AuditEvent{ @@ -490,7 +490,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { _, err = f.ClientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list secrets") - err = f.ClientSet.CoreV1().Secrets(namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(namespace).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-secret") expectEvents(f, []utils.AuditEvent{ diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 5882e2cd3d1..45bc30fa8ea 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -376,7 +376,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { return len(missingReport.MissingEvents) == 0, nil }) framework.ExpectNoError(err, "after %v failed to observe audit events", pollingTimeout) - err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), "test", &metav1.DeleteOptions{}) + err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), "test", metav1.DeleteOptions{}) framework.ExpectNoError(err, "could not delete audit configuration") }) }) diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index 561b9583dae..a426e929abd 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -119,6 +119,6 @@ var _ = SIGDescribe("Certificates API", func() { newClient, err := v1beta1client.NewForConfig(rcfg) framework.ExpectNoError(err) - framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(context.TODO(), csrName, nil)) + framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(context.TODO(), csrName, metav1.DeleteOptions{})) }) }) diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index 0710bb04d67..f235c0cf724 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -179,7 +179,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { ginkgo.It("A node shouldn't be able to delete another node", func() { ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", &metav1.DeleteOptions{}) + err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", metav1.DeleteOptions{}) 
framework.ExpectEqual(apierrors.IsForbidden(err), true) }) }) diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 264a7d258cc..3d87baa5593 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -246,7 +246,7 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu return psp, func() { // Cleanup non-namespaced PSP object. - f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(context.TODO(), name, &metav1.DeleteOptions{}) + f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(context.TODO(), name, metav1.DeleteOptions{}) } } diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 34659ce2106..d4eb0771e04 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -83,7 +83,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { // delete the referenced secret ginkgo.By("deleting the service account token") - framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secrets[0].Name, nil)) + framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secrets[0].Name, metav1.DeleteOptions{})) // wait for the referenced secret to be removed, and another one autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 7ae78971900..67ad3dff68e 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -1039,7 +1039,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb, metav1.CreateOptions{}) defer func() { - f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, &metav1.DeleteOptions{}) + f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{}) }() framework.ExpectNoError(err) @@ -1879,7 +1879,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { var finalErr error for _, newPdbName := range newPdbs { ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName)) - err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(context.TODO(), newPdbName, &metav1.DeleteOptions{}) + err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(context.TODO(), newPdbName, metav1.DeleteOptions{}) if err != nil { // log error, but attempt to remove other pdbs klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) @@ -1942,7 +1942,7 @@ func createPriorityClasses(f *framework.Framework) func() { return func() { for className := range priorityClasses { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, nil) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) if err != nil { klog.Errorf("Error deleting priority class: %v", err) } diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index 0d7099e5609..d3a7862d338 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ 
b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -280,7 +280,7 @@ func (tc *CustomMetricTestCase) Run() { if err != nil { framework.Failf("Failed to create HPA: %v", err) } - defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{}) + defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, metav1.DeleteOptions{}) waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) } @@ -303,10 +303,10 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) { if deployment != nil { - _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, &metav1.DeleteOptions{}) + _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, metav1.DeleteOptions{}) } if pod != nil { - _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, &metav1.DeleteOptions{}) + _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, metav1.DeleteOptions{}) } } diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 14dff8eaafc..f81f22a8a28 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -274,7 +274,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { } func deleteDNSScalingConfigMap(c clientset.Interface) error { - if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, nil); err != nil { + if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, metav1.DeleteOptions{}); err != nil { return err } framework.Logf("DNS autoscaling ConfigMap deleted.") @@ -335,7 +335,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error { } podName := pods.Items[0].Name - if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, nil); err != nil { + if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil { return err } framework.Logf("DNS autoscaling pod %v deleted.", podName) diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go index 62b4554a3b7..f1ee3c0ad93 100644 --- a/test/e2e/cloud/gcp/addon_update.go +++ b/test/e2e/cloud/gcp/addon_update.go @@ -302,7 +302,7 @@ var _ = SIGDescribe("Addon update", func() { // Delete the "ensure exist class" addon at the end. 
defer func() { framework.Logf("Cleaning up ensure exist class addon.") - err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", nil) + err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", metav1.DeleteOptions{}) framework.ExpectNoError(err) }() diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index db1d3ca391b..106bd4fc40d 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -212,7 +212,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { } framework.ExpectEqual(testConfigMapFound, true, "failed to find ConfigMap in list") - err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "test-configmap-static=true", }) framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector") diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 38f2dd718b5..e88a3cccd21 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -453,7 +453,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) @@ -594,7 +594,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Ensure that immutable config map can be deleted. - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace) }) diff --git a/test/e2e/common/kubelet.go b/test/e2e/common/kubelet.go index 579ca7c4358..1c0502d2802 100644 --- a/test/e2e/common/kubelet.go +++ b/test/e2e/common/kubelet.go @@ -130,7 +130,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted. 
*/ framework.ConformanceIt("should be possible to delete [NodeConformance]", func() { - err := podClient.Delete(context.TODO(), podName, &metav1.DeleteOptions{}) + err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err)) }) }) diff --git a/test/e2e/common/lease.go b/test/e2e/common/lease.go index 7bb1b64c71c..f87ca02e9a6 100644 --- a/test/e2e/common/lease.go +++ b/test/e2e/common/lease.go @@ -144,14 +144,14 @@ var _ = framework.KubeDescribe("Lease", func() { framework.ExpectEqual(len(leases.Items), 2) selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector() - err = leaseClient.DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) + err = leaseClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err, "couldn't delete collection") leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectEqual(len(leases.Items), 1) - err = leaseClient.Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err = leaseClient.Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting Lease failed") _, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/test/e2e/common/podtemplates.go b/test/e2e/common/podtemplates.go index f289797466c..420528a49cf 100644 --- a/test/e2e/common/podtemplates.go +++ b/test/e2e/common/podtemplates.go @@ -84,7 +84,7 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found") // delete the PodTemplate - err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete PodTemplate") // list the PodTemplates diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index bbf92234f81..76ebed5366c 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ -380,7 +380,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) diff --git a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index 306f8039ed2..3da67ba7715 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -382,7 +382,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) 
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index 2c161a355c6..939d58acda6 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -303,7 +303,7 @@ while true; do sleep 1; done ginkgo.By("create image pull secret") _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) framework.ExpectNoError(err) - defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil) + defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) container.ImagePullSecrets = []string{secret.Name} } // checkContainerStatus checks whether the container status matches expectation. diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index b4217955c4e..6b7b5b4776f 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -66,7 +66,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { rcClient := f.ClientSet.NodeV1beta1().RuntimeClasses() ginkgo.By("Deleting RuntimeClass "+rcName, func() { - err := rcClient.Delete(context.TODO(), rcName, nil) + err := rcClient.Delete(context.TODO(), rcName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName) ginkgo.By("Waiting for the RuntimeClass to disappear") diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index 5cca4a2d023..538dd7fd7c7 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -209,7 +209,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch") ginkgo.By("deleting the secret using a LabelSelector") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "testsecret=true", }) framework.ExpectNoError(err, "failed to delete patched secret") diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index 102440a5d21..80c8b3001a5 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -348,7 +348,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) @@ -412,7 +412,7 @@ var 
_ = ginkgo.Describe("[sig-storage] Secrets", func() { framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Ensure that immutable secret can be deleted. - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace) }) diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go index 1d1b0975fee..092bad691de 100644 --- a/test/e2e/common/volumes.go +++ b/test/e2e/common/volumes.go @@ -45,7 +45,8 @@ package common import ( "context" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -131,7 +132,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { name := config.Prefix + "-server" defer func() { volume.TestServerCleanup(f, config) - err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, nil) + err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "defer: Gluster delete endpoints failed") }() diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 9477e4879ee..54a3ddbb4f9 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -141,7 +141,7 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { return } defer func() { - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil { framework.Logf("Failed to delete pod %v: %v", p.Name, err) } }() diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index 2d3e1fa8fb1..6cbeb51600d 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -415,9 +415,9 @@ func (rc *ResourceConsumer) CleanUp() { time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{})) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, metav1.DeleteOptions{})) } func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) { @@ -538,7 +538,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma // DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources. 
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) { - rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, nil) + rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{}) } // runReplicaSet launches (and verifies correctness) of a replicaset. diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 0fbe3d129dd..e574a8ee3b9 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -386,7 +386,7 @@ func (f *Framework) AfterEach() { if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) { for _, ns := range f.namespacesToDelete { ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) - if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { nsDeletionErrors[ns.Name] = err diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 9684aa8a6cd..23ae490f909 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -653,7 +653,7 @@ func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1beta1.Ingress) { // runDelete runs the required command to delete the given ingress. func (j *TestJig) runDelete(ing *networkingv1beta1.Ingress) error { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, nil) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, metav1.DeleteOptions{}) } // Use kubemci to delete a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -1145,12 +1145,12 @@ func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Depl } } if svc != nil { - if err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil); err != nil { + if err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } if deploy != nil { - if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(context.TODO(), deploy.Name, nil); err != nil { + if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(context.TODO(), deploy.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err)) } } diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index cea7e30ed0c..c71c183f92e 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -553,7 +553,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st // DeleteNodePortService deletes NodePort service. func (config *NetworkingTestConfig) DeleteNodePortService() { - err := config.getServiceClient().Delete(context.TODO(), config.NodePortService.Name, nil) + err := config.getServiceClient().Delete(context.TODO(), config.NodePortService.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "error while deleting NodePortService. 
err:%v)", err) time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted. } diff --git a/test/e2e/framework/pod/delete.go b/test/e2e/framework/pod/delete.go index 6a292457916..2da92d4d8e0 100644 --- a/test/e2e/framework/pod/delete.go +++ b/test/e2e/framework/pod/delete.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) @@ -37,7 +38,7 @@ const ( // DeletePodOrFail deletes the pod of the specified namespace and name. func DeletePodOrFail(c clientset.Interface, ns, name string) { ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) - err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) + err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) expectNoError(err, "failed to delete pod %s in namespace %s", name, ns) } @@ -54,7 +55,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error { // not existing. func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error { e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace) - err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, nil) + err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil // assume pod was already deleted diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index 7fa5f4a24e3..7741db13136 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -186,7 +186,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa func DeletePersistentVolume(c clientset.Interface, pvName string) error { if c != nil && len(pvName) > 0 { framework.Logf("Deleting PersistentVolume %q", pvName) - err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, nil) + err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PV Delete API error: %v", err) } @@ -198,7 +198,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error { func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error { if c != nil && len(pvcName) > 0 { framework.Logf("Deleting PersistentVolumeClaim %q", pvcName) - err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, nil) + err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PVC Delete API error: %v", err) } diff --git a/test/e2e/framework/service/wait.go b/test/e2e/framework/service/wait.go index 627a8d21273..747d01ab7eb 100644 --- a/test/e2e/framework/service/wait.go +++ b/test/e2e/framework/service/wait.go @@ -33,7 +33,7 @@ import ( // WaitForServiceDeletedWithFinalizer waits for the service with finalizer to be deleted. 
func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) { ginkgo.By("Delete service with finalizer") - if err := cs.CoreV1().Services(namespace).Delete(context.TODO(), name, nil); err != nil { + if err := cs.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { framework.Failf("Failed to delete service %s/%s", namespace, name) } diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index d93fa565967..d9376b177eb 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -87,7 +87,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { framework.Logf("Deleting statefulset %v", ss.Name) // Use OrphanDependents=false so it's deleted synchronously. // We already made sure the Pods are gone inside Scale(). - if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { + if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } } @@ -105,7 +105,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames.Insert(pvc.Spec.VolumeName) // TODO: Double check that there are no pods referencing the pvc framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) - if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { return false, nil } } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 8c836c1c452..34417d09d98 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -253,7 +253,7 @@ OUTER: go func(nsName string) { defer wg.Done() defer ginkgo.GinkgoRecover() - gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, nil)).To(gomega.Succeed()) + gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{})).To(gomega.Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index d48899ab62a..996110dffaf 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -312,7 +312,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } if config.WaitForCompletion { framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) - framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, nil)) + framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{})) } else { framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod)) if pod == nil { diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index 553ba62e215..277b5a0ab24 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -124,7 +124,7 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } - 
defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, &metav1.DeleteOptions{}) + defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{}) // Run application that exports the metric _, err = createSDExporterPods(f, kubeClient) @@ -172,7 +172,7 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } - defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, &metav1.DeleteOptions{}) + defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, metav1.DeleteOptions{}) // Run application that exports the metric pod, err := createSDExporterPods(f, kubeClient) @@ -258,11 +258,11 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric } func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { - err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod1, &metav1.DeleteOptions{}) + err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod1, metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) } - err = cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod2, &metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod2, metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) } diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 9a9a22cbd47..321591344db 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -77,7 +77,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { _ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { pod.Spec.Containers[0].Name = uniqueContainerName }) - defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), uniqueContainerName, &metav1.DeleteOptions{}) + defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), uniqueContainerName, metav1.DeleteOptions{}) // Wait a short amount of time for Metadata Agent to be created and metadata to be exported time.Sleep(metadataWaitTime) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index d9bff2464af..846b38c00a9 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -625,7 +625,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach without stdin") runOutput = framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). 
@@ -634,7 +634,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") runOutput = framework.NewKubectlCommand(ns, nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). @@ -660,7 +660,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) gomega.Expect(err).To(gomega.BeNil()) - gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) }) ginkgo.It("should contain last line of the log", func() { diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index d4d5ca27179..d44e6ca6425 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -43,7 +43,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { ginkgo.AfterEach(func() { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, &metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, metav1.DeleteOptions{}) framework.ExpectNoError(err) secretNeedClean = "" } @@ -118,7 +118,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) ginkgo.By("delete the bootstrap token secret") - err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, &metav1.DeleteOptions{}) + err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap token removed from cluster-info ConfigMap") diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index 7eaeeb3bb68..c0c136d7e77 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -43,7 +43,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { ginkgo.AfterEach(func() { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, &metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, metav1.DeleteOptions{}) secretNeedClean = "" framework.ExpectNoError(err) } diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 81df6aed721..7ff48f6de28 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -143,7 +143,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - 
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) }() regularServiceName := "test-service-2" @@ -154,7 +154,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{}) }() // All the names we need to be able to resolve. @@ -198,7 +198,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) }() regularServiceName := "test-service-2" @@ -208,7 +208,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, metav1.DeleteOptions{}) }() // All the names we need to be able to resolve. @@ -256,7 +256,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) @@ -298,7 +298,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, metav1.DeleteOptions{}) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) @@ -337,7 +337,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { ginkgo.By("deleting the test externalName service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{}) }() hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") @@ -468,7 +468,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { framework.Logf("Deleting configmap %s...", corednsConfig.Name) - err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), corednsConfig.Name, nil) + err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), corednsConfig.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %s: %v", corednsConfig.Name) }() diff --git 
a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 82204067bb8..ff0424dc322 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -184,14 +184,14 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { t.setConfigMap(&v1.ConfigMap{Data: configMapData}) t.deleteCoreDNSPods() } else { - t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) + t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, metav1.DeleteOptions{}) } } func (t *dnsTestCommon) deleteConfigMap() { ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil - err := t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) + err := t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete config map: %s", t.name) } diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index ed8593a5026..9bf545000f5 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -58,7 +58,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() { func (t *dnsFederationsConfigMapTest) run() { t.init() - defer t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) + defer t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, metav1.DeleteOptions{}) t.createUtilPodLabel("e2e-dns-configmap") defer t.deleteUtilPod() originalConfigMapData := t.fetchDNSConfigMapData() @@ -425,8 +425,8 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { defer func() { ginkgo.By("deleting the test externalName service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, nil) - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameServiceLocal.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, metav1.DeleteOptions{}) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameServiceLocal.Name, metav1.DeleteOptions{}) }() if isIPv6 { diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 426fa5527ca..8efe59bd347 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -99,7 +99,7 @@ var _ = SIGDescribe("Firewall rule", func() { svc.Spec.LoadBalancerSourceRanges = nil }) framework.ExpectNoError(err) - err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) @@ -159,7 +159,7 @@ var _ = SIGDescribe("Firewall rule", func() { defer func() { framework.Logf("Cleaning up the netexec pod: %v", podName) - err = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, nil) + err = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() } diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go index bfe07231415..269d80a0b91 100644 --- a/test/e2e/network/fixture.go +++ b/test/e2e/network/fixture.go @@ -103,7 +103,7 @@ func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) { // DeleteService deletes a service, and 
remove it from the cleanup list func (t *TestFixture) DeleteService(serviceName string) error { - err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) if err == nil { delete(t.services, serviceName) } @@ -139,7 +139,7 @@ func (t *TestFixture) Cleanup() []error { } // TODO(mikedanese): Wait. // Then, delete the RC altogether. - if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(context.TODO(), rcName, nil); err != nil { + if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(context.TODO(), rcName, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, err) } @@ -148,7 +148,7 @@ func (t *TestFixture) Cleanup() []error { for serviceName := range t.services { ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) if err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, err) diff --git a/test/e2e/network/ingressclass.go b/test/e2e/network/ingressclass.go index e2e4e51ed88..8df59460814 100644 --- a/test/e2e/network/ingressclass.go +++ b/test/e2e/network/ingressclass.go @@ -118,6 +118,6 @@ func createBasicIngress(cs clientset.Interface, namespace string) (*networkingv1 } func deleteIngressClass(cs clientset.Interface, name string) { - err := cs.NetworkingV1beta1().IngressClasses().Delete(context.TODO(), name, &metav1.DeleteOptions{}) + err := cs.NetworkingV1beta1().IngressClasses().Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err) } diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index c8dbab391b0..f0d5ac74362 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -858,7 +858,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { podClient := createNetworkClientPodWithRestartPolicy(f, f.Namespace, "client-a", service, allowedPort, v1.RestartPolicyOnFailure) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podClient.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -1491,7 +1491,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -1503,7 +1503,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, nil); 
err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -1670,11 +1670,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { ginkgo.By("Cleaning up the server.") - if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) } ginkgo.By("Cleaning up the server's service.") - if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup svc %v: %v", service.Name, err) } } @@ -1740,7 +1740,7 @@ func updatePodLabel(f *framework.Framework, namespace *v1.Namespace, podName str func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { ginkgo.By("Cleaning up the policy.") - if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, nil); err != nil { + if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) } } diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 27f716facc7..8c786407755 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -135,7 +135,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { f.Logger.Infof("Cleaning up ingresses...") for _, ing := range f.ScaleTestIngs { if ing != nil { - if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, nil); err != nil { + if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) } } @@ -143,14 +143,14 @@ f.Logger.Infof("Cleaning up services...") for _, svc := range f.ScaleTestSvcs { if svc != nil { - if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil); err != nil { + if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } } if f.ScaleTestDeploy != nil { f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) - if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, nil); err != nil { + if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil { errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) } } diff --git
a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index 0f229fc16fc..e4b54f0e974 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -144,7 +144,7 @@ func main() { if cleanup { defer func() { klog.Infof("Deleting namespace %s...", ns.Name) - if err := cs.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, nil); err != nil { + if err := cs.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil { klog.Errorf("Failed to delete namespace %s: %v", ns.Name, err) testSuccessFlag = false } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 42db66cfe38..64f3363efa2 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -255,7 +255,7 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er if err := e2erc.DeleteRCAndWaitForGC(clientset, ns, name); err != nil { return err } - if err := clientset.CoreV1().Services(ns).Delete(context.TODO(), name, nil); err != nil { + if err := clientset.CoreV1().Services(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { return err } return nil @@ -735,7 +735,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service " + serviceName + " in namespace " + ns) defer func() { - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() _, err := jig.CreateTCPServiceWithPort(nil, 80) @@ -747,7 +747,7 @@ var _ = SIGDescribe("Services", func() { names := map[string]bool{} defer func() { for name := range names { - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -788,7 +788,7 @@ var _ = SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) defer func() { - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() @@ -820,7 +820,7 @@ var _ = SIGDescribe("Services", func() { names := map[string]bool{} defer func() { for name := range names { - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -886,7 +886,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the sourceip test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() serviceIP := tcpService.Spec.ClusterIP @@ -909,7 +909,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(f.WaitForPodReady(pod.Name)) defer func() { framework.Logf("Cleaning up the echo server pod") - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), serverPodName, nil) + err := 
cs.CoreV1().Pods(ns).Delete(context.TODO(), serverPodName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName) }() @@ -922,7 +922,7 @@ var _ = SIGDescribe("Services", func() { defer func() { framework.Logf("Deleting deployment") - err = cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) + err = cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }() @@ -1544,7 +1544,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the updating NodePorts test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port) @@ -1616,7 +1616,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ExternalName to ClusterIP test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1655,7 +1655,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ExternalName to NodePort test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1693,7 +1693,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ClusterIP to ExternalName test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1735,7 +1735,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the NodePort to ExternalName test service") - err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -2081,7 +2081,7 @@ var _ = SIGDescribe("Services", func() { } else { for _, pod := range pods.Items { var gracePeriodSeconds int64 = 0 - err := podClient.Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) + err := podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) if err != nil { framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err) } @@ -2733,7 +2733,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold) 
framework.ExpectNoError(err) } - err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() @@ -2759,7 +2759,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { svc, err := jig.CreateOnlyLocalNodePortService(true) framework.ExpectNoError(err) defer func() { - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() @@ -2802,7 +2802,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() @@ -2863,7 +2863,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() @@ -2878,7 +2878,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { framework.Logf("Deleting deployment") - err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) + err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }() @@ -2926,7 +2926,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }() @@ -3094,7 +3094,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { framework.Logf("Cleaning up the exec pod") - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) }() err = jig.CheckServiceReachability(svc, execPod) @@ -3161,7 +3161,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { framework.Logf("Cleaning up the exec pod") - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) }() err = jig.CheckServiceReachability(svc, execPod) diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go 
index 6112aa98fbf..50729b4ad56 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -70,7 +70,7 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("submitting the pod to kubernetes") defer func() { ginkgo.By("deleting the pod") - podClient.Delete(context.TODO(), pod.Name, nil) + podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) }() if _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { framework.Failf("Failed to create pod: %v", err) diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 4f2a023ee23..a5e64304b19 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -292,7 +292,7 @@ var _ = SIGDescribe("Pods Extended", func() { t := time.Duration(rand.Intn(delay)) * time.Millisecond time.Sleep(t) - err := podClient.Delete(context.TODO(), pod.Name, nil) + err := podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete pod") events, ok := <-ch diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index c805c8e6b72..2f71205278e 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -66,7 +66,7 @@ func testPreStop(c clientset.Interface, ns string) { // At the end of the test, clean up by removing the pod. defer func() { ginkgo.By("Deleting the server pod") - c.CoreV1().Pods(ns).Delete(context.TODO(), podDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(context.TODO(), podDescr.Name, metav1.DeleteOptions{}) }() ginkgo.By("Waiting for pods to come up.") @@ -113,7 +113,7 @@ func testPreStop(c clientset.Interface, ns string) { defer func() { if deletePreStop { ginkgo.By("Deleting the tester pod") - c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, metav1.DeleteOptions{}) } }() @@ -122,7 +122,7 @@ func testPreStop(c clientset.Interface, ns string) { // Delete the pod with the preStop handler. 
ginkgo.By("Deleting pre-stop pod") - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, nil); err == nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, metav1.DeleteOptions{}); err == nil { deletePreStop = false } framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 32314e76eca..9fbbf9297c4 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -267,7 +267,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } // remove RuntimeClass - cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime), nil) + cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime), metav1.DeleteOptions{}) }) ginkgo.It("verify pod overhead is accounted for", func() { diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 462e4230229..956735bf649 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -144,7 +144,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { // Defer the cleanup defer func() { framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) - err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil) + err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) if err != nil { framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) } diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 8a050537a3c..53637bc2ab9 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -187,7 +187,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name)) claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err == nil { - cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) + cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) } @@ -195,7 +195,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { for _, sc := range m.sc { ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name)) - cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) + cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) } ginkgo.By("Cleaning up resources") @@ -787,7 +787,7 @@ func destroyCSIDriver(cs clientset.Interface, driverName string) { framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name) // Uncomment the following line to get full dump of CSIDriver object // framework.Logf("%s", framework.PrettyPrint(driverGet)) - cs.StorageV1beta1().CSIDrivers().Delete(context.TODO(), driverName, nil) + cs.StorageV1beta1().CSIDrivers().Delete(context.TODO(), driverName, metav1.DeleteOptions{}) } } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 6999d5c5101..9d721e8fa76 100644 --- 
a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -325,7 +325,7 @@ func (v *glusterVolume) DeleteVolume() { name := v.prefix + "-server" framework.Logf("Deleting Gluster endpoints %q...", name) - err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, nil) + err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { if !apierrors.IsNotFound(err) { framework.Failf("Gluster delete endpoints failed: %v", err) @@ -1944,7 +1944,7 @@ func cleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se if secret != nil { framework.Logf("Deleting server secret %q...", secret.Name) - err := cs.CoreV1().Secrets(ns.Name).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}) + err := cs.CoreV1().Secrets(ns.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}) if err != nil { framework.Logf("Delete secret failed: %v", err) } diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 6fcadeebdd8..4368e5bba0b 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -148,11 +148,11 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { defer func() { ginkgo.By("Cleaning up the secret") - if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } ginkgo.By("Cleaning up the configmap") - if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) } ginkgo.By("Cleaning up the pod") @@ -264,7 +264,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } ginkgo.By("Cleaning up the git server svc") - if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), gitServerSvc.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), gitServerSvc.Name, metav1.DeleteOptions{}); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } @@ -313,7 +313,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { func deleteConfigMaps(f *framework.Framework, configMapNames []string) { ginkgo.By("Cleaning up the configMaps") for _, configMapName := range configMapNames { - err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMapName, nil) + err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMapName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "unable to delete configMap %v", configMapName) } } diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 9b2d273771d..3246a83d9b3 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -155,7 +155,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { ginkgo.By("Creating a 
deployment with the provisioned volume") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) - defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) + defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index c1e28cbe35e..763975208a6 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -122,7 +122,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { ginkgo.By("Creating a deployment with selected PVC") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) - defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) + defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) // PVC should be bound at this point ginkgo.By("Checking for bound PVC") diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index df81345fabd..8b0343e4b58 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -154,7 +154,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk [Flaky]", func() { ginkgo.By("Deleting the Namespace") - err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, nil) + err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 470e6849185..ca2d06c5b49 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -487,7 +487,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { if localVolume.pv.Name != pv.Name { continue } - err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, &metav1.DeleteOptions{}) + err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) pvConfig := makeLocalPVConfig(config, localVolume) localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig)) @@ -628,7 +628,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { return } ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name)) - err := config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, &metav1.DeleteOptions{}) + err := config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -673,7 +673,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { framework.Logf("Deleting pod %v", pod.Name) - if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), pod.Name, nil); err != 
nil { + if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { return err } @@ -796,7 +796,7 @@ func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMod } func cleanupStorageClass(config *localTestConfig) { - framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(context.TODO(), config.scName, nil)) + framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(context.TODO(), config.scName, metav1.DeleteOptions{})) } // podNode wraps RunKubectl to get node where pod is running diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 48068a88908..c63b49e5361 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -362,7 +362,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ss, err = e2esset.Scale(c, ss, 0) framework.ExpectNoError(err) e2esset.WaitForStatusReplicas(c, ss, 0) - err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, &metav1.DeleteOptions{}) + err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating a new Statefulset and validating the data") diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 3b87d27dc27..f1746a3143c 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -188,7 +188,7 @@ func testZonalFailover(c clientset.Interface, ns string) { framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil), + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{}), "Error deleting StorageClass %s", class.Name) }() @@ -201,12 +201,12 @@ defer func() { framework.Logf("deleting statefulset %q/%q", statefulSet.Namespace, statefulSet.Name) // typically this claim has already been deleted - framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(context.TODO(), statefulSet.Name, nil), + framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(context.TODO(), statefulSet.Name, metav1.DeleteOptions{}), "Error deleting StatefulSet %s", statefulSet.Name) framework.Logf("deleting claims in namespace %s", ns) pvc := getPVC(c, ns, regionalPDLabels) - framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil), + framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}), "Error deleting claim %s.", pvc.Name) if pvc.Spec.VolumeName != "" { err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout) @@ -244,7 +244,7 @@ func testZonalFailover(c clientset.Interface, ns string) { }() ginkgo.By("deleting StatefulSet pod") - err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) // Verify the pod is scheduled in the other zone.
ginkgo.By("verifying the pod is scheduled in a different zone.") diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 0ace7c28119..1f619102e2a 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -408,7 +408,7 @@ func isDelayedBinding(sc *storagev1.StorageClass) bool { // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(cs clientset.Interface, className string) error { - err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, nil) + err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 4dc4aeb6d8e..58cad0d3708 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -334,7 +334,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) + framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{})) }() } @@ -344,7 +344,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) // typically this claim has already been deleted - err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) + err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } @@ -358,7 +358,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { pv := t.checkProvisioning(client, claim, class) ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) - framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil)) + framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})) // Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Retain, there's no use waiting because the PV won't be auto-deleted and @@ -770,7 +770,7 @@ func prepareSnapshotDataSourceForProvisioning( } framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name) - err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(context.TODO(), updatedClaim.Name, nil) + err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(context.TODO(), updatedClaim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting initClaim %q. 
Error: %v", updatedClaim.Name, err) } @@ -819,13 +819,13 @@ func preparePVCDataSourceForProvisioning( cleanupFunc := func() { framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name) - err := client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(context.TODO(), sourcePVC.Name, nil) + err := client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(context.TODO(), sourcePVC.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err) } if class != nil { framework.Logf("deleting class %q", class.Name) - err := client.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil) + err := client.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting storage class %q. Error: %v", class.Name, err) } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 4bb9f28f177..73702cd3e02 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -137,7 +137,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) + framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{})) }() ginkgo.By("creating a claim") @@ -146,7 +146,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt defer func() { framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) // typically this claim has already been deleted - err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil) + err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. 
Error: %v", pvc.Name, err) } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index dfd754eeeb8..438ce54d592 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -220,19 +220,19 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String) error { var cleanupErrors []string if runningPodName != "" { - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{}) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", runningPodName, err)) } } if unschedulablePodName != "" { - err := cs.CoreV1().Pods(ns).Delete(context.TODO(), unschedulablePodName, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), unschedulablePodName, metav1.DeleteOptions{}) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", unschedulablePodName, err)) } } for _, pvc := range pvcs { - err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, nil) + err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete PVC %s: %s", pvc.Name, err)) } diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index 3483559451b..e510e65dd90 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -402,7 +402,7 @@ func (*serviceAccountFactory) Create(f *framework.Framework, i interface{}) (fun return nil, errors.Wrap(err, "create ServiceAccount") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -424,7 +424,7 @@ func (*clusterRoleFactory) Create(f *framework.Framework, i interface{}) (func() return nil, errors.Wrap(err, "create ClusterRole") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -445,7 +445,7 @@ func (*clusterRoleBindingFactory) Create(f *framework.Framework, i interface{}) return nil, errors.Wrap(err, "create ClusterRoleBinding") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -466,7 +466,7 @@ func (*roleFactory) Create(f *framework.Framework, i interface{}) (func() error, return nil, errors.Wrap(err, "create Role") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -487,7 +487,7 @@ func (*roleBindingFactory) Create(f *framework.Framework, i interface{}) (func() return nil, errors.Wrap(err, "create RoleBinding") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -508,7 +508,7 @@ func (*serviceFactory) Create(f *framework.Framework, i 
interface{}) (func() err return nil, errors.Wrap(err, "create Service") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -529,7 +529,7 @@ func (*statefulSetFactory) Create(f *framework.Framework, i interface{}) (func() return nil, errors.Wrap(err, "create StatefulSet") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -550,7 +550,7 @@ func (*daemonSetFactory) Create(f *framework.Framework, i interface{}) (func() e return nil, errors.Wrap(err, "create DaemonSet") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -571,7 +571,7 @@ func (*storageClassFactory) Create(f *framework.Framework, i interface{}) (func( return nil, errors.Wrap(err, "create StorageClass") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -592,7 +592,7 @@ func (*csiDriverFactory) Create(f *framework.Framework, i interface{}) (func() e return nil, errors.Wrap(err, "create CSIDriver") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } @@ -613,7 +613,7 @@ func (*secretFactory) Create(f *framework.Framework, i interface{}) (func() erro return nil, errors.Wrap(err, "create Secret") } return func() error { - return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{}) }, nil } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 8c4c4dd2858..c0caf76d4a1 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -301,7 +301,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f if forceDelete { err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{}) } framework.ExpectNoError(err) @@ -387,7 +387,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra if forceDelete { err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{}) } framework.ExpectNoError(err, "Failed to delete pod.") @@ -579,7 +579,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, }, } - roleBindingClient.Delete(context.TODO(), binding.GetName(), &metav1.DeleteOptions{}) + roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{}) err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { _, err := roleBindingClient.Get(context.TODO(), 
binding.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err), nil diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index eaef9afb367..fa877ca8c57 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } if invalidSc != nil { - err := c.StorageV1().StorageClasses().Delete(context.TODO(), invalidSc.Name, nil) + err := c.StorageV1().StorageClasses().Delete(context.TODO(), invalidSc.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "Error deleting storageclass %v: %v", invalidSc.Name, err) invalidSc = nil } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 2595de595c6..23662b60fa2 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -790,7 +790,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{})) }() ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") @@ -803,7 +803,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) - err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) + err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. 
Error: %v", claim.Name, err) } @@ -1037,7 +1037,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(c clientset.Interface, className string) { - err := c.StorageV1().StorageClasses().Delete(context.TODO(), className, nil) + err := c.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 001cc2ff5e7..7595514eabe 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Volumes", func() { framework.Failf("unable to create test configmap: %v", err) } defer func() { - _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(context.TODO(), configMap.Name, nil) + _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) }() // Test one ConfigMap mounted several times to test #28502 diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index 7ebd17001a4..550a8d0eded 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -207,7 +207,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() */ ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { ginkgo.By("Deleting the Namespace") - err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, nil) + err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 3624487851b..9ea76b67328 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") - defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{}) scArrays[index] = sc } diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 2099324e457..d4b2ccd774e 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -78,7 +78,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) ginkgo.By("Creating statefulset") diff --git 
a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index c7644112166..7312e0db2b5 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, metav1.DeleteOptions{}) scArrays[index] = sc } diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 34f89f28bc2..55f6e724d7c 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -82,7 +82,7 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, ginkgo.By("Creating Storage Class With Invalid Datastore") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index e23c3b57c1d..e5acb836b71 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -113,7 +113,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) @@ -121,7 +121,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st framework.ExpectNoError(err) defer func() { - client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(), pvclaimSpec.Name, nil) + client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(), pvclaimSpec.Name, metav1.DeleteOptions{}) }() ginkgo.By("Waiting for claim to be in bound phase") diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index ca0b1a19882..c74159e8805 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -71,7 +71,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { ginkgo.By("Creating Storage Class") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) 
- defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index c305046aff3..20da5cbed7a 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -153,7 +153,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 7f99f8d3ac9..b400a0d3d1d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) @@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", ginkgo.By("Creating a Deployment") deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) - defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) + defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) ginkgo.By("Get pod from the deployment") podList, err := e2edeploy.GetPodsForDeployment(client, deployment) diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index e351ddcfdd8..c2396aa5fe3 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -83,7 +83,7 @@ var _ = utils.SIGDescribe("Volume Operations 
Storm [Feature:vsphere]", func() { e2epv.DeletePersistentVolumeClaim(client, claim.Name, namespace) } ginkgo.By("Deleting StorageClass") - err = client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + err = client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 37eecee6193..424a421ce95 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { scList := getTestStorageClasses(client, policyName, datastoreName) defer func(scList []*storagev1.StorageClass) { for _, sc := range scList { - client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) + client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{}) } }(scList) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 6570b90a8bc..3d714c615b2 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -261,7 +261,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -293,7 +293,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -313,7 +313,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, 
metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index f30a5b345ab..dd496511dc6 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -378,7 +378,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -420,7 +420,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -462,7 +462,7 @@ func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -481,7 +481,7 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { 
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -502,7 +502,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) ginkgo.By("Creating PVC using the storage class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go index 3a253492aa8..edf85e8e724 100644 --- a/test/e2e/upgrades/storage/volume_mode.go +++ b/test/e2e/upgrades/storage/volume_mode.go @@ -121,7 +121,7 @@ func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, t.pod)) ginkgo.By("Deleting the PVC") - framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(context.TODO(), t.pvc.Name, nil)) + framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(context.TODO(), t.pvc.Name, metav1.DeleteOptions{})) ginkgo.By("Waiting for the PV to be deleted") framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index 39ac26f3e1e..1cebe10d25a 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -313,7 +313,7 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, func(), error) { } cleanUpFunc := func() { - f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), roleName, &metav1.DeleteOptions{}) + f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), roleName, metav1.DeleteOptions{}) } _, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{}) diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 0553caa76e1..185985e3971 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -205,7 +205,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions) 
framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) _, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions) @@ -237,7 +237,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2))) ginkgo.By("By deleting the pods and waiting for container removal") - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) @@ -269,7 +269,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { }, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen)) ginkgo.By("by deleting the pods and waiting for container removal") - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index ea089db565b..3dbaa53d8df 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -929,7 +929,7 @@ func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error // deleteConfigMapFunc simply deletes tc.configMap func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { - return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, &metav1.DeleteOptions{}) + return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, metav1.DeleteOptions{}) } // createConfigMapFunc creates tc.configMap and updates the UID and ResourceVersion on tc.configMap diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 022379bea85..a4ea7ec5d9f 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -364,7 +364,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -417,7 +417,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis 
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go index d7dc0b072c4..c1becd18de0 100644 --- a/test/e2e_node/gpu_device_plugin_test.go +++ b/test/e2e_node/gpu_device_plugin_test.go @@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi continue } - f.PodClient().Delete(context.TODO(), p.Name, &metav1.DeleteOptions{}) + f.PodClient().Delete(context.TODO(), p.Name, metav1.DeleteOptions{}) } }) @@ -135,7 +135,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi framework.ExpectEqual(devID1, devID2) ginkgo.By("Deleting device plugin.") - f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, &metav1.DeleteOptions{}) + f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, metav1.DeleteOptions{}) ginkgo.By("Waiting for GPUs to become unavailable on the local node") gomega.Eventually(func() bool { node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 2c69fb8f117..c26ce838221 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -379,7 +379,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete ginkgo.By("Wait for the node problem detector to disappear") gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed()) ginkgo.By("Delete the config map") - c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, nil) + c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, metav1.DeleteOptions{}) ginkgo.By("Clean up the events") gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed()) ginkgo.By("Clean up the node condition") diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 0595313ad82..4551c8a4cbe 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("pod" + podUID) f.PodClient().Create(pod) @@ -248,7 +248,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), 
bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID) f.PodClient().Create(pod) @@ -293,7 +293,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(context.TODO(), burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID) f.PodClient().Create(pod) diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index b535c976b9d..d01939b4171 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -489,16 +489,16 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) { } ginkgo.By("Delete SRIOV device plugin pod %s/%s") - err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(sd.pod.Namespace).Delete(context.TODO(), sd.pod.Name, deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(sd.pod.Spec.Containers[0].Name, sd.pod.Name, sd.pod.Namespace) ginkgo.By(fmt.Sprintf("Deleting configMap %v/%v", metav1.NamespaceSystem, sd.configMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, &deleteOptions) + err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), sd.configMap.Name, deleteOptions) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Deleting serviceAccount %v/%v", metav1.NamespaceSystem, sd.serviceAccount.Name)) - err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, &deleteOptions) + err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Delete(context.TODO(), sd.serviceAccount.Name, deleteOptions) framework.ExpectNoError(err) } diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index fcaaffe3f53..b068d943d29 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() { }) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) gp := int64(1) - f.PodClient().Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + f.PodClient().Delete(context.TODO(), pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp}) if err == nil { break } diff --git a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go index 43b83f39cb2..3546760e31f 100644 --- a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go +++ b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go @@ -96,7 +96,7 @@ func TestBrokenWebhook(t *testing.T) { } t.Logf("Deleting the broken webhook to fix the cluster") - err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), brokenWebhookName, nil) + err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), brokenWebhookName, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Failed to delete broken webhook: %v", err) 
} diff --git a/test/integration/apiserver/admissionwebhook/client_auth_test.go b/test/integration/apiserver/admissionwebhook/client_auth_test.go index 030e6d63f28..87534edf14c 100644 --- a/test/integration/apiserver/admissionwebhook/client_auth_test.go +++ b/test/integration/apiserver/admissionwebhook/client_auth_test.go @@ -186,7 +186,7 @@ plugins: t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/admissionwebhook/load_balance_test.go b/test/integration/apiserver/admissionwebhook/load_balance_test.go index 96d68131584..5e3a775ef25 100644 --- a/test/integration/apiserver/admissionwebhook/load_balance_test.go +++ b/test/integration/apiserver/admissionwebhook/load_balance_test.go @@ -135,7 +135,7 @@ func TestWebhookLoadBalance(t *testing.T) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/admissionwebhook/reinvocation_test.go b/test/integration/apiserver/admissionwebhook/reinvocation_test.go index fa53c5cf0f6..8688cb67ac1 100644 --- a/test/integration/apiserver/admissionwebhook/reinvocation_test.go +++ b/test/integration/apiserver/admissionwebhook/reinvocation_test.go @@ -386,7 +386,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), cfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), cfg.GetName(), metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/admissionwebhook/timeout_test.go b/test/integration/apiserver/admissionwebhook/timeout_test.go index 255bded03a3..ac39901904f 100644 --- a/test/integration/apiserver/admissionwebhook/timeout_test.go +++ b/test/integration/apiserver/admissionwebhook/timeout_test.go @@ -217,7 +217,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -251,7 +251,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), validatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), validatingCfg.GetName(), metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/auth/dynamic_client_test.go b/test/integration/auth/dynamic_client_test.go index 4a5fc4dd6f5..6797260a26d 100644 --- 
a/test/integration/auth/dynamic_client_test.go +++ b/test/integration/auth/dynamic_client_test.go @@ -107,7 +107,7 @@ func TestDynamicClientBuilder(t *testing.T) { // We want to trigger token rotation here by deleting service account // the dynamic client was using. - if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), saName, nil); err != nil { + if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), saName, metav1.DeleteOptions{}); err != nil { t.Fatalf("delete service account %s failed: %v", saName, err) } time.Sleep(time.Second * 10) diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index 2d15af82d0b..834dbd676dc 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -218,7 +218,7 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2NormalPod := func(client clientset.Interface) func() error { return func() error { zero := int64(0) - return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2normalpod", metav1.DeleteOptions{GracePeriodSeconds: &zero}) } } @@ -240,7 +240,7 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2MirrorPod := func(client clientset.Interface) func() error { return func() error { zero := int64(0) - return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2mirrorpod", metav1.DeleteOptions{GracePeriodSeconds: &zero}) } } @@ -289,7 +289,7 @@ func TestNodeAuthorizer(t *testing.T) { } deleteNode2 := func(client clientset.Interface) func() error { return func() error { - return client.CoreV1().Nodes().Delete(context.TODO(), "node2", nil) + return client.CoreV1().Nodes().Delete(context.TODO(), "node2", metav1.DeleteOptions{}) } } createNode2NormalPodEviction := func(client clientset.Interface) func() error { @@ -388,7 +388,7 @@ func TestNodeAuthorizer(t *testing.T) { } deleteNode1Lease := func(client clientset.Interface) func() error { return func() error { - return client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Delete(context.TODO(), "node1", &metav1.DeleteOptions{}) + return client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Delete(context.TODO(), "node1", metav1.DeleteOptions{}) } } @@ -445,7 +445,7 @@ func TestNodeAuthorizer(t *testing.T) { } deleteNode1CSINode := func(client clientset.Interface) func() error { return func() error { - return client.StorageV1().CSINodes().Delete(context.TODO(), "node1", &metav1.DeleteOptions{}) + return client.StorageV1().CSINodes().Delete(context.TODO(), "node1", metav1.DeleteOptions{}) } } diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index e39bcb6e32a..5274b227cc0 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -742,7 +742,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err) } t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding") - if err = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "system:public-info-viewer", &metav1.DeleteOptions{}); err != nil { + if err = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "system:public-info-viewer", metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete `system:public-info-viewer` 
ClusterRoleBinding: %v", err) } diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index 882d69b0023..a5099da7a5e 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -803,7 +803,7 @@ func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAcc return } done = true - if err := cs.CoreV1().ServiceAccounts(sa.Namespace).Delete(context.TODO(), sa.Name, nil); err != nil { + if err := cs.CoreV1().ServiceAccounts(sa.Namespace).Delete(context.TODO(), sa.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("err: %v", err) } } @@ -822,7 +822,7 @@ func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod return } done = true - if err := cs.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { + if err := cs.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("err: %v", err) } } @@ -841,7 +841,7 @@ func createDeleteSecret(t *testing.T, cs clientset.Interface, sec *v1.Secret) (* return } done = true - if err := cs.CoreV1().Secrets(sec.Namespace).Delete(context.TODO(), sec.Name, nil); err != nil { + if err := cs.CoreV1().Secrets(sec.Namespace).Delete(context.TODO(), sec.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("err: %v", err) } } diff --git a/test/integration/configmap/configmap_test.go b/test/integration/configmap/configmap_test.go index a52ded4027a..6620db7e630 100644 --- a/test/integration/configmap/configmap_test.go +++ b/test/integration/configmap/configmap_test.go @@ -119,7 +119,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) } func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, nil); err != nil { + if err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Errorf("unable to delete ConfigMap %v: %v", name, err) } } diff --git a/test/integration/cronjob/cronjob_test.go b/test/integration/cronjob/cronjob_test.go index 8c47ba1ac34..6e98af0e49f 100644 --- a/test/integration/cronjob/cronjob_test.go +++ b/test/integration/cronjob/cronjob_test.go @@ -89,7 +89,7 @@ func newCronJob(name, namespace, schedule string) *batchv1beta1.CronJob { func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface, name string) { deletePropagation := metav1.DeletePropagationForeground - err := cjClient.Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) + err := cjClient.Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) if err != nil { t.Errorf("Failed to delete CronJob: %v", err) } diff --git a/test/integration/etcd/crd_overlap_storage_test.go b/test/integration/etcd/crd_overlap_storage_test.go index 07397190d88..8a8a3b67600 100644 --- a/test/integration/etcd/crd_overlap_storage_test.go +++ b/test/integration/etcd/crd_overlap_storage_test.go @@ -174,7 +174,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) { if err != nil { t.Fatal(err) } - err = apiServiceClient.APIServices().Delete(context.TODO(), testAPIService.Name, &metav1.DeleteOptions{}) + err = apiServiceClient.APIServices().Delete(context.TODO(), testAPIService.Name, metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -197,7 +197,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) { } // Delete the overlapping CRD 
- err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, &metav1.DeleteOptions{}) + err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) { } // Delete the overlapping CRD - err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, &metav1.DeleteOptions{}) + err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index 834b66c2fa5..9b2063e74f4 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -125,7 +125,7 @@ func (p *IntegrationTestNodePreparer) CleanupNodes() error { klog.Fatalf("Error listing nodes: %v", err) } for i := range nodes.Items { - if err := p.client.CoreV1().Nodes().Delete(context.TODO(), nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil { + if err := p.client.CoreV1().Nodes().Delete(context.TODO(), nodes.Items[i].Name, metav1.DeleteOptions{}); err != nil { klog.Errorf("Error while deleting Node: %v", err) } } diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 3fdd25ca027..bc19783aebf 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -310,7 +310,7 @@ func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1. func deleteNamespaceOrDie(name string, c clientset.Interface, t *testing.T) { zero := int64(0) background := metav1.DeletePropagationBackground - err := c.CoreV1().Namespaces().Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) + err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) if err != nil { t.Fatalf("failed to delete namespace %q: %v", name, err) } @@ -1045,7 +1045,7 @@ func TestMixedRelationships(t *testing.T) { } // Delete the core owner. 
- err = configMapClient.Delete(context.TODO(), coreOwner.GetName(), &metav1.DeleteOptions{PropagationPolicy: &foreground}) + err = configMapClient.Delete(context.TODO(), coreOwner.GetName(), metav1.DeleteOptions{PropagationPolicy: &foreground}) if err != nil { t.Fatalf("failed to delete owner resource %q: %v", coreOwner.GetName(), err) } diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index 3c64be20118..c6082218429 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -63,7 +63,7 @@ func deleteNodes(apiURL string, config *Config) { Burst: config.CreateQPS, }) noGrace := int64(0) - if err := clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil { + if err := clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil { klog.Errorf("Error deleting node: %v", err) } } diff --git a/test/integration/master/audit_dynamic_test.go b/test/integration/master/audit_dynamic_test.go index 2f3410fb142..c1713035a1f 100644 --- a/test/integration/master/audit_dynamic_test.go +++ b/test/integration/master/audit_dynamic_test.go @@ -109,7 +109,7 @@ func TestDynamicAudit(t *testing.T) { // test deletes an audit sink, generates audit events, and ensures they don't arrive in the corresponding server success = t.Run("delete sink", func(t *testing.T) { - err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), sinkConfig2.Name, &metav1.DeleteOptions{}) + err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), sinkConfig2.Name, metav1.DeleteOptions{}) require.NoError(t, err, "failed to delete audit sink2") t.Log("deleted audit sink2") diff --git a/test/integration/master/audit_test.go b/test/integration/master/audit_test.go index 2b6769f7521..1dee7af9c45 100644 --- a/test/integration/master/audit_test.go +++ b/test/integration/master/audit_test.go @@ -384,7 +384,7 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) { _, err = kubeclient.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) expectNoError(t, err, "failed to list config maps") - err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, &metav1.DeleteOptions{}) + err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{}) expectNoError(t, err, "failed to delete audit-configmap") } diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index 5c415e5a9b3..2c29cfb59cd 100644 --- a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -702,7 +702,7 @@ func TestServiceAlloc(t *testing.T) { } // Delete the first service. 
- if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svc(1).ObjectMeta.Name, nil); err != nil { + if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svc(1).ObjectMeta.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("got unexpected error: %v", err) } @@ -736,7 +736,7 @@ func TestUpdateNodeObjects(t *testing.T) { iterations := 10000 for i := 0; i < nodes*6; i++ { - c.Nodes().Delete(context.TODO(), fmt.Sprintf("node-%d", i), nil) + c.Nodes().Delete(context.TODO(), fmt.Sprintf("node-%d", i), metav1.DeleteOptions{}) _, err := c.Nodes().Create(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("node-%d", i), diff --git a/test/integration/namespace/ns_conditions_test.go b/test/integration/namespace/ns_conditions_test.go index 2ca3fc8b170..6230b8efd3b 100644 --- a/test/integration/namespace/ns_conditions_test.go +++ b/test/integration/namespace/ns_conditions_test.go @@ -78,7 +78,7 @@ func TestNamespaceCondition(t *testing.T) { t.Fatal(err) } - if err = kubeClient.CoreV1().Namespaces().Delete(context.TODO(), nsName, nil); err != nil { + if err = kubeClient.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{}); err != nil { t.Fatal(err) } diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 89214688d43..0cfa9563e09 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -559,7 +559,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) } @@ -911,7 +911,7 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { } rsClient := c.AppsV1().ReplicaSets(ns.Name) - err := rsClient.Delete(context.TODO(), rs.Name, nil) + err := rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Failed to delete rs: %v", err) } @@ -944,5 +944,5 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { rs.Finalizers = finalizers }) - rsClient.Delete(context.TODO(), rs.Name, nil) + rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}) } diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 93ef5787a74..d6a8d4b9be1 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -520,7 +520,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) } diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 4b794622640..255d29c67e8 100644 --- 
a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -359,7 +359,7 @@ func TestSchedulerExtender(t *testing.T) { func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, @@ -418,7 +418,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) t.Fatalf("Failed to schedule using extender, expected machine2, got %v", myPod.Spec.NodeName) } var gracePeriod int64 - if err := cs.CoreV1().Pods(ns.Name).Delete(context.TODO(), myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil { + if err := cs.CoreV1().Pods(ns.Name).Delete(context.TODO(), myPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil { t.Fatalf("Failed to delete pod: %v", err) } _, err = cs.CoreV1().Pods(ns.Name).Get(context.TODO(), myPod.Name, metav1.GetOptions{}) diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 7d6e5bf527d..781c9640214 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -1220,7 +1220,7 @@ func TestPDBInPreemption(t *testing.T) { // Cleanup pods = append(pods, preemptor) testutils.CleanupPods(cs, t, pods) - cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) } } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 1c52f5333e9..882f3b232dd 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -62,7 +62,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) informerFactory := informers.NewSharedInformerFactory(clientSet, 0) for i, test := range []struct { @@ -304,7 +304,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) informerFactory := informers.NewSharedInformerFactory(clientSet, 0) @@ -341,7 +341,7 @@ func 
TestUnschedulableNodes(t *testing.T) { nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister() // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). - defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, @@ -453,7 +453,7 @@ func TestUnschedulableNodes(t *testing.T) { if err := deletePod(testCtx.ClientSet, myPod.Name, myPod.Namespace); err != nil { t.Errorf("Failed to delete pod: %v", err) } - err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, nil) + err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, metav1.DeleteOptions{}) if err != nil { t.Errorf("Failed to delete node: %v", err) } @@ -826,7 +826,7 @@ func TestSchedulerInformers(t *testing.T) { // Cleanup pods = append(pods, unschedulable) testutils.CleanupPods(cs, t, pods) - cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) } } diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 1b4447346f8..ababa4221f6 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -469,7 +469,7 @@ func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.Conditio // cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to // be actually deleted. 
func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) { - if err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}); err != nil { + if err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { t.Errorf("error while listing pod in namespace %v: %v", ns, err) return } diff --git a/test/integration/secrets/secrets_test.go b/test/integration/secrets/secrets_test.go index f626d9d4fd5..b10751b414d 100644 --- a/test/integration/secrets/secrets_test.go +++ b/test/integration/secrets/secrets_test.go @@ -32,7 +32,7 @@ import ( ) func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().Secrets(ns).Delete(context.TODO(), name, nil); err != nil { + if err := c.CoreV1().Secrets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Errorf("unable to delete secret %v: %v", name, err) } } diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index ba7ee1f9408..30a6fc14a4f 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -83,7 +83,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), defaultUser.Name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), defaultUser.Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -127,7 +127,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete token - err = c.CoreV1().Secrets(ns).Delete(context.TODO(), token1Name, nil) + err = c.CoreV1().Secrets(ns).Delete(context.TODO(), token1Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Could not delete token: %v", err) } @@ -168,7 +168,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -314,7 +314,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = c.CoreV1().Secrets(myns).Delete(context.TODO(), roTokenName, nil) + err = c.CoreV1().Secrets(myns).Delete(context.TODO(), roTokenName, metav1.DeleteOptions{}) if err != nil { t.Fatalf("could not delete token: %v", err) } @@ -584,7 +584,9 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string _, err := c.CoreV1().Secrets(ns).Create(context.TODO(), testSecret, metav1.CreateOptions{}) return err }, - func() error { return c.CoreV1().Secrets(ns).Delete(context.TODO(), testSecret.Name, nil) }, + func() error { + return c.CoreV1().Secrets(ns).Delete(context.TODO(), testSecret.Name, metav1.DeleteOptions{}) + }, } for _, op := range readOps { diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index ddb0f733da5..7e1d92fd750 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -178,7 +178,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, 
deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting pod %s: %v", deletingPod.Name, err) } diff --git a/test/integration/storageclasses/storage_classes_test.go b/test/integration/storageclasses/storage_classes_test.go index ae61dd8e8f3..c8ee33d452f 100644 --- a/test/integration/storageclasses/storage_classes_test.go +++ b/test/integration/storageclasses/storage_classes_test.go @@ -87,13 +87,13 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names } func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.StorageV1().StorageClasses().Delete(context.TODO(), name, nil); err != nil { + if err := c.StorageV1().StorageClasses().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Errorf("unable to delete storage class %v: %v", name, err) } } func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Errorf("unable to delete persistent volume claim %v: %v", name, err) } } diff --git a/test/integration/ttlcontroller/ttlcontroller_test.go b/test/integration/ttlcontroller/ttlcontroller_test.go index e6cddac6164..1952a99c8c6 100644 --- a/test/integration/ttlcontroller/ttlcontroller_test.go +++ b/test/integration/ttlcontroller/ttlcontroller_test.go @@ -75,7 +75,7 @@ func deleteNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex go func(idx int) { defer wg.Done() name := fmt.Sprintf("node-%d", idx) - if err := client.CoreV1().Nodes().Delete(context.TODO(), name, &metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Nodes().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete node: %v", err) } }(i) diff --git a/test/integration/util/util.go b/test/integration/util/util.go index c4fc6fff52f..172f0970345 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -186,7 +186,7 @@ func CleanupTest(t *testing.T, testCtx *TestContext) { // Kill the scheduler. testCtx.CancelFn() // Cleanup nodes. - testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) framework.DeleteTestingNamespace(testCtx.NS, testCtx.HTTPServer, t) testCtx.CloseFn() } diff --git a/test/integration/utils.go b/test/integration/utils.go index e34d8687a61..d23eb8257c2 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -35,7 +35,7 @@ import ( // DeletePodOrErrorf deletes a pod or fails with a call to t.Errorf. 
func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { t.Errorf("unable to delete pod %v: %v", name, err) } } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index c37f1712820..3c5f0e978f2 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -117,7 +117,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -147,7 +147,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { klog.V(2).Infof("TestPersistentVolumeRecycler pvc bound") // deleting a claim releases the volume, after which it can be recycled - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } klog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted") @@ -172,7 +172,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -199,7 +199,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { klog.V(2).Infof("TestPersistentVolumeDeleter pvc bound") // deleting a claim releases the volume, after which it can be recycled - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } klog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted") @@ -232,7 +232,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -302,7 +302,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -383,7 +383,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -483,7 +483,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -549,7 +549,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { } // deleting a claim releases the volume - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } t.Log("claim deleted") @@ -573,7 +573,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) controllerStopCh := make(chan struct{}) informers.Start(controllerStopCh) @@ -862,8 +862,8 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes and StorageClasses). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) - defer testClient.StorageV1().StorageClasses().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + defer testClient.StorageV1().StorageClasses().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) storageClass := storage.StorageClass{ TypeMeta: metav1.TypeMeta{ @@ -922,7 +922,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // Delete the claims for i := 0; i < objCount; i++ { - _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvcs[i].Name, nil) + _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvcs[i].Name, metav1.DeleteOptions{}) } // Wait for the PVs to get deleted by listing remaining volumes @@ -957,7 +957,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -1014,7 +1014,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { } // deleting a claim releases the volume - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } t.Log("claim deleted") diff --git a/test/integration/volumescheduling/util.go b/test/integration/volumescheduling/util.go index 49b2bfbc3a7..a4efc0fde6f 100644 --- a/test/integration/volumescheduling/util.go +++ b/test/integration/volumescheduling/util.go @@ -135,7 +135,7 @@ func cleanupTest(t *testing.T, testCtx *testContext) { // Kill the scheduler. testCtx.cancelFn() // Cleanup nodes. - testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t) testCtx.closeFn() } diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go index 22d81603724..a58314067c2 100644 --- a/test/integration/volumescheduling/volume_binding_test.go +++ b/test/integration/volumescheduling/volume_binding_test.go @@ -693,7 +693,7 @@ func TestPVAffinityConflict(t *testing.T) { t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict. 
Got message %q", podName, p.Status.Conditions[0].Message) } // Deleting test pod - if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), podName, &metav1.DeleteOptions{}); err != nil { + if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete Pod %s: %v", podName, err) } } @@ -848,7 +848,7 @@ func TestRescheduleProvisioning(t *testing.T) { defer func() { close(controllerCh) deleteTestObjects(clientset, ns, nil) - testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) testCtx.closeFn() }() diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 3071e0ff9ec..6d5951c1aa2 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -122,7 +122,7 @@ func main() { } ns := got.Name defer func(ns string) { - if err := client.CoreV1().Namespaces().Delete(context.TODO(), ns, nil); err != nil { + if err := client.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}); err != nil { klog.Warningf("Failed to delete namespace %s: %v", ns, err) } else { // wait until the namespace disappears @@ -177,7 +177,7 @@ func main() { klog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { - if err := client.CoreV1().Services(ns).Delete(context.TODO(), svc.Name, nil); err == nil { + if err := client.CoreV1().Services(ns).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err == nil { return } klog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) @@ -230,7 +230,7 @@ func main() { // Make several attempts to delete the pods. for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { - if err = client.CoreV1().Pods(ns).Delete(context.TODO(), podName, nil); err == nil { + if err = client.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err == nil { break } klog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)