more artisanal fixes

Most of these could have been refactored automatically, but it would
have been uglier. The unsophisticated tooling left lots of unnecessary
struct -> pointer -> struct transitions.
This commit is contained in:
Mike Danese 2020-03-01 09:34:30 -08:00
parent aaf855c1e6
commit 76f8594378
34 changed files with 94 additions and 101 deletions

View File

@ -200,10 +200,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error { func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error {
klog.V(2).Infof("Deleting Job %q in the namespace %q", jobName, ns) klog.V(2).Infof("Deleting Job %q in the namespace %q", jobName, ns)
propagation := metav1.DeletePropagationForeground propagation := metav1.DeletePropagationForeground
deleteOptions := &metav1.DeleteOptions{ if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil {
PropagationPolicy: &propagation,
}
if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil {
return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns) return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns)
} }
return nil return nil

View File

@ -194,19 +194,13 @@ func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) err
// DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted // DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error { func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error {
foregroundDelete := metav1.DeletePropagationForeground foregroundDelete := metav1.DeletePropagationForeground
deleteOptions := &metav1.DeleteOptions{ return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
PropagationPolicy: &foregroundDelete,
}
return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions)
} }
// DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error { func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
foregroundDelete := metav1.DeletePropagationForeground foregroundDelete := metav1.DeletePropagationForeground
deleteOptions := &metav1.DeleteOptions{ return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
PropagationPolicy: &foregroundDelete,
}
return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions)
} }
// CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.

View File

@ -192,9 +192,9 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
ttl, alreadyExpired := bootstrapsecretutil.GetExpiration(secret, time.Now()) ttl, alreadyExpired := bootstrapsecretutil.GetExpiration(secret, time.Now())
if alreadyExpired { if alreadyExpired {
klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name) klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
var options *metav1.DeleteOptions var options metav1.DeleteOptions
if len(secret.UID) > 0 { if len(secret.UID) > 0 {
options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}} options.Preconditions = &metav1.Preconditions{UID: &secret.UID}
} }
err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options) err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options)
// NotFound isn't a real error (it's already been deleted) // NotFound isn't a real error (it's already been deleted)

View File

@ -342,9 +342,9 @@ func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*r
} }
func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) { func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
var opts *metav1.DeleteOptions var opts metav1.DeleteOptions
if len(uid) > 0 { if len(uid) > 0 {
opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}} opts.Preconditions = &metav1.Preconditions{UID: &uid}
} }
err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts) err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts)
// NotFound doesn't need a retry (it's already been deleted) // NotFound doesn't need a retry (it's already been deleted)
@ -460,9 +460,9 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
if !addedReference { if !addedReference {
// we weren't able to use the token, try to clean it up. // we weren't able to use the token, try to clean it up.
klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} deleteOpts := metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil { if err := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); err != nil {
klog.Error(deleteErr) // if we fail, just log it klog.Error(err) // if we fail, just log it
} }
} }

View File

@ -26,28 +26,26 @@ import (
"testing" "testing"
"time" "time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/util/clock"
ref "k8s.io/client-go/tools/reference"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
ref "k8s.io/client-go/tools/reference"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
jsonpatch "github.com/evanphx/json-patch" jsonpatch "github.com/evanphx/json-patch"
"k8s.io/klog"
) )
var ( var (
@ -183,7 +181,7 @@ func (m *FakeNodeHandler) List(_ context.Context, opts metav1.ListOptions) (*v1.
} }
// Delete deletes a Node from the fake store. // Delete deletes a Node from the fake store.
func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.DeleteOptions) error { func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt metav1.DeleteOptions) error {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@ -197,7 +195,7 @@ func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.Delet
} }
// DeleteCollection deletes a collection of Nodes from the fake store. // DeleteCollection deletes a collection of Nodes from the fake store.
func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error { func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt metav1.DeleteOptions, listOpts metav1.ListOptions) error {
return nil return nil
} }

View File

@ -230,7 +230,7 @@ func (tc *Controller) processJob(key string) error {
} }
// Cascade deletes the Jobs if TTL truly expires. // Cascade deletes the Jobs if TTL truly expires.
policy := metav1.DeletePropagationForeground policy := metav1.DeletePropagationForeground
options := &metav1.DeleteOptions{ options := metav1.DeleteOptions{
PropagationPolicy: &policy, PropagationPolicy: &policy,
Preconditions: &metav1.Preconditions{UID: &fresh.UID}, Preconditions: &metav1.Preconditions{UID: &fresh.UID},
} }

View File

@ -583,9 +583,12 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
// We don't handle graceful deletion of mirror pods. // We don't handle graceful deletion of mirror pods.
if m.canBeDeleted(pod, status.status) { if m.canBeDeleted(pod, status.status) {
deleteOptions := metav1.NewDeleteOptions(0) deleteOptions := metav1.DeleteOptions{
// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. GracePeriodSeconds: new(int64),
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) // Use the pod UID as the precondition for deletion to prevent deleting a
// newly created pod with the same name and namespace.
Preconditions: metav1.NewUIDPreconditions(string(pod.UID)),
}
err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions) err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions)
if err != nil { if err != nil {
klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)

View File

@ -262,7 +262,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) {
// we have a spurious APIService that we're managing, delete it (5A,6A) // we have a spurious APIService that we're managing, delete it (5A,6A)
case desired == nil: case desired == nil:
opts := &metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))} opts := metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts) err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts)
if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
// deleted or changed in the meantime, we'll get called again // deleted or changed in the meantime, we'll get called again

View File

@ -121,8 +121,8 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
return "", nil return "", nil
} }
func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions { func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
deleteOptions := &metav1.DeleteOptions{} deleteOptions := metav1.DeleteOptions{}
if d.GracePeriodSeconds >= 0 { if d.GracePeriodSeconds >= 0 {
gracePeriodSeconds := int64(d.GracePeriodSeconds) gracePeriodSeconds := int64(d.GracePeriodSeconds)
deleteOptions.GracePeriodSeconds = &gracePeriodSeconds deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
@ -150,6 +150,8 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
return err return err
} }
} }
delOpts := d.makeDeleteOptions()
eviction := &policyv1beta1.Eviction{ eviction := &policyv1beta1.Eviction{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
APIVersion: policyGroupVersion, APIVersion: policyGroupVersion,
@ -159,8 +161,9 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
Name: pod.Name, Name: pod.Name,
Namespace: pod.Namespace, Namespace: pod.Namespace,
}, },
DeleteOptions: d.makeDeleteOptions(), DeleteOptions: &delOpts,
} }
// Remember to change change the URL manipulation func when Eviction's version change // Remember to change change the URL manipulation func when Eviction's version change
return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction) return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
} }

View File

@ -79,19 +79,19 @@ func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
return availablePods return availablePods
} }
func getForegroundOptions() *metav1.DeleteOptions { func getForegroundOptions() metav1.DeleteOptions {
policy := metav1.DeletePropagationForeground policy := metav1.DeletePropagationForeground
return &metav1.DeleteOptions{PropagationPolicy: &policy} return metav1.DeleteOptions{PropagationPolicy: &policy}
} }
func getBackgroundOptions() *metav1.DeleteOptions { func getBackgroundOptions() metav1.DeleteOptions {
policy := metav1.DeletePropagationBackground policy := metav1.DeletePropagationBackground
return &metav1.DeleteOptions{PropagationPolicy: &policy} return metav1.DeleteOptions{PropagationPolicy: &policy}
} }
func getOrphanOptions() *metav1.DeleteOptions { func getOrphanOptions() metav1.DeleteOptions {
policy := metav1.DeletePropagationOrphan policy := metav1.DeletePropagationOrphan
return &metav1.DeleteOptions{PropagationPolicy: &policy} return metav1.DeleteOptions{PropagationPolicy: &policy}
} }
var ( var (
@ -473,8 +473,9 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err) framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
} }
ginkgo.By("delete the rc") ginkgo.By("delete the rc")
deleteOptions := &metav1.DeleteOptions{} deleteOptions := metav1.DeleteOptions{
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) Preconditions: metav1.NewUIDPreconditions(string(rc.UID)),
}
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err) framework.Failf("failed to delete the rc: %v", err)
} }
@ -1101,7 +1102,8 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Logf("created dependent resource %q", dependentName) framework.Logf("created dependent resource %q", dependentName)
// Delete the owner and orphan the dependent. // Delete the owner and orphan the dependent.
err = resourceClient.Delete(ownerName, getOrphanOptions()) delOpts := getOrphanOptions()
err = resourceClient.Delete(ownerName, &delOpts)
if err != nil { if err != nil {
framework.Failf("failed to delete owner resource %q: %v", ownerName, err) framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
} }

View File

@ -854,7 +854,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error { func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
trueVar := true trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions) return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
} }

View File

@ -99,7 +99,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch pod") framework.ExpectNoError(err, "failed to patch pod")
f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
expectEvents(f, []utils.AuditEvent{ expectEvents(f, []utils.AuditEvent{
{ {

View File

@ -211,7 +211,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{}) _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
framework.ExpectNoError(err, "failed to patch pod") framework.ExpectNoError(err, "failed to patch pod")
f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
}, },
[]utils.AuditEvent{ []utils.AuditEvent{
{ {

View File

@ -45,7 +45,6 @@ package common
import ( import (
"context" "context"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"

View File

@ -136,7 +136,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test. // disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) { func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
namespace := c.f.Namespace.Name namespace := c.f.Namespace.Name
err := c.Delete(context.TODO(), name, options) err := c.Delete(context.TODO(), name, options)
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {

View File

@ -814,7 +814,7 @@ func (f *Framework) MatchContainerOutput(
createdPod := podClient.Create(pod) createdPod := podClient.Create(pod)
defer func() { defer func() {
ginkgo.By("delete the pod") ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout) podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}() }()
// Wait for client pod to complete. // Wait for client pod to complete.
@ -1181,7 +1181,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
defer ps.Stop() defer ps.Stop()
falseVar := false falseVar := false
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar} deleteOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
startTime := time.Now() startTime := time.Now()
if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil { if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
return err return err

View File

@ -3341,7 +3341,7 @@ func proxyMode(f *framework.Framework) (string, error) {
}, },
} }
f.PodClient().CreateSync(pod) f.PodClient().CreateSync(pod)
defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) defer f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode" cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd) stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd)

View File

@ -99,9 +99,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
false: "RW", false: "RW",
} }
type testT struct { type testT struct {
descr string // It description descr string // It description
readOnly bool // true means pd is read-only readOnly bool // true means pd is read-only
deleteOpt *metav1.DeleteOptions // pod delete option deleteOpt metav1.DeleteOptions // pod delete option
} }
tests := []testT{ tests := []testT{
{ {
@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
{ {
descr: podDefaultGrace, descr: podDefaultGrace,
readOnly: false, readOnly: false,
deleteOpt: &metav1.DeleteOptions{}, deleteOpt: metav1.DeleteOptions{},
}, },
{ {
descr: podImmediateGrace, descr: podImmediateGrace,
@ -122,7 +122,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
{ {
descr: podDefaultGrace, descr: podDefaultGrace,
readOnly: true, readOnly: true,
deleteOpt: &metav1.DeleteOptions{}, deleteOpt: metav1.DeleteOptions{},
}, },
} }

View File

@ -87,7 +87,7 @@ func deletePods(f *framework.Framework, podNames []string) {
delOpts := metav1.DeleteOptions{ delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp, GracePeriodSeconds: &gp,
} }
f.PodClient().DeleteSync(podName, &delOpts, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
} }
} }

View File

@ -99,10 +99,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
// Delete Pods // Delete Pods
f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
// Log Events // Log Events
logPodEvents(f) logPodEvents(f)
logNodeEvents(f) logNodeEvents(f)

View File

@ -281,8 +281,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
// Cleanup // Cleanup
f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
}) })
}) })
} }

View File

@ -169,7 +169,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
twist() twist()
ginkgo.By("Remove test pod") ginkgo.By("Remove test pod")
f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
ginkgo.By("Waiting for checkpoint to be removed") ginkgo.By("Waiting for checkpoint to be removed")
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) { if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {

View File

@ -535,7 +535,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
ginkgo.By("deleting pods") ginkgo.By("deleting pods")
for _, spec := range testSpecs { for _, spec := range testSpecs {
ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name)) ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute) f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
} }
// In case a test fails before verifying that NodeCondition no longer exist on the node, // In case a test fails before verifying that NodeCondition no longer exist on the node,

View File

@ -245,7 +245,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
for _, pod := range test.testPods { for _, pod := range test.testPods {
ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName)) ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
} }
ginkgo.By("Making sure all containers get cleaned up") ginkgo.By("Making sure all containers get cleaned up")

View File

@ -162,8 +162,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
logDevicePluginMetrics() logDevicePluginMetrics()
// Cleanup // Cleanup
f.PodClient().DeleteSync(p1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(p1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
f.PodClient().DeleteSync(p2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(p2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
}) })
}) })
}) })

View File

@ -80,7 +80,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
delOpts := metav1.DeleteOptions{ delOpts := metav1.DeleteOptions{
GracePeriodSeconds: &gp, GracePeriodSeconds: &gp,
} }
f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout)
ginkgo.By("running the post test exec from the workload") ginkgo.By("running the post test exec from the workload")
err := wl.PostTestExec() err := wl.PostTestExec()
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -96,8 +96,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
ginkgo.By("Deleting test pods") ginkgo.By("Deleting test pods")
f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute) f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{}, 10*time.Minute)
f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute) f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{}, 10*time.Minute)
if !ginkgo.CurrentGinkgoTestDescription().Failed { if !ginkgo.CurrentGinkgoTestDescription().Failed {
return return
} }

View File

@ -1046,7 +1046,7 @@ func testPodBindingEviction(c *testContext) {
background := metav1.DeletePropagationBackground background := metav1.DeletePropagationBackground
zero := int64(0) zero := int64(0)
forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background} forceDelete := metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
defer func() { defer func() {
err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(context.TODO(), pod.GetName(), forceDelete) err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(context.TODO(), pod.GetName(), forceDelete)
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
@ -1073,7 +1073,7 @@ func testPodBindingEviction(c *testContext) {
case gvr("", "v1", "pods/eviction"): case gvr("", "v1", "pods/eviction"):
err = c.clientset.CoreV1().RESTClient().Post().Namespace(pod.GetNamespace()).Resource("pods").Name(pod.GetName()).SubResource("eviction").Body(&policyv1beta1.Eviction{ err = c.clientset.CoreV1().RESTClient().Post().Namespace(pod.GetNamespace()).Resource("pods").Name(pod.GetName()).SubResource("eviction").Body(&policyv1beta1.Eviction{
ObjectMeta: metav1.ObjectMeta{Name: pod.GetName()}, ObjectMeta: metav1.ObjectMeta{Name: pod.GetName()},
DeleteOptions: forceDelete, DeleteOptions: &forceDelete,
}).Do(context.TODO()).Error() }).Do(context.TODO()).Error()
default: default:

View File

@ -174,7 +174,7 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet)
} }
falseVar := false falseVar := false
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} deleteOptions := metav1.DeleteOptions{OrphanDependents: &falseVar}
if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil { if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil {
t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err) t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
} }

View File

@ -73,7 +73,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
} }
var gracePeriodSeconds int64 = 30 var gracePeriodSeconds int64 = 30
deleteOption := &metav1.DeleteOptions{ deleteOption := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriodSeconds, GracePeriodSeconds: &gracePeriodSeconds,
} }
@ -192,7 +192,7 @@ func TestTerminalPodEviction(t *testing.T) {
} }
var gracePeriodSeconds int64 = 30 var gracePeriodSeconds int64 = 30
deleteOption := &metav1.DeleteOptions{ deleteOption := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriodSeconds, GracePeriodSeconds: &gracePeriodSeconds,
} }
pod := newPod("test-terminal-pod1") pod := newPod("test-terminal-pod1")
@ -309,7 +309,7 @@ func newPDB() *v1beta1.PodDisruptionBudget {
} }
} }
func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction { func newEviction(ns, evictionName string, deleteOption metav1.DeleteOptions) *v1beta1.Eviction {
return &v1beta1.Eviction{ return &v1beta1.Eviction{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
APIVersion: "Policy/v1beta1", APIVersion: "Policy/v1beta1",
@ -319,7 +319,7 @@ func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v
Name: evictionName, Name: evictionName,
Namespace: ns, Namespace: ns,
}, },
DeleteOptions: deleteOption, DeleteOptions: &deleteOption,
} }
} }

View File

@ -52,24 +52,24 @@ import (
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
func getForegroundOptions() *metav1.DeleteOptions { func getForegroundOptions() metav1.DeleteOptions {
policy := metav1.DeletePropagationForeground policy := metav1.DeletePropagationForeground
return &metav1.DeleteOptions{PropagationPolicy: &policy} return metav1.DeleteOptions{PropagationPolicy: &policy}
} }
func getOrphanOptions() *metav1.DeleteOptions { func getOrphanOptions() metav1.DeleteOptions {
var trueVar = true var trueVar = true
return &metav1.DeleteOptions{OrphanDependents: &trueVar} return metav1.DeleteOptions{OrphanDependents: &trueVar}
} }
func getPropagateOrphanOptions() *metav1.DeleteOptions { func getPropagateOrphanOptions() metav1.DeleteOptions {
policy := metav1.DeletePropagationOrphan policy := metav1.DeletePropagationOrphan
return &metav1.DeleteOptions{PropagationPolicy: &policy} return metav1.DeleteOptions{PropagationPolicy: &policy}
} }
func getNonOrphanOptions() *metav1.DeleteOptions { func getNonOrphanOptions() metav1.DeleteOptions {
var falseVar = false var falseVar = false
return &metav1.DeleteOptions{OrphanDependents: &falseVar} return metav1.DeleteOptions{OrphanDependents: &falseVar}
} }
const garbageCollectedPodName = "test.pod.1" const garbageCollectedPodName = "test.pod.1"
@ -435,7 +435,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
} }
} }
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) { func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
defer wg.Done() defer wg.Done()
rcClient := clientSet.CoreV1().ReplicationControllers(namespace) rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
podClient := clientSet.CoreV1().Pods(namespace) podClient := clientSet.CoreV1().Pods(namespace)
@ -461,9 +461,6 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
} }
orphan := false orphan := false
switch { switch {
case options == nil:
// if there are no deletion options, the default policy for replication controllers is orphan
orphan = true
case options.OrphanDependents != nil: case options.OrphanDependents != nil:
// if the deletion options explicitly specify whether to orphan, that controls // if the deletion options explicitly specify whether to orphan, that controls
orphan = *options.OrphanDependents orphan = *options.OrphanDependents
@ -537,9 +534,9 @@ func TestStressingCascadingDeletion(t *testing.T) {
rcUIDs := make(chan types.UID, collections*5) rcUIDs := make(chan types.UID, collections*5)
for i := 0; i < collections; i++ { for i := 0; i < collections; i++ {
// rc is created with empty finalizers, deleted with nil delete options, pods will remain. // rc is created with empty finalizers, deleted with nil delete options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs) go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs)
// rc is created with the orphan finalizer, deleted with nil options, pods will remain. // rc is created with the orphan finalizer, deleted with nil options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, nil, &wg, rcUIDs) go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs)
// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted. // rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs) go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain. // rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.

View File

@ -56,7 +56,7 @@ type testConfig struct {
var ( var (
// Delete API objects immediately // Delete API objects immediately
deletePeriod = int64(0) deletePeriod = int64(0)
deleteOption = &metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod} deleteOption = metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}
modeWait = storagev1.VolumeBindingWaitForFirstConsumer modeWait = storagev1.VolumeBindingWaitForFirstConsumer
modeImmediate = storagev1.VolumeBindingImmediate modeImmediate = storagev1.VolumeBindingImmediate
@ -847,7 +847,7 @@ func TestRescheduleProvisioning(t *testing.T) {
defer func() { defer func() {
close(controllerCh) close(controllerCh)
deleteTestObjects(clientset, ns, nil) deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
testCtx.closeFn() testCtx.closeFn()
}() }()
@ -931,7 +931,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
stop: textCtx.ctx.Done(), stop: textCtx.ctx.Done(),
teardown: func() { teardown: func() {
klog.Infof("test cluster %q start to tear down", ns) klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, nil) deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
cleanupTest(t, textCtx) cleanupTest(t, textCtx)
}, },
} }
@ -983,7 +983,7 @@ func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds
return ctrl, informerFactory, nil return ctrl, informerFactory, nil
} }
func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) { func deleteTestObjects(client clientset.Interface, ns string, option metav1.DeleteOptions) {
client.CoreV1().Pods(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{}) client.CoreV1().Pods(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{}) client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{}) client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{})

View File

@ -32,7 +32,7 @@ import (
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
) )
func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
switch kind { switch kind {
case api.Kind("Pod"): case api.Kind("Pod"):
return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options) return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
@ -57,7 +57,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
} }
} }
func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
deleteFunc := func() (bool, error) { deleteFunc := func() (bool, error) {
err := deleteResource(c, kind, namespace, name, options) err := deleteResource(c, kind, namespace, name, options)
if err == nil || apierrors.IsNotFound(err) { if err == nil || apierrors.IsNotFound(err) {

View File

@ -1565,7 +1565,7 @@ func (config *SecretConfig) Run() error {
} }
func (config *SecretConfig) Stop() error { func (config *SecretConfig) Stop() error {
if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil { if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("Error deleting secret: %v", err) return fmt.Errorf("Error deleting secret: %v", err)
} }
config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name) config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
@ -1623,7 +1623,7 @@ func (config *ConfigMapConfig) Run() error {
} }
func (config *ConfigMapConfig) Stop() error { func (config *ConfigMapConfig) Stop() error {
if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil { if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("Error deleting configmap: %v", err) return fmt.Errorf("Error deleting configmap: %v", err)
} }
config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name) config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)