Mirror of https://github.com/k3s-io/kubernetes.git
more artisanal fixes
Most of these could have been refactored automatically, but it would have been uglier: the unsophisticated tooling left lots of unnecessary struct -> pointer -> struct transitions.
parent aaf855c1e6
commit 76f8594378
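For context, here is a minimal sketch (not part of this commit) of the calling convention the refactor moves toward: the client-go typed clients now take metav1.DeleteOptions by value, so callers build the options inline instead of allocating a pointer first. The deleteForeground helper name below is illustrative only.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deleteForeground passes the DeleteOptions literal directly by value;
// no intermediate &metav1.DeleteOptions{} variable is needed.
func deleteForeground(client clientset.Interface, ns, name string) error {
	policy := metav1.DeletePropagationForeground
	return client.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &policy})
}

Where an API field still expects a pointer (for example Eviction.DeleteOptions in the drain and e2e hunks below), the value is bound to a local variable first and its address is taken.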
@@ -200,10 +200,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error {
 	klog.V(2).Infof("Deleting Job %q in the namespace %q", jobName, ns)
 	propagation := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &propagation,
-	}
-	if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil {
+	if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil {
 		return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns)
 	}
 	return nil
@@ -194,19 +194,13 @@ func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) err
 // DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error {
 	foregroundDelete := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &foregroundDelete,
-	}
-	return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions)
+	return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }
 
 // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
 	foregroundDelete := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &foregroundDelete,
-	}
-	return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions)
+	return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }
 
 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
@@ -192,9 +192,9 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
 	ttl, alreadyExpired := bootstrapsecretutil.GetExpiration(secret, time.Now())
 	if alreadyExpired {
 		klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
-		var options *metav1.DeleteOptions
+		var options metav1.DeleteOptions
 		if len(secret.UID) > 0 {
-			options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
+			options.Preconditions = &metav1.Preconditions{UID: &secret.UID}
 		}
 		err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options)
 		// NotFound isn't a real error (it's already been deleted)
@@ -342,9 +342,9 @@ func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*r
 }
 
 func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
-	var opts *metav1.DeleteOptions
+	var opts metav1.DeleteOptions
 	if len(uid) > 0 {
-		opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
+		opts.Preconditions = &metav1.Preconditions{UID: &uid}
 	}
 	err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts)
 	// NotFound doesn't need a retry (it's already been deleted)
@@ -460,9 +460,9 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
 	if !addedReference {
 		// we weren't able to use the token, try to clean it up.
 		klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
-		deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
-		if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil {
-			klog.Error(deleteErr) // if we fail, just log it
+		deleteOpts := metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
+		if err := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); err != nil {
+			klog.Error(err) // if we fail, just log it
 		}
 	}
 
@@ -26,28 +26,26 @@ import (
 	"testing"
 	"time"
 
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/watch"
-
-	"k8s.io/apimachinery/pkg/util/clock"
-	ref "k8s.io/client-go/tools/reference"
-
-	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
+	ref "k8s.io/client-go/tools/reference"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 
 	jsonpatch "github.com/evanphx/json-patch"
-	"k8s.io/klog"
 )
 
 var (
@@ -183,7 +181,7 @@ func (m *FakeNodeHandler) List(_ context.Context, opts metav1.ListOptions) (*v1.
 }
 
 // Delete deletes a Node from the fake store.
-func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.DeleteOptions) error {
+func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt metav1.DeleteOptions) error {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
@@ -197,7 +195,7 @@ func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.Delet
 }
 
 // DeleteCollection deletes a collection of Nodes from the fake store.
-func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	return nil
 }
 
@@ -230,7 +230,7 @@ func (tc *Controller) processJob(key string) error {
 	}
 	// Cascade deletes the Jobs if TTL truly expires.
 	policy := metav1.DeletePropagationForeground
-	options := &metav1.DeleteOptions{
+	options := metav1.DeleteOptions{
 		PropagationPolicy: &policy,
 		Preconditions:     &metav1.Preconditions{UID: &fresh.UID},
 	}
@@ -583,9 +583,12 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 
 		// We don't handle graceful deletion of mirror pods.
 		if m.canBeDeleted(pod, status.status) {
-			deleteOptions := metav1.NewDeleteOptions(0)
-			// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
-			deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
+			deleteOptions := metav1.DeleteOptions{
+				GracePeriodSeconds: new(int64),
+				// Use the pod UID as the precondition for deletion to prevent deleting a
+				// newly created pod with the same name and namespace.
+				Preconditions: metav1.NewUIDPreconditions(string(pod.UID)),
+			}
 			err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions)
 			if err != nil {
 				klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)
@@ -262,7 +262,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) {
 
 	// we have a spurious APIService that we're managing, delete it (5A,6A)
 	case desired == nil:
-		opts := &metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
+		opts := metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
 		err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts)
 		if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
 			// deleted or changed in the meantime, we'll get called again
@@ -121,8 +121,8 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
 	return "", nil
 }
 
-func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions {
-	deleteOptions := &metav1.DeleteOptions{}
+func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
+	deleteOptions := metav1.DeleteOptions{}
 	if d.GracePeriodSeconds >= 0 {
 		gracePeriodSeconds := int64(d.GracePeriodSeconds)
 		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
@@ -150,6 +150,8 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			return err
 		}
 	}
+
+	delOpts := d.makeDeleteOptions()
 	eviction := &policyv1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: policyGroupVersion,
@@ -159,8 +161,9 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
-		DeleteOptions: d.makeDeleteOptions(),
+		DeleteOptions: &delOpts,
 	}
+
 	// Remember to change change the URL manipulation func when Eviction's version change
 	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
 }
@@ -79,19 +79,19 @@ func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
 	return availablePods
 }
 
-func getForegroundOptions() *metav1.DeleteOptions {
+func getForegroundOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationForeground
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getBackgroundOptions() *metav1.DeleteOptions {
+func getBackgroundOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationBackground
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getOrphanOptions() *metav1.DeleteOptions {
+func getOrphanOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationOrphan
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
 var (
@@ -473,8 +473,9 @@ var _ = SIGDescribe("Garbage collector", func() {
 			framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 		}
 		ginkgo.By("delete the rc")
-		deleteOptions := &metav1.DeleteOptions{}
-		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
+		deleteOptions := metav1.DeleteOptions{
+			Preconditions: metav1.NewUIDPreconditions(string(rc.UID)),
+		}
 		if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
 			framework.Failf("failed to delete the rc: %v", err)
 		}
@@ -1101,7 +1102,8 @@ var _ = SIGDescribe("Garbage collector", func() {
 		framework.Logf("created dependent resource %q", dependentName)
 
 		// Delete the owner and orphan the dependent.
-		err = resourceClient.Delete(ownerName, getOrphanOptions())
+		delOpts := getOrphanOptions()
+		err = resourceClient.Delete(ownerName, &delOpts)
 		if err != nil {
 			framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
 		}
@@ -854,7 +854,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
 
 func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
 	trueVar := true
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
+	deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
 	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
 	return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
 }
@@ -99,7 +99,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
 			_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
 			framework.ExpectNoError(err, "failed to patch pod")
 
-			f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+			f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 			expectEvents(f, []utils.AuditEvent{
 				{
@@ -211,7 +211,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 				_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
 				framework.ExpectNoError(err, "failed to patch pod")
 
-				f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+				f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 			},
 			[]utils.AuditEvent{
 				{
@@ -45,7 +45,6 @@ package common
 import (
 	"context"
 
-	"k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -136,7 +136,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
-func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
+func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
 	namespace := c.f.Namespace.Name
 	err := c.Delete(context.TODO(), name, options)
 	if err != nil && !apierrors.IsNotFound(err) {
@@ -814,7 +814,7 @@ func (f *Framework) MatchContainerOutput(
 	createdPod := podClient.Create(pod)
 	defer func() {
 		ginkgo.By("delete the pod")
-		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
+		podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
 	}()
 
 	// Wait for client pod to complete.
@@ -1181,7 +1181,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 
 	defer ps.Stop()
 	falseVar := false
-	deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	deleteOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
 	startTime := time.Now()
 	if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
 		return err
@@ -3341,7 +3341,7 @@ func proxyMode(f *framework.Framework) (string, error) {
 		},
 	}
 	f.PodClient().CreateSync(pod)
-	defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+	defer f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 	cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
 	stdout, err := framework.RunHostCmd(pod.Namespace, pod.Name, cmd)
@@ -99,9 +99,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			false: "RW",
 		}
 		type testT struct {
-			descr     string                // It description
-			readOnly  bool                  // true means pd is read-only
-			deleteOpt *metav1.DeleteOptions // pod delete option
+			descr     string               // It description
+			readOnly  bool                 // true means pd is read-only
+			deleteOpt metav1.DeleteOptions // pod delete option
 		}
 		tests := []testT{
 			{
@@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			{
 				descr:     podDefaultGrace,
 				readOnly:  false,
-				deleteOpt: &metav1.DeleteOptions{},
+				deleteOpt: metav1.DeleteOptions{},
 			},
 			{
 				descr:     podImmediateGrace,
@@ -122,7 +122,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 			{
 				descr:     podDefaultGrace,
 				readOnly:  true,
-				deleteOpt: &metav1.DeleteOptions{},
+				deleteOpt: metav1.DeleteOptions{},
 			},
 		}
 
@@ -87,7 +87,7 @@ func deletePods(f *framework.Framework, podNames []string) {
 		delOpts := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		f.PodClient().DeleteSync(podName, &delOpts, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(podName, delOpts, framework.DefaultPodDeletionTimeout)
 	}
 }
 
@@ -99,10 +99,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 	})
 	ginkgo.AfterEach(func() {
 		// Delete Pods
-		f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(guaranteedPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(burstablePodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(bestEffortPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSync(criticalPodName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		// Log Events
 		logPodEvents(f)
 		logNodeEvents(f)
@@ -281,8 +281,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
 
 		// Cleanup
-		f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-		f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(pod1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(pod2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 	})
 })
 }
@@ -169,7 +169,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
 	twist()
 
 	ginkgo.By("Remove test pod")
-	f.PodClient().DeleteSync(podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+	f.PodClient().DeleteSync(podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 	ginkgo.By("Waiting for checkpoint to be removed")
 	if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
@@ -535,7 +535,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			ginkgo.By("deleting pods")
 			for _, spec := range testSpecs {
 				ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-				f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
+				f.PodClient().DeleteSync(spec.pod.Name, metav1.DeleteOptions{}, 10*time.Minute)
 			}
 
 			// In case a test fails before verifying that NodeCondition no longer exist on the node,
@@ -245,7 +245,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 		ginkgo.AfterEach(func() {
 			for _, pod := range test.testPods {
 				ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-				f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+				f.PodClient().DeleteSync(pod.podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 			}
 
 			ginkgo.By("Making sure all containers get cleaned up")
@@ -162,8 +162,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 			logDevicePluginMetrics()
 
 			// Cleanup
-			f.PodClient().DeleteSync(p1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
-			f.PodClient().DeleteSync(p2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+			f.PodClient().DeleteSync(p1.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+			f.PodClient().DeleteSync(p2.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		})
 	})
 })
@@ -80,7 +80,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
 		delOpts := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		f.PodClient().DeleteSync(pod.Name, &delOpts, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(pod.Name, delOpts, framework.DefaultPodDeletionTimeout)
 		ginkgo.By("running the post test exec from the workload")
 		err := wl.PostTestExec()
 		framework.ExpectNoError(err)
@@ -96,8 +96,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 	})
 	ginkgo.AfterEach(func() {
 		ginkgo.By("Deleting test pods")
-		f.PodClient().DeleteSync(pod0, &metav1.DeleteOptions{}, 10*time.Minute)
-		f.PodClient().DeleteSync(pod1, &metav1.DeleteOptions{}, 10*time.Minute)
+		f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{}, 10*time.Minute)
+		f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{}, 10*time.Minute)
 		if !ginkgo.CurrentGinkgoTestDescription().Failed {
 			return
 		}
@@ -1046,7 +1046,7 @@ func testPodBindingEviction(c *testContext) {
 
 	background := metav1.DeletePropagationBackground
	zero := int64(0)
-	forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
+	forceDelete := metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}
 	defer func() {
 		err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(context.TODO(), pod.GetName(), forceDelete)
 		if err != nil && !apierrors.IsNotFound(err) {
@@ -1073,7 +1073,7 @@ func testPodBindingEviction(c *testContext) {
 	case gvr("", "v1", "pods/eviction"):
 		err = c.clientset.CoreV1().RESTClient().Post().Namespace(pod.GetNamespace()).Resource("pods").Name(pod.GetName()).SubResource("eviction").Body(&policyv1beta1.Eviction{
 			ObjectMeta:    metav1.ObjectMeta{Name: pod.GetName()},
-			DeleteOptions: forceDelete,
+			DeleteOptions: &forceDelete,
 		}).Do(context.TODO()).Error()
 
 	default:
@@ -174,7 +174,7 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet)
 	}
 
 	falseVar := false
-	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	deleteOptions := metav1.DeleteOptions{OrphanDependents: &falseVar}
 	if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil {
 		t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
 	}
@@ -73,7 +73,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
 	}
 
 	var gracePeriodSeconds int64 = 30
-	deleteOption := &metav1.DeleteOptions{
+	deleteOption := metav1.DeleteOptions{
 		GracePeriodSeconds: &gracePeriodSeconds,
 	}
 
@@ -192,7 +192,7 @@ func TestTerminalPodEviction(t *testing.T) {
 	}
 
 	var gracePeriodSeconds int64 = 30
-	deleteOption := &metav1.DeleteOptions{
+	deleteOption := metav1.DeleteOptions{
 		GracePeriodSeconds: &gracePeriodSeconds,
 	}
 	pod := newPod("test-terminal-pod1")
@@ -309,7 +309,7 @@ func newPDB() *v1beta1.PodDisruptionBudget {
 	}
 }
 
-func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v1beta1.Eviction {
+func newEviction(ns, evictionName string, deleteOption metav1.DeleteOptions) *v1beta1.Eviction {
 	return &v1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "Policy/v1beta1",
@@ -319,7 +319,7 @@ func newEviction(ns, evictionName string, deleteOption *metav1.DeleteOptions) *v
 			Name:      evictionName,
 			Namespace: ns,
 		},
-		DeleteOptions: deleteOption,
+		DeleteOptions: &deleteOption,
 	}
 }
 
@@ -52,24 +52,24 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
-func getForegroundOptions() *metav1.DeleteOptions {
+func getForegroundOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationForeground
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getOrphanOptions() *metav1.DeleteOptions {
+func getOrphanOptions() metav1.DeleteOptions {
 	var trueVar = true
-	return &metav1.DeleteOptions{OrphanDependents: &trueVar}
+	return metav1.DeleteOptions{OrphanDependents: &trueVar}
 }
 
-func getPropagateOrphanOptions() *metav1.DeleteOptions {
+func getPropagateOrphanOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationOrphan
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }
 
-func getNonOrphanOptions() *metav1.DeleteOptions {
+func getNonOrphanOptions() metav1.DeleteOptions {
 	var falseVar = false
-	return &metav1.DeleteOptions{OrphanDependents: &falseVar}
+	return metav1.DeleteOptions{OrphanDependents: &falseVar}
 }
 
 const garbageCollectedPodName = "test.pod.1"
@@ -435,7 +435,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
 	}
 }
 
-func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
+func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
 	defer wg.Done()
 	rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
 	podClient := clientSet.CoreV1().Pods(namespace)
@@ -461,9 +461,6 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
 	}
 	orphan := false
 	switch {
-	case options == nil:
-		// if there are no deletion options, the default policy for replication controllers is orphan
-		orphan = true
 	case options.OrphanDependents != nil:
 		// if the deletion options explicitly specify whether to orphan, that controls
 		orphan = *options.OrphanDependents
@@ -537,9 +534,9 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	rcUIDs := make(chan types.UID, collections*5)
 	for i := 0; i < collections; i++ {
 		// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
-		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
-		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, nil, &wg, rcUIDs)
+		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
 		go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
 		// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
@@ -56,7 +56,7 @@ type testConfig struct {
 var (
 	// Delete API objects immediately
 	deletePeriod = int64(0)
-	deleteOption = &metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}
+	deleteOption = metav1.DeleteOptions{GracePeriodSeconds: &deletePeriod}
 
 	modeWait      = storagev1.VolumeBindingWaitForFirstConsumer
 	modeImmediate = storagev1.VolumeBindingImmediate
@@ -847,7 +847,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 
 	defer func() {
 		close(controllerCh)
-		deleteTestObjects(clientset, ns, nil)
+		deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
 		testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 		testCtx.closeFn()
 	}()
@@ -931,7 +931,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 		stop: textCtx.ctx.Done(),
 		teardown: func() {
 			klog.Infof("test cluster %q start to tear down", ns)
-			deleteTestObjects(clientset, ns, nil)
+			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
 			cleanupTest(t, textCtx)
 		},
 	}
@@ -983,7 +983,7 @@ func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds
 	return ctrl, informerFactory, nil
 }
 
-func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) {
+func deleteTestObjects(client clientset.Interface, ns string, option metav1.DeleteOptions) {
 	client.CoreV1().Pods(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 	client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 	client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
@@ -32,7 +32,7 @@ import (
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 )
 
-func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
 	switch kind {
 	case api.Kind("Pod"):
 		return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
@@ -57,7 +57,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
 	}
 }
 
-func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
+func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
 	deleteFunc := func() (bool, error) {
 		err := deleteResource(c, kind, namespace, name, options)
 		if err == nil || apierrors.IsNotFound(err) {
@@ -1565,7 +1565,7 @@ func (config *SecretConfig) Run() error {
 }
 
 func (config *SecretConfig) Stop() error {
-	if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+	if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("Error deleting secret: %v", err)
 	}
 	config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
@@ -1623,7 +1623,7 @@ func (config *ConfigMapConfig) Run() error {
 }
 
 func (config *ConfigMapConfig) Stop() error {
-	if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, &metav1.DeleteOptions{}); err != nil {
+	if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("Error deleting configmap: %v", err)
 	}
 	config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)