generated: run refactor

This commit is contained in:
Mike Danese
2020-02-08 12:30:21 -05:00
parent 6c274ea72d
commit 25651408ae
399 changed files with 1560 additions and 1507 deletions
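For orientation: this generated pass is the options half of the client-go signature refactor. The context.Context argument is already present on the removed lines below; this commit appends the matching metav1 options struct to each call. Get, List, Watch, and Delete already accepted options, so the mechanical change is confined to Create, Update, UpdateStatus, and Patch call sites, plus the metav1 imports and BUILD dependencies they pull in. The before/after shape of a generated client, sketched for ConfigMaps (v0.18-era client-go, shown as a reference, not part of the diff):

// before: options were implicit
Update(ctx context.Context, configMap *v1.ConfigMap) (*v1.ConfigMap, error)

// after: options are an explicit, required argument
Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error)
Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error)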

View File

@@ -27,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -243,7 +244,7 @@ func (e *Signer) signConfigMap() {
}
func (e *Signer) updateConfigMap(cm *v1.ConfigMap) {
-_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm)
+_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
klog.V(3).Infof("Error updating ConfigMap: %v", err)
}
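A minimal, self-contained example of the new call shape, run against the fake clientset (illustrative, assuming client-go v0.18+; names are not from this commit):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "demo"}}
	// Create and Update now take a required options struct as the last fixed argument.
	cm, err := client.CoreV1().ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	cm.Data = map[string]string{"key": "value"}
	if _, err := client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil {
		fmt.Println("update failed:", err)
	}
}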

View File

@@ -30,6 +30,7 @@ go_library(
"//pkg/controller/certificates:go_default_library",
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
],

View File

@@ -26,6 +26,7 @@ import (
authorization "k8s.io/api/authorization/v1"
capi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
@@ -130,7 +131,7 @@ func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs auth
ResourceAttributes: &rattrs,
},
}
-sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar)
+sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
if err != nil {
return false, err
}

View File

@@ -186,7 +186,7 @@ func (c *Publisher) syncNamespace(ns string) error {
Data: map[string]string{
"ca.crt": string(c.rootCA),
},
-})
+}, metav1.CreateOptions{})
return err
case err != nil:
return err
@@ -202,7 +202,7 @@ func (c *Publisher) syncNamespace(ns string) error {
cm.Data = data
-_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm)
+_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
return err
}

View File

@@ -36,6 +36,7 @@ go_library(
"//pkg/controller/certificates:go_default_library",
"//pkg/controller/certificates/authority:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:go_default_library",
"//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",

View File

@@ -24,6 +24,7 @@ import (
"time"
capi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
@@ -95,7 +96,7 @@ func (s *signer) handle(csr *capi.CertificateSigningRequest) error {
if err != nil {
return fmt.Errorf("error auto signing csr: %v", err)
}
-_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr)
+_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("error updating signature for csr: %v", err)
}

View File

@@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
// Try token review first
tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}}
-if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview); err == nil {
+if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}); err == nil {
if !tokenResult.Status.Authenticated {
klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name)
return nil, false, nil

View File

@@ -26,6 +26,7 @@ import (
"golang.org/x/oauth2"
v1authenticationapi "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
@@ -179,7 +180,7 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) {
Spec: v1authenticationapi.TokenRequestSpec{
ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds),
},
-})
+}, metav1.CreateOptions{})
if inErr != nil {
klog.Warningf("get token failed: %v", inErr)
return false, nil

View File

@@ -385,7 +385,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
modify(curNode)
}
-_, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode)
+_, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode, metav1.UpdateOptions{})
if err != nil {
return err
}

View File

@@ -127,7 +127,7 @@ func (c *ClusterRoleAggregationController) syncClusterRole(key string) error {
for _, rule := range newPolicyRules {
clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy())
}
-_, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole)
+_, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{})
return err
}

View File

@@ -420,7 +420,7 @@ type RealRSControl struct {
var _ RSControlInterface = &RealRSControl{}
func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
-_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
@@ -440,7 +440,7 @@ type RealControllerRevisionControl struct {
var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{}
func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error {
-_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
@@ -537,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
}
func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
-_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
@@ -577,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
if len(labels.Set(pod.Labels)) == 0 {
return fmt.Errorf("unable to create pods, no labels")
}
-newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod)
+newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
// only send an event if the namespace isn't terminating
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -1119,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
-_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes)
+_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
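The patchBytes in calls like PatchNodeTaints above are typically produced with a two-way strategic merge patch; a sketch of the whole flow with the now-required PatchOptions (the helper name patchNode is assumed, not from this commit):

package nodeutil // illustrative package name

import (
	"context"
	"encoding/json"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
)

// patchNode diffs oldNode against newNode and submits the resulting
// strategic merge patch.
func patchNode(c clientset.Interface, oldNode, newNode *v1.Node) error {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return err
	}
	newData, err := json.Marshal(newNode)
	if err != nil {
		return err
	}
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return err
	}
	_, err = c.CoreV1().Nodes().Patch(context.TODO(), newNode.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}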
@@ -1178,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
if err != nil {
return fmt.Errorf("failed to create a two-way merge patch: %v", err)
}
-if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to patch the node: %v", err)
}
return nil
@@ -1197,13 +1197,13 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam
// Create the namespace if we can't verify it exists.
// Tolerate errors, since we don't know whether this component has namespace creation permissions.
if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
-if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) {
+if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
klog.Warningf("create non-exist namespace %s failed:%v", namespace, err)
}
}
// Create the service account
-sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}})
+sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
// If we're racing to init and someone else already created it, re-fetch
return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})

View File

@@ -45,7 +45,7 @@ type realSJControl struct {
var _ sjControlInterface = &realSJControl{}
func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
-return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj)
+return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj, metav1.UpdateOptions{})
}
// fakeSJControl is the default implementation of sjControlInterface.
@@ -107,15 +107,15 @@ func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
}
func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job)
+return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job, metav1.UpdateOptions{})
}
func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
-return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, subresources...)
+return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, metav1.PatchOptions{}, subresources...)
}
func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job)
+return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
func (r realJobControl) DeleteJob(namespace string, name string) error {
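One detail worth noting in PatchJob above: the new PatchOptions argument is inserted before the existing variadic subresources, so subresource callers gain the same extra argument in the same position. Illustrative call against a Job's status subresource:

_, err := kubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{}, "status")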

View File

@@ -1032,7 +1032,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
toUpdate.Status.NumberAvailable = int32(numberAvailable)
toUpdate.Status.NumberUnavailable = int32(numberUnavailable)
-if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate); updateErr == nil {
+if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}); updateErr == nil {
return nil
}

View File

@@ -95,7 +95,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
toUpdate := history.DeepCopy()
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
+history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, nil, err
}
@@ -130,7 +130,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
if cur.Revision < currRevision {
toUpdate := cur.DeepCopy()
toUpdate.Revision = currRevision
-_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
+_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, nil, err
}
@@ -220,7 +220,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
-_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate)
+_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
@@ -323,7 +323,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
Revision: revision,
}
-history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history)
+history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history, metav1.CreateOptions{})
if outerErr := err; errors.IsAlreadyExists(outerErr) {
// TODO: Is it okay to get from historyLister?
existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -353,7 +353,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
currDS.Status.CollisionCount = new(int32)
}
*currDS.Status.CollisionCount++
-_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS)
+_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS, metav1.UpdateOptions{})
if updateErr != nil {
return nil, updateErr
}

View File

@@ -589,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
-dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
}
return nil
}
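This refactor passes empty option structs everywhere, but the structs are not placeholders: they carry per-request fields such as FieldManager and DryRun. A hedged illustration (the values are examples, not from this commit):

_, err := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{
	FieldManager: "deployment-controller",    // attributes managed fields to this writer
	DryRun:       []string{metav1.DryRunAll}, // runs admission and validation without persisting
})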

View File

@@ -22,6 +22,7 @@ import (
"reflect"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
@@ -113,7 +114,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
newDeployment := d
newDeployment.Status = newStatus
-_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
+_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
return err
}

View File

@@ -21,6 +21,7 @@ import (
"fmt"
"strconv"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
@@ -114,7 +115,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
setRollbackTo(d, nil)
-_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d)
+_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d, metav1.UpdateOptions{})
return err
}

View File

@@ -98,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
}
var err error
-d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
return err
}
@@ -155,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy)
+return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
}
// Should use the revision in existingNewRS's annotation, since it was set before
@@ -173,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
if needsUpdate {
var err error
-if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil {
+if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil {
return nil, err
}
}
@@ -220,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
-createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS)
+createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{})
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
case errors.IsAlreadyExists(err):
@@ -252,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
-_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
if dErr == nil {
klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@@ -268,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@@ -285,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
needsUpdate = true
}
if needsUpdate {
-_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
}
return createdRS, err
}
@@ -420,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
-rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy)
+rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -478,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,
newDeployment := d
newDeployment.Status = newStatus
-_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
+_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
return err
}

View File

@@ -792,6 +792,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
// If this update fails, don't retry it. Allow the failure to get handled &
// retried in `processNextWorkItem()`.
-_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb)
+_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb, metav1.UpdateOptions{})
return err
}

View File

@@ -1054,14 +1054,14 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
// Create a PDB and 3 pods that match it.
pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
-pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb)
+pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
podNames := []string{"moe", "larry", "curly"}
for _, name := range podNames {
pod, _ := newPod(t, name)
-_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod)
+_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
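Test files change in lockstep because the fake clientset implements the same generated interfaces. A minimal sketch of the updated test idiom (illustrative):

client := fake.NewSimpleClientset()
if _, err := client.PolicyV1beta1().PodDisruptionBudgets("default").Create(context.TODO(), pdb, metav1.CreateOptions{}); err != nil {
	t.Fatalf("Failed to create PDB: %v", err)
}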

View File

@@ -513,10 +513,10 @@ func (e *EndpointController) syncService(key string) error {
klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
if createEndpoints {
// No previous endpoints, create them
-_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints)
+_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints, metav1.CreateOptions{})
} else {
// Pre-existing
-_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints)
+_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints, metav1.UpdateOptions{})
}
if err != nil {
if createEndpoints && errors.IsForbidden(err) {

View File

@@ -250,7 +250,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
if err != nil {
t.Fatalf("Expected no error adding EndpointSlice: %v", err)
}
-_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice)
+_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Expected no error creating EndpointSlice: %v", err)
}
@@ -306,7 +306,7 @@ func TestSyncServiceFull(t *testing.T) {
},
}
esController.serviceStore.Add(service)
-_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
+_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
assert.Nil(t, err, "Expected no error creating service")
// run through full sync service loop
@@ -369,7 +369,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac
},
}
esController.serviceStore.Add(service)
-_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
+_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
assert.Nil(t, err, "Expected no error creating service")
return service
}

View File

@@ -206,7 +206,7 @@ func (r *reconciler) finalize(
for _, endpointSlice := range slicesToCreate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
-_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice)
+_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
// If the namespace is terminating, creates will continue to fail. Simply drop the item.
if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
@@ -221,7 +221,7 @@ func (r *reconciler) finalize(
for _, endpointSlice := range slicesToUpdate {
addTriggerTimeAnnotation(endpointSlice, triggerTime)
-_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice)
+_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
} else {

View File

@@ -204,7 +204,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
-_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1)
+_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1, metav1.CreateOptions{})
assert.Nil(t, createErr, "Expected no error creating endpoint slice")
numActionsBefore := len(client.Actions())
@@ -828,7 +828,7 @@ func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool {
func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) {
t.Helper()
for _, endpointSlice := range endpointSlices {
-_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice)
+_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err)
}

View File

@@ -249,7 +249,7 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
// Update the revisions name
clone.Name = ControllerRevisionName(parent.GetName(), hash)
ns := parent.GetNamespace()
-created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone)
+created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone, metav1.CreateOptions{})
if errors.IsAlreadyExists(err) {
exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{})
if err != nil {
@@ -272,7 +272,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
return nil
}
clone.Revision = newRevision
-updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone)
+updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{})
if updateErr == nil {
return nil
}
@@ -328,14 +328,14 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind
}
// Use strategic merge patch to add an owner reference indicating a controller ref
return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(),
-types.StrategicMergePatchType, patchBytes)
+types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
}
func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) {
// Use strategic merge patch to add an owner reference indicating a controller ref
released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(),
types.StrategicMergePatchType,
-[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)))
+[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)), metav1.PatchOptions{})
if err != nil {
if errors.IsNotFound(err) {
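For reference, the release patch above leans on two strategic-merge details: the "$patch": "delete" directive removes the matching ownerReferences entry, and embedding the revision's own UID in metadata makes the patch fail, rather than mutate a different object, if the named revision was deleted and recreated in the meantime. The payload shape (placeholders, not literal values):

{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"<parent-uid>"}],"uid":"<revision-uid>"}}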

View File

@@ -261,7 +261,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) {
var collisionCount int32
for _, item := range test.existing {
-_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision)
+_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}

View File

@@ -832,7 +832,7 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error {
break
}
newJob.Status = job.Status
-if _, err = jobClient.UpdateStatus(context.TODO(), newJob); err == nil {
+if _, err = jobClient.UpdateStatus(context.TODO(), newJob, metav1.UpdateOptions{}); err == nil {
break
}
}

View File

@@ -269,7 +269,7 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam
newNamespace.ObjectMeta = namespace.ObjectMeta
newNamespace.Status = *namespace.Status.DeepCopy()
newNamespace.Status.Phase = v1.NamespaceTerminating
-return d.nsClient.UpdateStatus(context.TODO(), &newNamespace)
+return d.nsClient.UpdateStatus(context.TODO(), &newNamespace, metav1.UpdateOptions{})
}
// finalized returns true if the namespace.Spec.Finalizers is an empty list
@@ -551,7 +551,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
// we need to reflect that information. Recall that additional finalizers can be set on namespaces, so this finalizer may clear itself and
// NOT remove the resource instance.
if hasChanged := conditionUpdater.Update(ns); hasChanged {
-if _, err = d.nsClient.UpdateStatus(context.TODO(), ns); err != nil {
+if _, err = d.nsClient.UpdateStatus(context.TODO(), ns, metav1.UpdateOptions{}); err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err))
}
}

View File

@@ -103,7 +103,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang
return err
}
-_, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes)
+_, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
return err
}

View File

@@ -1149,7 +1149,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
-if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node); err != nil {
+if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
klog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err
}

View File

@@ -1113,7 +1113,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
}
hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler)
-_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1)
+_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1, metav1.UpdateOptions{})
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)

View File

@@ -346,7 +346,7 @@ func TestGCOrphaned(t *testing.T) {
// Execute planned nodes changes
for _, node := range test.addedClientNodes {
-client.CoreV1().Nodes().Create(context.TODO(), node)
+client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
}
for _, node := range test.deletedClientNodes {
client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{})

View File

@@ -1159,7 +1159,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
}
oldRS := newReplicaSet(1, map[string]string{"foo": "bar"})
-oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS)
+oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
@@ -1240,7 +1240,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
newRS := oldRS.DeepCopy()
newRS.UID = uuid.NewUUID()
-newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS)
+newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}

View File

@@ -64,7 +64,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration))
rs.Status = newStatus
-updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs)
+updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{})
if updateErr == nil {
return updatedRS, nil
}

View File

@@ -356,7 +356,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
// there was a change observed by this controller that requires we update quota
if dirty {
-_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage)
+_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{})
if err != nil {
errors = append(errors, err)
}

View File

@@ -12,6 +12,7 @@ go_library(
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",

View File

@@ -310,7 +310,7 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
controller, cloud, client := newController()
cloud.Exists = tc.lbExists
key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
-if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service); err != nil {
+if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
}
client.ClearActions()
@@ -603,7 +603,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
for _, tc := range testCases {
newSvc := tc.updateFn(tc.svc)
-if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
}
obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
@@ -1222,7 +1222,7 @@ func TestAddFinalizer(t *testing.T) {
s := &Controller{
kubeClient: c,
}
-if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.addFinalizer(tc.svc); err != nil {
@@ -1276,7 +1276,7 @@ func TestRemoveFinalizer(t *testing.T) {
s := &Controller{
kubeClient: c,
}
-if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.removeFinalizer(tc.svc); err != nil {
@@ -1376,7 +1376,7 @@ func TestPatchStatus(t *testing.T) {
s := &Controller{
kubeClient: c,
}
-if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {

View File

@@ -22,6 +22,7 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -38,7 +39,7 @@ func patch(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v
return nil, err
}
-return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status")
+return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
}
func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) {

View File

@@ -45,7 +45,7 @@ func TestPatch(t *testing.T) {
// Issue a separate update and verify patch doesn't fail after this.
svcToUpdate := svcOrigin.DeepCopy()
addAnnotations(svcToUpdate)
-if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil {
+if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Failed to update service: %v", err)
}

View File

@@ -213,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
// TODO eliminate this once the fake client can handle creation without NS
sa.Namespace = ns.Name
-if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa); err != nil && !apierrors.IsAlreadyExists(err) {
+if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
// we can safely ignore terminating namespace errors
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
createFailures = append(createFailures, err)

View File

@@ -407,7 +407,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
}
// Save the secret
-createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret)
+createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
// if the namespace is being terminated, create will fail no matter what
if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -449,7 +449,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
// Try to add a reference to the token
liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name})
-if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount); err != nil {
+if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount, metav1.UpdateOptions{}); err != nil {
return err
}
@@ -567,7 +567,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID)
// Save the secret
-_, err = secrets.Update(context.TODO(), liveSecret)
+_, err = secrets.Update(context.TODO(), liveSecret, metav1.UpdateOptions{})
if apierrors.IsConflict(err) || apierrors.IsNotFound(err) {
// if we got a Conflict error, the secret was updated by someone else, and we'll get an update notification later
// if we got a NotFound error, the secret no longer exists, and we don't need to populate a token
@@ -611,7 +611,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
}
}
serviceAccount.Secrets = secrets
-_, err = serviceAccounts.Update(context.TODO(), serviceAccount)
+_, err = serviceAccounts.Update(context.TODO(), serviceAccount, metav1.UpdateOptions{})
// Ignore NotFound errors when attempting to remove a reference
if apierrors.IsNotFound(err) {
return nil

View File

@@ -24,6 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
@@ -78,7 +79,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod
return err
}
// If we created the PVCs attempt to create the Pod
-_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod)
+_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
// sink already exists errors
if apierrors.IsAlreadyExists(err) {
return err
@@ -114,7 +115,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
attemptedUpdate = true
// commit the update, retrying on conflicts
-_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod)
+_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
if updateErr == nil {
return nil
}
@@ -183,7 +184,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
_, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
switch {
case apierrors.IsNotFound(err):
-_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim)
+_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim, metav1.CreateOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
}

View File

@@ -21,6 +21,7 @@ import (
"fmt"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1"
@@ -54,7 +55,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
// don't wait due to limited number of clients, but backoff after the default number of steps
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
set.Status = *status
-_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set)
+_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{})
if updateErr == nil {
return nil
}
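Spelled out, the retry.RetryOnConflict wrapper around the new UpdateStatus signature works like this (a sketch with assumed names; the real updater re-reads the object between attempts):

err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
	set, getErr := client.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if getErr != nil {
		return getErr
	}
	set.Status = *status
	// Only Conflict errors trigger another attempt; everything else is returned as-is.
	_, updateErr := client.AppsV1().StatefulSets(namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{})
	return updateErr
})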

View File

@@ -14,6 +14,7 @@ go_library(
"//pkg/controller:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",

View File

@@ -36,6 +36,7 @@ import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -264,7 +265,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
if err != nil {
return err
}
-_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes)
+_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
return err

View File

@@ -110,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
var updatedPod *v1.Pod
var err error
-if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod); err != nil {
+if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
return nil, err
}
return updatedPod, nil
@@ -137,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
-_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod)
+_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
// NotFound error means that pod was already deleted.

View File

@@ -270,7 +270,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
for _, newPod := range extraPods1 {
// Add a new pod between ASW and DSW populators
-_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
+_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}
@@ -287,7 +287,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
for _, newPod := range extraPods2 {
// Add a new pod between DSW populator and reconciler run
-_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
+_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}

View File

@@ -203,7 +203,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi
return fmt.Errorf("Error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err)
}
-_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)
+_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if updateErr != nil {
klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)

View File

@@ -754,7 +754,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
return claim, nil
}
-newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone)
+newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
return newClaim, err
@@ -810,7 +810,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
volumeClone.Status.Phase = phase
volumeClone.Status.Message = message
-newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone)
+newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
return newVol, err
@@ -872,7 +872,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) {
claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef)
klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name)
-newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err)
return newVol, err
@@ -924,7 +924,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
if dirty {
klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
-newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
+newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
return newClaim, err
@@ -1011,7 +1011,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
volumeClone.Spec.ClaimRef.UID = ""
}
-newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
return err
@@ -1515,7 +1515,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
var newVol *v1.PersistentVolume
-if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume); err == nil || apierrors.IsAlreadyExists(err) {
+if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{}); err == nil || apierrors.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))
@@ -1631,7 +1631,7 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist
newClaim := claim.DeepCopy()
delete(newClaim.Annotations, pvutil.AnnSelectedNode)
// Try to update the PVC object
-if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim); err != nil {
+if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim, metav1.UpdateOptions{}); err != nil {
klog.V(4).Infof("Failed to delete annotation 'pvutil.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
return
}
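The Create call in provisionClaimOperation above deliberately treats IsAlreadyExists as success; the general create-or-reuse idiom under the new signature (a sketch, names assumed):

pv, err := kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
	// A previous attempt (or another writer) saved it first; re-read and reuse it.
	pv, err = kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{})
}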

View File

@@ -322,7 +322,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v
if !modified {
return claimClone, nil
}
-newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
@@ -339,7 +339,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume
if !modified {
return volumeClone, nil
}
-newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
@@ -546,7 +546,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName)
updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, pvutil.AnnStorageProvisioner)
-newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
+newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
return newClaim, err
}

View File

@@ -189,7 +189,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
}
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
-_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
return err
@@ -201,7 +201,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
-_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
return err

View File

@@ -11,6 +11,7 @@ go_library(
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
@@ -162,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
}
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
-_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
+_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
return err
@@ -174,7 +175,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
-_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
+_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
return err

View File

@@ -424,7 +424,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// TODO: does it hurt if we make an api call and nothing needs to be updated?
claimKey := claimToClaimKey(binding.pvc)
klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
-newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv)
+newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
return err
@@ -439,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
for i, claim = range claimsToProvision {
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
-newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim)
+newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
if err != nil {
return err
}

View File

@@ -295,7 +295,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P
func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, waitCache bool) {
for _, pv := range pvs {
-if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv); err != nil {
+if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
t.Fatalf("failed to update PV %q", pv.Name)
}
}
@@ -321,7 +321,7 @@ func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, wait
func (env *testEnv) updateClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim, waitCache bool) {
for _, pvc := range pvcs {
-if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc); err != nil {
+if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
t.Fatalf("failed to update PVC %q", getPVCName(pvc))
}
}
@@ -1769,7 +1769,7 @@ func TestBindPodVolumes(t *testing.T) {
newPVC := pvc.DeepCopy()
newPVC.Spec.VolumeName = pv.Name
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
}
},
@@ -1786,14 +1786,14 @@ func TestBindPodVolumes(t *testing.T) {
return
}
dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass)
-dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV)
+dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV, metav1.CreateOptions{})
if err != nil {
t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err)
return
}
newPVC.Spec.VolumeName = dynamicPV.Name
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
}
},
@@ -1869,7 +1869,7 @@ func TestBindPodVolumes(t *testing.T) {
newPVC := pvcs[0].DeepCopy()
newPVC.Spec.VolumeName = pvNode2.Name
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
}
},
@@ -1904,13 +1904,13 @@ func TestBindPodVolumes(t *testing.T) {
// Before Execute
if scenario.apiPV != nil {
-_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV)
+_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("failed to update PV %q", scenario.apiPV.Name)
}
}
if scenario.apiPVC != nil {
-_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC)
+_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC))
}