generated: run refactor

Mike Danese
2020-02-07 18:16:47 -08:00
parent 7e88d8db66
commit 3aa59f7f30
697 changed files with 4380 additions and 3806 deletions
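The change is mechanical: every typed client-go call in the deployment controller gains a context.Context as its first argument, with context.TODO() dropped in as a placeholder. A minimal sketch of the pattern, assuming a typed clientset is already in hand (the helper name and arguments below are illustrative, not taken from the diff):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getDeployment shows the call shape before and after the refactor.
func getDeployment(client kubernetes.Interface, namespace, name string) error {
	// Before: no context parameter.
	//   d, err := client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})

	// After: context.Context comes first; the generated change passes
	// context.TODO() as a placeholder at every call site.
	_, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	return err
}
```

The same shape is applied to Create, Update, UpdateStatus, Delete, and List in the hunks below.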


@@ -21,6 +21,7 @@ limitations under the License.
package deployment
import (
"context"
"fmt"
"reflect"
"time"
@@ -508,7 +509,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment)
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing ReplicaSets (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -588,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
-dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
}
return nil
}
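context.TODO() is the standard library's explicit placeholder for call sites that do not yet have a context to hand. Purely as an illustration (not part of this commit), a caller that does hold or build a real context could bound the uncached read above; the helper name and the 30-second timeout here are assumptions:

```go
package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getFreshDeployment performs the same kind of uncached read, but with a
// deadline instead of the open-ended context.TODO().
func getFreshDeployment(client kubernetes.Interface, namespace, name string) (*appsv1.Deployment, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
}
```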


@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
"context"
"fmt"
"reflect"
"time"
@@ -112,7 +113,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
newDeployment := d
newDeployment.Status = newStatus
-_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
return err
}


@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
"context"
"fmt"
"strconv"
@@ -113,7 +114,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
setRollbackTo(d, nil)
-_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
+_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d)
return err
}


@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
"context"
"fmt"
"reflect"
"sort"
@@ -97,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
}
var err error
-d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
return err
}
@@ -154,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
+return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy)
}
// Should use the revision in existingNewRS's annotation, since it was set before
@@ -172,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
if needsUpdate {
var err error
-if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
+if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil {
return nil, err
}
}
@@ -219,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
-createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
+createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
case errors.IsAlreadyExists(err):
@@ -251,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
-_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
if dErr == nil {
klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@@ -267,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@@ -284,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
needsUpdate = true
}
if needsUpdate {
-_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
}
return createdRS, err
}
@@ -419,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
-rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
+rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -457,7 +458,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
continue
}
klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
-if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
+if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
@@ -477,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,
newDeployment := d
newDeployment.Status = newStatus
-_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
return err
}
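Callers and tests adapt by supplying a context in the first position. A small, self-contained sketch (not part of this commit) that exercises the context-first read calls against the fake clientset; the object name and namespace are made up:

```go
package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset with one Deployment.
	client := fake.NewSimpleClientset(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})

	// Same call shapes the refactor introduces: context first, then the old arguments.
	d, err := client.AppsV1().Deployments("default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	rsList, err := client.AppsV1().ReplicaSets("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Name, len(rsList.Items))
}
```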


@@ -17,6 +17,7 @@ limitations under the License.
package util
import (
"context"
"fmt"
"math"
"sort"
@@ -545,7 +546,7 @@ func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface)
// RsListFromClient returns an rsListFunc that wraps the given client.
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
-rsList, err := c.ReplicaSets(namespace).List(options)
+rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
if err != nil {
return nil, err
}