Merge pull request #49763 from supereagle/versioned-group-clients

Automatic merge from submit-queue (batch tested with PRs 55331, 55272, 55228, 49763, 55242). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

use versioned group clients from client-go

**What this PR does / why we need it**:
Some **deprecated** group clients are still in use; this PR replaces them with the versioned group clients from client-go.
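
For context, a minimal, hypothetical sketch of the migration pattern (assuming client-go of this PR's vintage, before the context-aware API; the kubeconfig path and `default` namespace are placeholders, not taken from this PR):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (placeholder path).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Deprecated: the unversioned group accessor, which silently aliases
	// one group version:
	//   client.Extensions().Deployments("default").List(metav1.ListOptions{})
	// Preferred: name the group version explicitly.
	deployments, err := client.ExtensionsV1beta1().Deployments("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d deployments\n", len(deployments.Items))
}
```

Naming the version explicitly makes call sites unambiguous about which API version they depend on, which is the point of this cleanup.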

**Which issue this PR fixes**: fixes #49760

**Special notes for your reviewer**:
/assign @caesarxuchao

**Release note**:
```release-note
NONE
```
Kubernetes Submit Queue, 2017-11-08 17:13:27 -08:00 (committed by GitHub)
37 changed files with 163 additions and 163 deletions


@@ -106,7 +106,7 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error {
}
if approved {
appendApprovalCondition(csr, r.successMessage)
- _, err = a.client.Certificates().CertificateSigningRequests().UpdateApproval(csr)
+ _, err = a.client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr)
if err != nil {
return fmt.Errorf("error updating approval for csr: %v", err)
}


@@ -103,7 +103,7 @@ func (s *cfsslSigner) handle(csr *capi.CertificateSigningRequest) error {
if err != nil {
return fmt.Errorf("error auto signing csr: %v", err)
}
- _, err = s.client.Certificates().CertificateSigningRequests().UpdateStatus(csr)
+ _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
if err != nil {
return fmt.Errorf("error updating signature for csr: %v", err)
}


@@ -410,7 +410,7 @@ type RealRSControl struct {
var _ RSControlInterface = &RealRSControl{}
func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
- _, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
+ _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}


@@ -1071,7 +1071,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
}
numberUnavailable := desiredNumberScheduled - numberAvailable
- err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
+ err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
if err != nil {
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
}


@@ -587,7 +587,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
- dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
}
return nil
}


@@ -112,7 +112,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
newDeployment := d
newDeployment.Status = newStatus
- _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+ _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}


@@ -112,6 +112,6 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error {
glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
d.Spec.RollbackTo = nil
- _, err := dc.client.Extensions().Deployments(d.Namespace).Update(d)
+ _, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d)
return err
}


@@ -100,7 +100,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
}
var err error
- d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
return err
}
@@ -167,7 +167,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,
return nil, err
}
// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
- updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
+ updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its pod template label.
if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
@@ -207,7 +207,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,
// 3. Update rs label and selector to include the new hash label
// Copy the old selector, so that we can scrub out any orphaned pods
- updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
+ updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its label and selector.
if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return utilerrors.ErrPreconditionViolated
@@ -251,7 +251,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
- return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
+ return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
}
// Should use the revision in existingNewRS's annotation, since it was set earlier
@@ -269,7 +269,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
if needsUpdate {
var err error
- if d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d); err != nil {
+ if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
return nil, err
}
}
@@ -315,7 +315,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
- createdRS, err := dc.client.Extensions().ReplicaSets(d.Namespace).Create(&newRS)
+ createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
// Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal
@@ -338,7 +338,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
- _, dErr := dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@@ -355,7 +355,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
- _, _ = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@@ -372,7 +372,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
needsUpdate = true
}
if needsUpdate {
- _, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
}
return createdRS, err
}
@@ -508,7 +508,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
var err error
if sizeNeedsUpdate || annotationsNeedUpdate {
*(rsCopy.Spec.Replicas) = newScale
- rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
+ rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -546,7 +546,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
- if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
+ if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
@@ -566,7 +566,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic
newDeployment := d
newDeployment.Status = newStatus
- _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+ _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}


@@ -728,7 +728,7 @@ func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy
}
func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
- pdbClient := dc.kubeClient.Policy().PodDisruptionBudgets(pdb.Namespace)
+ pdbClient := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace)
st := pdb.Status
var err error


@@ -770,7 +770,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
}
func (jm *JobController) updateJobStatus(job *batch.Job) error {
- _, err := jm.kubeClient.Batch().Jobs(job.Namespace).UpdateStatus(job)
+ _, err := jm.kubeClient.BatchV1().Jobs(job.Namespace).UpdateStatus(job)
return err
}


@@ -599,7 +599,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
// Always updates status as pods come up or die.
- updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, newStatus)
+ updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus)
if err != nil {
// Multiple things could lead to this update failing. Requeuing the replica set ensures
// Returning an error causes a requeue without forcing a hotloop