move the retry util out of pkg/client/unversioned

Chao Xu 2016-10-13 11:49:19 -07:00
parent 834de10774
commit fbd187af45
7 changed files with 13 additions and 12 deletions

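The retry helpers (RetryOnConflict, DefaultRetry, DefaultBackoff) move from pkg/client/unversioned to k8s.io/kubernetes/pkg/client/retry, so call sites only change their import path and package qualifier. Below is a minimal sketch of a caller after this move; the attempt counter and the simulated conflict built with errors.NewConflict are illustrative only and not part of this commit.

```go
package main

import (
	"fmt"

	apierrors "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/retry" // new home; was k8s.io/kubernetes/pkg/client/unversioned
)

func main() {
	attempts := 0
	// RetryOnConflict re-runs the function, sleeping per retry.DefaultBackoff,
	// as long as it returns a Conflict (HTTP 409) error; any other error (or
	// nil) ends the loop immediately.
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		attempts++
		if attempts < 3 {
			// Simulated optimistic-concurrency failure; NewConflict is only
			// used here to produce an error that the conflict check recognizes.
			return apierrors.NewConflict(unversioned.GroupResource{Resource: "pods"}, "example", fmt.Errorf("stale resourceVersion"))
		}
		return nil
	})
	fmt.Println(attempts, err) // expected: 3 <nil>
}
```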

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package unversioned
+package retry
import (
"time"


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package unversioned
+package retry
import (
"fmt"


@@ -20,7 +20,7 @@ import (
"fmt"
"k8s.io/kubernetes/pkg/apis/extensions"
-unversionedclient "k8s.io/kubernetes/pkg/client/unversioned"
+"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/wait"
)
@@ -107,7 +107,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
specReplicas := rs.Spec.Replicas
statusReplicas := rs.Status.Replicas
-if err := wait.ExponentialBackoff(unversionedclient.DefaultRetry, func() (bool, error) {
+if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
if err != nil {
return false, err

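In the deployment controller the polling loop keeps using wait.ExponentialBackoff and only swaps its backoff parameters to retry.DefaultRetry. Below is a sketch of that polling pattern under the new import; the counter stands in for re-reading replica-set status and is not taken from the controller.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/retry"
	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	observed := int32(0)
	// ExponentialBackoff calls the condition up to retry.DefaultRetry.Steps
	// times, sleeping between attempts; (true, nil) stops the loop, a non-nil
	// error aborts it, and running out of steps returns wait.ErrWaitTimeout.
	err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
		observed++ // stand-in for re-reading rs.Status.Replicas from the lister
		return observed >= 3, nil
	})
	fmt.Println(observed, err) // expected: 3 <nil>
}
```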

@@ -26,7 +26,7 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+clientretry "k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/registry/core/secret"
"k8s.io/kubernetes/pkg/runtime"
@@ -298,7 +298,7 @@ func (e *TokensController) syncSecret() {
// If the service account exists
if sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, false); saErr == nil && sa != nil {
// secret no longer exists, so delete references to this secret from the service account
-if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
+if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error {
return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
}); err != nil {
glog.Error(err)

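The tokens controller passes its own RemoveTokenBackoff rather than the package default, which keeps working because RetryOnConflict accepts any wait.Backoff. Below is a sketch with a placeholder backoff; the Steps/Duration/Factor/Jitter values are illustrative, not the controller's actual RemoveTokenBackoff.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/client/retry"
	"k8s.io/kubernetes/pkg/util/wait"
)

// customBackoff plays the role of the controller's RemoveTokenBackoff; the
// numbers here are placeholders, not the values used by the tokens controller.
var customBackoff = wait.Backoff{
	Steps:    5,
	Duration: 100 * time.Millisecond,
	Factor:   2.0,
	Jitter:   0.1,
}

func main() {
	// RetryOnConflict takes any wait.Backoff, so controller-specific policies
	// survive the package move unchanged; DefaultBackoff is just a preset.
	err := retry.RetryOnConflict(customBackoff, func() error {
		return nil // stand-in for e.removeSecretReference(...)
	})
	fmt.Println(err) // expected: <nil>
}
```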

@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+"k8s.io/kubernetes/pkg/client/retry"
client "k8s.io/kubernetes/pkg/client/unversioned"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/labels"
@@ -766,7 +767,7 @@ func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, names
return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
}
oldRc := obj.(*api.ReplicationController)
-err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
@@ -801,7 +802,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod
return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
}
oldPod := obj.(*api.Pod)
-err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(pod)
if pod, e = podClient.Pods(namespace).Update(pod); e == nil {

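updateRcWithRetries and updatePodWithRetries follow the usual read-modify-write loop: apply the mutation, try the Update, and on a Conflict work from a fresh copy on the next attempt. Below is a self-contained sketch of that loop against an in-memory stand-in for the apiserver; fakeStore and its methods are invented for illustration and are not part of this commit.

```go
package main

import (
	"fmt"

	apierrors "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/retry"
)

// fakeStore is an invented stand-in for the apiserver: Update only succeeds
// when the caller last saw the current version, otherwise it returns Conflict.
type fakeStore struct{ version int }

func (s *fakeStore) Get() int { return s.version }

func (s *fakeStore) Update(seen int) error {
	if seen != s.version {
		return apierrors.NewConflict(unversioned.GroupResource{Resource: "replicationcontrollers"}, "example", fmt.Errorf("object was modified"))
	}
	s.version++
	return nil
}

func main() {
	store := &fakeStore{version: 7}
	seen := 3 // stale copy, as after the initial read in updateRcWithRetries
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Apply the mutation and push it; on failure re-read the current
		// object so the next attempt (triggered only for Conflicts) is fresh.
		if e := store.Update(seen); e != nil {
			seen = store.Get()
			return e
		}
		return nil
	})
	fmt.Println(err) // expected: <nil> after one conflict-and-retry round
}
```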

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
"k8s.io/kubernetes/pkg/registry/core/service"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
@@ -75,7 +75,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
// RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
-return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
}
// runOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.


@@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
-client "k8s.io/kubernetes/pkg/client/unversioned"
+"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
"k8s.io/kubernetes/pkg/registry/core/service"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
@@ -61,7 +61,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
// RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
-return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
}
// runOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.