Merge pull request #34759 from caesarxuchao/remove-unversioned

Automatic merge from submit-queue

Remove imports of "pkg/client/unversioned" in "pkg/controller"

We are working to deprecate "pkg/client/unversioned"; see https://github.com/kubernetes/kubernetes/issues/25442.

This is also part of https://github.com/kubernetes/kubernetes/issues/29934.
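
For reviewers, the mechanical shape of most of these changes is an import swap: call sites that pulled in "pkg/client/unversioned" only for its conflict-retry helpers now use the standalone "pkg/client/retry" package. A minimal sketch of the new call pattern, using the retry identifiers that appear in the diffs below (the wrapper function itself is hypothetical, not code from this PR):

```go
package example

import "k8s.io/kubernetes/pkg/client/retry"

// updateWithConflictRetry shows the call shape used throughout this PR:
// RetryOnConflict re-runs update() with retry.DefaultBackoff for as long as
// it returns a Conflict error from the apiserver. Hypothetical helper.
func updateWithConflictRetry(update func() error) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, update)
}
```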
Kubernetes Submit Queue authored on 2016-10-15 00:10:15 -07:00; committed by GitHub
Commit 13196527cf
11 changed files with 36 additions and 24 deletions

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package unversioned
+package retry
 import (
 	"time"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package unversioned
+package retry
 import (
 	"fmt"

View File

@@ -0,0 +1,18 @@
+{
+  "Rules": [
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned"
+      ]
+    },
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned/testclient"
+      ]
+    }
+  ]
+}
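
This new file appears to be an import-restrictions policy (its path is not shown in this view) enforced by the repository's import verifier: an import whose path matches a rule's SelectorRegexp and begins with one of its ForbiddenPrefixes fails verification, so new references to "pkg/client/unversioned" or its testclient package cannot creep back into the tree it covers.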

View File

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	unversionedclient "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/wait"
 )
@@ -107,7 +107,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
 		specReplicas := rs.Spec.Replicas
 		statusReplicas := rs.Status.Replicas
-		if err := wait.ExponentialBackoff(unversionedclient.DefaultRetry, func() (bool, error) {
+		if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
			replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
			if err != nil {
				return false, err
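
The polling logic in the deployment controller is untouched; only the source of the backoff constant changes. A self-contained sketch of the wait.ExponentialBackoff pattern with retry.DefaultRetry, assuming the package paths shown above (the waitForReplicas helper and its getRS parameter are illustrative, not code from this PR):

```go
package example

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/retry"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForReplicas polls getRS under retry.DefaultRetry (a short wait.Backoff)
// until the ReplicaSet's status catches up with its spec or the backoff runs out.
// Illustrative helper; the real controller reads from its ReplicaSet lister.
func waitForReplicas(getRS func() (*extensions.ReplicaSet, error)) error {
	return wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
		rs, err := getRS()
		if err != nil {
			return false, err
		}
		return rs.Status.Replicas == rs.Spec.Replicas, nil
	})
}
```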

View File

@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/intstr"
 )
@@ -51,7 +50,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl
 func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
 	rsList, ok := obj.(*extensions.ReplicaSetList)
 	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		name := action.(testclient.GetAction).GetName()
+		name := action.(core.GetAction).GetName()
 		if ok {
			for _, rs := range rsList.Items {
				if rs.Name == name {
@@ -67,7 +66,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien
 func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
 	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
+		obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
 		return true, obj, nil
 	})
 	return fakeClient
@@ -75,7 +74,7 @@ func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
 func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
 	fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := action.(testclient.UpdateAction).GetObject().(*api.Pod)
+		obj := action.(core.UpdateAction).GetObject().(*api.Pod)
 		return true, obj, nil
 	})
 	return fakeClient
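
The test helpers above change only which package supplies the action interfaces; the reactor wiring itself is unchanged. A standalone sketch using the fake clientset and the pkg/client/testing/core types from the imports above (newEchoPodClient is an illustrative name, not a helper from this PR):

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

// newEchoPodClient returns a fake clientset whose pod updates are echoed back
// unchanged; the action is asserted to core.UpdateAction rather than the
// deprecated testclient.UpdateAction. Illustrative helper only.
func newEchoPodClient() *fake.Clientset {
	c := fake.NewSimpleClientset()
	c.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		pod := action.(core.UpdateAction).GetObject().(*api.Pod)
		return true, pod, nil
	})
	return c
}
```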

View File

@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/rand"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -615,11 +614,6 @@ func TestSyncJobExpectations(t *testing.T) {
 	}
 }
-type FakeWatcher struct {
-	w *watch.FakeWatcher
-	*testclient.Fake
-}
 func TestWatchJobs(t *testing.T) {
 	clientset := fake.NewSimpleClientset()
 	fakeWatch := watch.NewFake()

View File

@@ -23,11 +23,11 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
 )
 // updateReplicaCount attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
-func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas, numFullyLabeledReplicas, numReadyReplicas, numAvailableReplicas int) (updateErr error) {
+func updateReplicaCount(rsClient unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas, numFullyLabeledReplicas, numReadyReplicas, numAvailableReplicas int) (updateErr error) {
 	// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
 	// we do a periodic relist every 30s. If the generations differ but the replicas are
 	// the same, a caller might've resized to the same replica count.
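
updateReplicaCount now takes the generated, typed ReplicaSetInterface instead of the catch-all unversioned client. A hedged sketch of how a caller could obtain that interface from the internal clientset, assuming the group accessors of the generated clientset at this point in the tree (the replicaSetsFor helper is illustrative):

```go
package example

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
)

// replicaSetsFor returns the typed client that the new updateReplicaCount
// signature expects, scoped to one namespace. The Extensions().ReplicaSets(...)
// accessor chain is an assumption about the generated internal clientset.
func replicaSetsFor(c clientset.Interface, namespace string) unversionedextensions.ReplicaSetInterface {
	return c.Extensions().ReplicaSets(namespace)
}
```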

View File

@@ -26,7 +26,7 @@ import (
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientretry "k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/registry/core/secret"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -298,7 +298,7 @@ func (e *TokensController) syncSecret() {
 	// If the service account exists
 	if sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, false); saErr == nil && sa != nil {
		// secret no longer exists, so delete references to this secret from the service account
-		if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
+		if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error {
			return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
		}); err != nil {
			glog.Error(err)

View File

@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/labels"
@@ -766,7 +767,7 @@ func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, names
 		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
 	}
 	oldRc := obj.(*api.ReplicationController)
-	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
@@ -801,7 +802,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod
 		return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
 	}
 	oldPod := obj.(*api.Pod)
-	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(pod)
		if pod, e = podClient.Pods(namespace).Update(pod); e == nil {

View File

@@ -23,7 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
 	"k8s.io/kubernetes/pkg/registry/core/service"
 	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
@@ -75,7 +75,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
 // RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
 func (c *Repair) RunOnce() error {
-	return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+	return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
 }
 // runOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.

View File

@@ -22,7 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
 	"k8s.io/kubernetes/pkg/registry/core/service"
 	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
@@ -61,7 +61,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
 // RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
 func (c *Repair) RunOnce() error {
-	return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+	return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
 }
 // runOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.