Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-31 23:37:01 +00:00
Merge pull request #34759 from caesarxuchao/remove-unversioned
Automatic merge from submit-queue.

Remove imports of "pkg/client/unversioned" in "pkg/controller". We are trying to deprecate "pkg/client/unversioned"; see https://github.com/kubernetes/kubernetes/issues/25442. This is also part of https://github.com/kubernetes/kubernetes/issues/29934.
This commit is contained in: 13196527cf
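Every hunk below follows the same pattern: call sites that pulled retry helpers out of "pkg/client/unversioned" now take them from "pkg/client/retry", and tests use the action interfaces from "pkg/client/testing/core" instead of "pkg/client/unversioned/testclient". A minimal sketch of the new call-site shape, with a made-up update function rather than any real controller code:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/retry"
)

// updateWithRetry is a hypothetical wrapper illustrating the pattern adopted
// by the changed call sites: retry the supplied update for as long as it
// returns a Conflict error, using the package's default backoff.
func updateWithRetry(update func() error) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, update)
}

func main() {
	err := updateWithRetry(func() error {
		// Stand-in for a GET/modify/UPDATE round trip against the apiserver.
		fmt.Println("applying update")
		return nil
	})
	if err != nil {
		fmt.Println("update failed:", err)
	}
}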
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package unversioned
+package retry
 
 import (
 	"time"
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package unversioned
+package retry
 
 import (
 	"fmt"
pkg/controller/.import-restrictions (new file, 18 lines added)
@@ -0,0 +1,18 @@
+{
+  "Rules": [
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned"
+      ]
+    },
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned/testclient"
+      ]
+    }
+
+  ]
+}
+
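This new file is what keeps the cleanup from regressing: as I read the import-restrictions format, each rule's SelectorRegexp selects import paths to check for packages under pkg/controller, and anything matching a ForbiddenPrefixes entry is rejected by the import verifier (the verifier itself is not part of this diff). Roughly, the commented-out import below would now be flagged, while the one actually used is the sanctioned replacement:

// Hypothetical file somewhere under pkg/controller/...
package example

import (
	// This would match the first rule's SelectorRegexp and its ForbiddenPrefixes,
	// so the import verifier would reject it:
	//   client "k8s.io/kubernetes/pkg/client/unversioned"
	//
	// The replacement used throughout this PR:
	"k8s.io/kubernetes/pkg/client/retry"
)

var _ = retry.DefaultBackoff // keep the import used so the example compiles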
@@ -20,7 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	unversionedclient "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/wait"
 )
@@ -107,7 +107,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
 		specReplicas := rs.Spec.Replicas
 		statusReplicas := rs.Status.Replicas
 
-		if err := wait.ExponentialBackoff(unversionedclient.DefaultRetry, func() (bool, error) {
+		if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
 			replicaSet, err := dc.rsLister.ReplicaSets(rs.Namespace).Get(rs.Name)
 			if err != nil {
 				return false, err
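For this polling path the wait.ExponentialBackoff call itself is unchanged; only the backoff parameters move from unversionedclient.DefaultRetry to retry.DefaultRetry. A rough, self-contained sketch of that shape (the condition below is a counter, not the controller's real ReplicaSet lookup):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/retry"
	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll the condition with the default retry backoff until it reports done
	// (or returns an error), just as waitForInactiveReplicaSets does above.
	err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the replica set went inactive
	})
	fmt.Println("attempts:", attempts, "err:", err)
}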
@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/intstr"
 )
@@ -51,7 +50,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl
 func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset {
 	rsList, ok := obj.(*extensions.ReplicaSetList)
 	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		name := action.(testclient.GetAction).GetName()
+		name := action.(core.GetAction).GetName()
 		if ok {
 			for _, rs := range rsList.Items {
 				if rs.Name == name {
@@ -67,7 +66,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien
 
 func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
 	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
+		obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
 		return true, obj, nil
 	})
 	return fakeClient
@@ -75,7 +74,7 @@ func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
 
 func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
 	fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := action.(testclient.UpdateAction).GetObject().(*api.Pod)
+		obj := action.(core.UpdateAction).GetObject().(*api.Pod)
 		return true, obj, nil
 	})
 	return fakeClient
@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/rand"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -615,11 +614,6 @@ func TestSyncJobExpectations(t *testing.T) {
 	}
 }
 
-type FakeWatcher struct {
-	w *watch.FakeWatcher
-	*testclient.Fake
-}
-
 func TestWatchJobs(t *testing.T) {
 	clientset := fake.NewSimpleClientset()
 	fakeWatch := watch.NewFake()
@@ -23,11 +23,11 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
 )
 
 // updateReplicaCount attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
-func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas, numFullyLabeledReplicas, numReadyReplicas, numAvailableReplicas int) (updateErr error) {
+func updateReplicaCount(rsClient unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, numReplicas, numFullyLabeledReplicas, numReadyReplicas, numAvailableReplicas int) (updateErr error) {
 	// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
 	// we do a periodic relist every 30s. If the generations differ but the replicas are
 	// the same, a caller might've resized to the same replica count.
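updateReplicaCount keeps its GET/PUT retry behaviour; only the client type changes from the legacy unversioned interface to the generated typed interface. A hedged sketch of where a caller would now get that interface, using the fake clientset from the tests (the accessor chain here is an assumption, not shown in this diff):

package main

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

func main() {
	clientset := fake.NewSimpleClientset()
	// Extensions().ReplicaSets(namespace) yields the typed ReplicaSetInterface
	// that updateReplicaCount now accepts in place of the old unversioned client.
	rsClient := clientset.Extensions().ReplicaSets("default")
	_ = rsClient
}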
@@ -26,7 +26,7 @@ import (
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientretry "k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/registry/core/secret"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -298,7 +298,7 @@ func (e *TokensController) syncSecret() {
 		// If the service account exists
 		if sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, false); saErr == nil && sa != nil {
 			// secret no longer exists, so delete references to this secret from the service account
-			if err := client.RetryOnConflict(RemoveTokenBackoff, func() error {
+			if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error {
 				return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
 			}); err != nil {
 				glog.Error(err)
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/labels"
@@ -766,7 +767,7 @@ func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, names
 		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
 	}
 	oldRc := obj.(*api.ReplicationController)
-	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(rc)
 		if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
@@ -801,7 +802,7 @@ func updatePodWithRetries(podClient coreclient.PodsGetter, namespace string, pod
 		return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
 	}
 	oldPod := obj.(*api.Pod)
-	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
+	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(pod)
 		if pod, e = podClient.Pods(namespace).Update(pod); e == nil {
@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
 	"k8s.io/kubernetes/pkg/registry/core/service"
 	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
@@ -75,7 +75,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
 
 // RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
 func (c *Repair) RunOnce() error {
-	return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+	return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
 }
 
 // runOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs.
@@ -22,7 +22,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/retry"
 	"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
 	"k8s.io/kubernetes/pkg/registry/core/service"
 	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
@@ -61,7 +61,7 @@ func (c *Repair) RunUntil(ch chan struct{}) {
 
 // RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
 func (c *Repair) RunOnce() error {
-	return client.RetryOnConflict(client.DefaultBackoff, c.runOnce)
+	return retry.RetryOnConflict(retry.DefaultBackoff, c.runOnce)
 }
 
 // runOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.