From 8c4341de4e88095959cb08682f1b00895bb7f191 Mon Sep 17 00:00:00 2001
From: Anthony Yeh
Date: Mon, 19 Mar 2018 13:18:23 -0700
Subject: [PATCH] ReplicaSet: Use apps/v1 for RS controller.

---
 pkg/controller/replicaset/replica_set.go      |  34 ++--
 pkg/controller/replicaset/replica_set_test.go | 158 +++++++++---------
 .../replicaset/replica_set_utils.go           |  30 ++--
 .../replicaset/replica_set_utils_test.go      |  40 ++---
 4 files changed, 131 insertions(+), 131 deletions(-)

diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go
index 7c7ee2b778d..8139ce25889 100644
--- a/pkg/controller/replicaset/replica_set.go
+++ b/pkg/controller/replicaset/replica_set.go
@@ -36,21 +36,21 @@ import (
 	"time"

 	"github.com/golang/glog"
+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	appsinformers "k8s.io/client-go/informers/apps/v1"
 	coreinformers "k8s.io/client-go/informers/core/v1"
-	extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	appslisters "k8s.io/client-go/listers/apps/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
-	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/integer"
@@ -90,7 +90,7 @@ type ReplicaSetController struct {
 	expectations *controller.UIDTrackingControllerExpectations

 	// A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
-	rsLister extensionslisters.ReplicaSetLister
+	rsLister appslisters.ReplicaSetLister
 	// rsListerSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	rsListerSynced cache.InformerSynced
@@ -106,12 +106,12 @@ type ReplicaSetController struct {
 }

 // NewReplicaSetController configures a replica set controller with the specified event recorder
-func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
+func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
 	return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
-		extensions.SchemeGroupVersion.WithKind("ReplicaSet"),
+		apps.SchemeGroupVersion.WithKind("ReplicaSet"),
 		"replicaset_controller",
 		"replicaset",
 		controller.RealPodControl{
@@ -123,7 +123,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,

 // NewBaseController is the implementation of NewReplicaSetController with additional injected
 // parameters so that it can also serve as the implementation of NewReplicationController.
-func NewBaseController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
+func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
 	gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController {
 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -194,7 +194,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
 }

 // getPodReplicaSets returns a list of ReplicaSets matching the given pod.
-func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.ReplicaSet {
+func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet {
 	rss, err := rsc.rsLister.GetPodReplicaSets(pod)
 	if err != nil {
 		return nil
@@ -210,7 +210,7 @@ func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.Re
 // resolveControllerRef returns the controller referenced by a ControllerRef,
 // or nil if the ControllerRef could not be resolved to a matching controller
 // of the correct Kind.
-func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.ReplicaSet {
+func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.ReplicaSet {
 	// We can't look up by UID, so look up by Name and then verify UID.
 	// Don't even try to look up by Name if it's the wrong Kind.
 	if controllerRef.Kind != rsc.Kind {
@@ -230,8 +230,8 @@ func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controll

 // callback when RS is updated
 func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
-	oldRS := old.(*extensions.ReplicaSet)
-	curRS := cur.(*extensions.ReplicaSet)
+	oldRS := old.(*apps.ReplicaSet)
+	curRS := cur.(*apps.ReplicaSet)

 	// You might imagine that we only really need to enqueue the
 	// replica set when Spec changes, but it is safer to sync any
@@ -407,7 +407,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
 	rsc.enqueueReplicaSet(rs)
 }

-// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
+// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
 func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
@@ -417,7 +417,7 @@ func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
 	rsc.queue.Add(key)
 }

-// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
+// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
 func (rsc *ReplicaSetController) enqueueReplicaSetAfter(obj interface{}, after time.Duration) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
@@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
 // manageReplicas checks and updates replicas for the given ReplicaSet.
 // Does NOT modify <filteredPods>.
 // It will requeue the replica set in case of an error while creating/deleting pods.
-func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
+func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
 	diff := len(filteredPods) - int(*(rs.Spec.Replicas))
 	rsKey, err := controller.KeyFunc(rs)
 	if err != nil {
@@ -626,7 +626,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)

 	// Always updates status as pods come up or die.
-	updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus)
+	updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace), rs, newStatus)
 	if err != nil {
 		// Multiple things could lead to this update failing. Requeuing the replica set ensures
 		// Returning an error causes a requeue without forcing a hotloop
@@ -641,11 +641,11 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	return manageReplicasErr
 }

-func (rsc *ReplicaSetController) claimPods(rs *extensions.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
+func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go
index 6dc135676ba..4564df7807f 100644
--- a/pkg/controller/replicaset/replica_set_test.go
+++ b/pkg/controller/replicaset/replica_set_test.go
@@ -28,8 +28,8 @@ import (
 	"testing"
 	"time"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -54,7 +54,7 @@ func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh ch
 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())

 	ret := NewReplicaSetController(
-		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Apps().V1().ReplicaSets(),
 		informers.Core().V1().Pods(),
 		client,
 		burstReplicas,
@@ -78,7 +78,7 @@ func skipListerFunc(verb string, url url.URL) bool {

 var alwaysReady = func() bool { return true }

-func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
+func getKey(rs *apps.ReplicaSet, t *testing.T) string {
 	if key, err := controller.KeyFunc(rs); err != nil {
 		t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
 		return ""
@@ -87,8 +87,8 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
 	}
 }

-func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
-	rs := &extensions.ReplicaSet{
+func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
+	rs := &apps.ReplicaSet{
 		TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
 		ObjectMeta: metav1.ObjectMeta{
 			UID: uuid.NewUUID(),
@@ -96,7 +96,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
 			Namespace: metav1.NamespaceDefault,
 			ResourceVersion: "18",
 		},
-		Spec: extensions.ReplicaSetSpec{
+		Spec: apps.ReplicaSetSpec{
 			Replicas: func() *int32 { i := int32(replicas); return &i }(),
 			Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
 			Template: v1.PodTemplateSpec{
@@ -128,7 +128,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
 }

 // create a pod with the given phase for the given rs (same selectors and namespace)
-func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
+func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
 	var conditions []v1.PodCondition
 	if status == v1.PodRunning {
 		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
@@ -154,7 +154,7 @@ func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTran
 }

 // create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
-func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *v1.PodList {
+func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *apps.ReplicaSet, name string) *v1.PodList {
 	pods := []v1.Pod{}
 	var trueVar = true
 	controllerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
@@ -212,7 +212,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
 	// 2 running pods, a controller with 2 replicas, sync is a no-op
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")

 	manager.podControl = &fakePodControl
@@ -238,7 +238,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 	// the controller matching the selectors of the deleted pod into the work queue.
 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(1, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 	pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")

 	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
@@ -268,7 +268,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
 	defer close(stopCh)
 	manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)

-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	manager.podControl = &fakePodControl
 	manager.syncReplicaSet(getKey(rs, t))
@@ -303,7 +303,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {

 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod")

 	// Creates a replica and sets expectations
@@ -355,23 +355,23 @@ func TestPodControllerLookup(t *testing.T) {
 	defer close(stopCh)
 	manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas)
 	testCases := []struct {
-		inRSs     []*extensions.ReplicaSet
+		inRSs     []*apps.ReplicaSet
 		pod       *v1.Pod
 		outRSName string
 	}{
 		// pods without labels don't match any ReplicaSets
 		{
-			inRSs: []*extensions.ReplicaSet{
+			inRSs: []*apps.ReplicaSet{
 				{ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
 			pod:       &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
 			outRSName: "",
 		},
 		// Matching labels, not namespace
 		{
-			inRSs: []*extensions.ReplicaSet{
+			inRSs: []*apps.ReplicaSet{
 				{
 					ObjectMeta: metav1.ObjectMeta{Name: "foo"},
-					Spec: extensions.ReplicaSetSpec{
+					Spec: apps.ReplicaSetSpec{
 						Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
 					},
 				},
@@ -383,10 +383,10 @@ func TestPodControllerLookup(t *testing.T) {
 		},
 		// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
 		{
-			inRSs: []*extensions.ReplicaSet{
+			inRSs: []*apps.ReplicaSet{
 				{
 					ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
-					Spec: extensions.ReplicaSetSpec{
+					Spec: apps.ReplicaSetSpec{
 						Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
 					},
 				},
@@ -399,7 +399,7 @@ func TestPodControllerLookup(t *testing.T) {
 	}
 	for _, c := range testCases {
 		for _, r := range c.inRSs {
-			informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(r)
+			informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
 		}
 		if rss := manager.getPodReplicaSets(c.pod); rss != nil {
 			if len(rss) != 1 {
@@ -424,25 +424,25 @@ func TestWatchControllers(t *testing.T) {
 	defer close(stopCh)
 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
 	manager := NewReplicaSetController(
-		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Apps().V1().ReplicaSets(),
 		informers.Core().V1().Pods(),
 		client,
 		BurstReplicas,
 	)
 	informers.Start(stopCh)

-	var testRSSpec extensions.ReplicaSet
+	var testRSSpec apps.ReplicaSet
 	received := make(chan string)

 	// The update sent through the fakeWatcher should make its way into the workqueue,
 	// and eventually into the syncHandler. The handler validates the received controller
 	// and closes the received channel to indicate that the test can finish.
 	manager.syncHandler = func(key string) error {
-		obj, exists, err := informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
+		obj, exists, err := informers.Apps().V1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
 		if !exists || err != nil {
 			t.Errorf("Expected to find replica set under key %v", key)
 		}
-		rsSpec := *obj.(*extensions.ReplicaSet)
+		rsSpec := *obj.(*apps.ReplicaSet)
 		if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
 			t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
 		}
@@ -477,7 +477,7 @@ func TestWatchPods(t *testing.T) {
 	// Put one ReplicaSet into the shared informer
 	labelMap := map[string]string{"foo": "bar"}
 	testRSSpec := newReplicaSet(1, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)

 	received := make(chan string)
 	// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
@@ -540,12 +540,12 @@ func TestUpdatePods(t *testing.T) {
 	// Put 2 ReplicaSets and one pod into the informers
 	labelMap1 := map[string]string{"foo": "bar"}
 	testRSSpec1 := newReplicaSet(1, labelMap1)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
 	testRSSpec2 := *testRSSpec1
 	labelMap2 := map[string]string{"bar": "foo"}
 	testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2}
 	testRSSpec2.Name = "barfoo"
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)

 	isController := true
 	controllerRef1 := metav1.OwnerReference{UID: testRSSpec1.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: testRSSpec1.Name, Controller: &isController}
@@ -656,8 +656,8 @@ func TestControllerUpdateRequeue(t *testing.T) {
 	defer close(stopCh)
 	manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)

-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
-	rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	rs.Status = apps.ReplicaSetStatus{Replicas: 2}
 	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod")

 	fakePodControl := controller.FakePodControl{}
@@ -678,11 +678,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
 	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
-		return true, &extensions.ReplicaSet{}, fmt.Errorf("Fake error")
+		return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
 	})
-	fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
+	fakeRSClient := fakeClient.Apps().ReplicaSets("default")
 	numReplicas := int32(10)
-	newStatus := extensions.ReplicaSetStatus{Replicas: numReplicas}
+	newStatus := apps.ReplicaSetStatus{Replicas: numReplicas}
 	updateReplicaSetStatus(fakeRSClient, rs, newStatus)
 	updates, gets := 0, 0
 	for _, a := range fakeClient.Actions() {
@@ -702,7 +702,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 			updates++
 			// Confirm that the update has the right status.Replicas even though the Get
 			// returned a ReplicaSet with replicas=1.
-			if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok {
+			if c, ok := action.GetObject().(*apps.ReplicaSet); !ok {
 				t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c)
 			} else if c.Status.Replicas != numReplicas {
 				t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead",
@@ -729,7 +729,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 	manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas)
 	manager.podControl = &fakePodControl

-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

 	expectedPods := int32(0)
 	pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")
@@ -743,7 +743,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 	for _, replicas := range []int32{int32(numReplicas), 0} {
 		*(rsSpec.Spec.Replicas) = replicas
-		informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

 		for i := 0; i < numReplicas; i += burstReplicas {
 			manager.syncReplicaSet(getKey(rsSpec, t))
@@ -890,7 +890,7 @@ func TestRSSyncExpectations(t *testing.T) {

 	labelMap := map[string]string{"foo": "bar"}
 	rsSpec := newReplicaSet(2, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 	pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0])
 	postExpectationsPod := pods.Items[1]
@@ -914,7 +914,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	defer close(stopCh)
 	manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10)

-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)

 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
@@ -936,7 +936,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	if !exists || err != nil {
 		t.Errorf("No expectations found for ReplicaSet")
 	}
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Delete(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
 	manager.syncReplicaSet(getKey(rs, t))

 	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
@@ -951,10 +951,10 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 }

 // shuffle returns a new shuffled list of container controllers.
-func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
+func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {
 	numControllers := len(controllers)
 	randIndexes := rand.Perm(numControllers)
-	shuffled := make([]*extensions.ReplicaSet, numControllers)
+	shuffled := make([]*apps.ReplicaSet, numControllers)
 	for i := 0; i < numControllers; i++ {
 		shuffled[i] = controllers[randIndexes[i]]
 	}
@@ -974,7 +974,7 @@ func TestOverlappingRSs(t *testing.T) {
 	// All use the same CreationTimestamp since ControllerRef should be able
 	// to handle that.
 	timestamp := metav1.Date(2014, time.December, 0, 0, 0, 0, 0, time.Local)
-	var controllers []*extensions.ReplicaSet
+	var controllers []*apps.ReplicaSet
 	for j := 1; j < 10; j++ {
 		rsSpec := newReplicaSet(1, labelMap)
 		rsSpec.CreationTimestamp = timestamp
@@ -983,7 +983,7 @@ func TestOverlappingRSs(t *testing.T) {
 	}
 	shuffledControllers := shuffle(controllers)
 	for j := range shuffledControllers {
-		informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
+		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
 	}

 	// Add a pod with a ControllerRef and make sure only the corresponding
 	// ReplicaSet is synced. Pick a RS in the middle since the old code used to
@@ -1012,7 +1012,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	manager, informers := testNewReplicaSetControllerFromClient(c, stopCh, 10)

 	rs := newReplicaSet(1, labelMap)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	rsKey, err := controller.KeyFunc(rs)
 	if err != nil {
 		t.Errorf("Couldn't get key for object %#v: %v", rs, err)
@@ -1116,7 +1116,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	var trueVar = true
 	otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
 	// add to podLister a matching Pod controlled by another controller. Expect no patch.
@@ -1137,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	// add to podLister two matching pods. Expect two patches to take control
 	// them.
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
@@ -1169,7 +1169,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)

@@ -1193,7 +1193,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
 	// Lister (cache) says it's NOT deleted.
 	rs2 := *rs
 	rs2.DeletionTimestamp = nil
-	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&rs2)
+	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&rs2)

 	// Recheck occurs if a matching orphan is present.
pod1 := newPod("pod1", rs, v1.PodRunning, nil, false) @@ -1209,35 +1209,35 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) { } var ( - imagePullBackOff extensions.ReplicaSetConditionType = "ImagePullBackOff" + imagePullBackOff apps.ReplicaSetConditionType = "ImagePullBackOff" - condImagePullBackOff = func() extensions.ReplicaSetCondition { - return extensions.ReplicaSetCondition{ + condImagePullBackOff = func() apps.ReplicaSetCondition { + return apps.ReplicaSetCondition{ Type: imagePullBackOff, Status: v1.ConditionTrue, Reason: "NonExistentImage", } } - condReplicaFailure = func() extensions.ReplicaSetCondition { - return extensions.ReplicaSetCondition{ - Type: extensions.ReplicaSetReplicaFailure, + condReplicaFailure = func() apps.ReplicaSetCondition { + return apps.ReplicaSetCondition{ + Type: apps.ReplicaSetReplicaFailure, Status: v1.ConditionTrue, Reason: "OtherFailure", } } - condReplicaFailure2 = func() extensions.ReplicaSetCondition { - return extensions.ReplicaSetCondition{ - Type: extensions.ReplicaSetReplicaFailure, + condReplicaFailure2 = func() apps.ReplicaSetCondition { + return apps.ReplicaSetCondition{ + Type: apps.ReplicaSetReplicaFailure, Status: v1.ConditionTrue, Reason: "AnotherFailure", } } - status = func() *extensions.ReplicaSetStatus { - return &extensions.ReplicaSetStatus{ - Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}, + status = func() *apps.ReplicaSetStatus { + return &apps.ReplicaSetStatus{ + Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}, } } ) @@ -1248,8 +1248,8 @@ func TestGetCondition(t *testing.T) { tests := []struct { name string - status extensions.ReplicaSetStatus - condType extensions.ReplicaSetConditionType + status apps.ReplicaSetStatus + condType apps.ReplicaSetConditionType condStatus v1.ConditionStatus condReason string @@ -1259,7 +1259,7 @@ func TestGetCondition(t *testing.T) { name: "condition exists", status: *exampleStatus, - condType: extensions.ReplicaSetReplicaFailure, + condType: apps.ReplicaSetReplicaFailure, expected: true, }, @@ -1286,34 +1286,34 @@ func TestSetCondition(t *testing.T) { tests := []struct { name string - status *extensions.ReplicaSetStatus - cond extensions.ReplicaSetCondition + status *apps.ReplicaSetStatus + cond apps.ReplicaSetCondition - expectedStatus *extensions.ReplicaSetStatus + expectedStatus *apps.ReplicaSetStatus }{ { name: "set for the first time", - status: &extensions.ReplicaSetStatus{}, + status: &apps.ReplicaSetStatus{}, cond: condReplicaFailure(), - expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}}, + expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}}, }, { name: "simple set", - status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff()}}, + status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff()}}, cond: condReplicaFailure(), - expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}}, + expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}}, }, { name: "overwrite", - status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}}, + status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}}, cond: condReplicaFailure2(), - expectedStatus: 
&extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure2()}}, + expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure2()}}, }, } @@ -1329,26 +1329,26 @@ func TestRemoveCondition(t *testing.T) { tests := []struct { name string - status *extensions.ReplicaSetStatus - condType extensions.ReplicaSetConditionType + status *apps.ReplicaSetStatus + condType apps.ReplicaSetConditionType - expectedStatus *extensions.ReplicaSetStatus + expectedStatus *apps.ReplicaSetStatus }{ { name: "remove from empty status", - status: &extensions.ReplicaSetStatus{}, - condType: extensions.ReplicaSetReplicaFailure, + status: &apps.ReplicaSetStatus{}, + condType: apps.ReplicaSetReplicaFailure, - expectedStatus: &extensions.ReplicaSetStatus{}, + expectedStatus: &apps.ReplicaSetStatus{}, }, { name: "simple remove", - status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}}, - condType: extensions.ReplicaSetReplicaFailure, + status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}}, + condType: apps.ReplicaSetReplicaFailure, - expectedStatus: &extensions.ReplicaSetStatus{}, + expectedStatus: &apps.ReplicaSetStatus{}, }, { name: "doesn't remove anything", diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index ad628da9551..de915e522a3 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -24,16 +24,16 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) // updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry. -func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (*extensions.ReplicaSet, error) { +func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSet, newStatus apps.ReplicaSetStatus) (*apps.ReplicaSet, error) { // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. 
@@ -53,7 +53,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
 	newStatus.ObservedGeneration = rs.Generation

 	var getErr, updateErr error
-	var updatedRS *extensions.ReplicaSet
+	var updatedRS *apps.ReplicaSet
 	for i, rs := 0, rs; ; i++ {
 		glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
 			fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
@@ -82,7 +82,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
 	return nil, updateErr
 }

-func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
+func calculateStatus(rs *apps.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) apps.ReplicaSetStatus {
 	newStatus := rs.Status
 	// Count the number of pods that have labels matching the labels of the pod
 	// template of the replica set, the matching pods may have more
@@ -105,7 +105,7 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
 		}
 	}

-	failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
+	failureCond := GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
 	if manageReplicasErr != nil && failureCond == nil {
 		var reason string
 		if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
@@ -113,10 +113,10 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
 		} else if diff > 0 {
 			reason = "FailedDelete"
 		}
-		cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
+		cond := NewReplicaSetCondition(apps.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
 		SetCondition(&newStatus, cond)
 	} else if manageReplicasErr == nil && failureCond != nil {
-		RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
+		RemoveCondition(&newStatus, apps.ReplicaSetReplicaFailure)
 	}

 	newStatus.Replicas = int32(len(filteredPods))
@@ -127,8 +127,8 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
 }

 // NewReplicaSetCondition creates a new replicaset condition.
-func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
-	return extensions.ReplicaSetCondition{
+func NewReplicaSetCondition(condType apps.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) apps.ReplicaSetCondition {
+	return apps.ReplicaSetCondition{
 		Type: condType,
 		Status: status,
 		LastTransitionTime: metav1.Now(),
@@ -138,7 +138,7 @@ func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status
 }

 // GetCondition returns a replicaset condition with the provided type if it exists.
-func GetCondition(status extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) *extensions.ReplicaSetCondition {
+func GetCondition(status apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) *apps.ReplicaSetCondition {
 	for _, c := range status.Conditions {
 		if c.Type == condType {
 			return &c
@@ -149,7 +149,7 @@ func GetCondition(status extensions.ReplicaSetStatus, condType extensions.Replic

 // SetCondition adds/replaces the given condition in the replicaset status. If the condition that we
 // are about to add already exists and has the same status and reason then we are not going to update.
-func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.ReplicaSetCondition) {
+func SetCondition(status *apps.ReplicaSetStatus, condition apps.ReplicaSetCondition) {
 	currentCond := GetCondition(*status, condition.Type)
 	if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
 		return
@@ -159,13 +159,13 @@ func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.Repl
 }

 // RemoveCondition removes the condition with the provided type from the replicaset status.
-func RemoveCondition(status *extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) {
+func RemoveCondition(status *apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) {
 	status.Conditions = filterOutCondition(status.Conditions, condType)
 }

 // filterOutCondition returns a new slice of replicaset conditions without conditions with the provided type.
-func filterOutCondition(conditions []extensions.ReplicaSetCondition, condType extensions.ReplicaSetConditionType) []extensions.ReplicaSetCondition {
-	var newConditions []extensions.ReplicaSetCondition
+func filterOutCondition(conditions []apps.ReplicaSetCondition, condType apps.ReplicaSetConditionType) []apps.ReplicaSetCondition {
+	var newConditions []apps.ReplicaSetCondition
 	for _, c := range conditions {
 		if c.Type == condType {
 			continue
diff --git a/pkg/controller/replicaset/replica_set_utils_test.go b/pkg/controller/replicaset/replica_set_utils_test.go
index fbde4d0b40a..6f65315e77f 100644
--- a/pkg/controller/replicaset/replica_set_utils_test.go
+++ b/pkg/controller/replicaset/replica_set_utils_test.go
@@ -23,8 +23,8 @@ import (
 	"reflect"
 	"testing"

+	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 )

 func TestCalculateStatus(t *testing.T) {
@@ -38,9 +38,9 @@ func TestCalculateStatus(t *testing.T) {
 	rsStatusTests := []struct {
 		name string
-		replicaset *extensions.ReplicaSet
+		replicaset *apps.ReplicaSet
 		filteredPods []*v1.Pod
-		expectedReplicaSetStatus extensions.ReplicaSetStatus
+		expectedReplicaSetStatus apps.ReplicaSetStatus
 	}{
 		{
 			"1 fully labelled pod",
@@ -48,7 +48,7 @@ func TestCalculateStatus(t *testing.T) {
 			[]*v1.Pod{
 				newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 1,
 				FullyLabeledReplicas: 1,
 				ReadyReplicas: 1,
@@ -61,7 +61,7 @@ func TestCalculateStatus(t *testing.T) {
 			[]*v1.Pod{
 				newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 1,
 				FullyLabeledReplicas: 0,
 				ReadyReplicas: 1,
@@ -75,7 +75,7 @@ func TestCalculateStatus(t *testing.T) {
 				newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
 				newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 2,
 				FullyLabeledReplicas: 2,
 				ReadyReplicas: 2,
@@ -89,7 +89,7 @@ func TestCalculateStatus(t *testing.T) {
 				newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
 				newPod("pod2", notFullyLabelledRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 2,
 				FullyLabeledReplicas: 0,
 				ReadyReplicas: 2,
@@ -103,7 +103,7 @@ func TestCalculateStatus(t *testing.T) {
 				newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
 				newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 2,
 				FullyLabeledReplicas: 1,
 				ReadyReplicas: 2,
@@ -116,7 +116,7 @@ func TestCalculateStatus(t *testing.T) {
 			[]*v1.Pod{
 				newPod("pod1", fullyLabelledRS, v1.PodPending, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 1,
 				FullyLabeledReplicas: 1,
 				ReadyReplicas: 0,
@@ -129,7 +129,7 @@ func TestCalculateStatus(t *testing.T) {
 			[]*v1.Pod{
 				newPod("pod1", longMinReadySecondsRS, v1.PodRunning, nil, true),
 			},
-			extensions.ReplicaSetStatus{
+			apps.ReplicaSetStatus{
 				Replicas: 1,
 				FullyLabeledReplicas: 1,
 				ReadyReplicas: 1,
@@ -150,19 +150,19 @@ func TestCalculateStatusConditions(t *testing.T) {
 	labelMap := map[string]string{"name": "foo"}
 	rs := newReplicaSet(2, labelMap)
 	replicaFailureRS := newReplicaSet(10, labelMap)
-	replicaFailureRS.Status.Conditions = []extensions.ReplicaSetCondition{
+	replicaFailureRS.Status.Conditions = []apps.ReplicaSetCondition{
 		{
-			Type: extensions.ReplicaSetReplicaFailure,
+			Type: apps.ReplicaSetReplicaFailure,
 			Status: v1.ConditionTrue,
 		},
 	}

 	rsStatusConditionTests := []struct {
 		name string
-		replicaset *extensions.ReplicaSet
+		replicaset *apps.ReplicaSet
 		filteredPods []*v1.Pod
 		manageReplicasErr error
-		expectedReplicaSetConditions []extensions.ReplicaSetCondition
+		expectedReplicaSetConditions []apps.ReplicaSetCondition
 	}{
 		{
@@ -172,9 +172,9 @@ func TestCalculateStatusConditions(t *testing.T) {
 				newPod("pod1", rs, v1.PodRunning, nil, true),
 			},
 			fmt.Errorf("fake manageReplicasErr"),
-			[]extensions.ReplicaSetCondition{
+			[]apps.ReplicaSetCondition{
 				{
-					Type: extensions.ReplicaSetReplicaFailure,
+					Type: apps.ReplicaSetReplicaFailure,
 					Status: v1.ConditionTrue,
 					Reason: "FailedCreate",
 					Message: "fake manageReplicasErr",
@@ -190,9 +190,9 @@ func TestCalculateStatusConditions(t *testing.T) {
 				newPod("pod3", rs, v1.PodRunning, nil, true),
 			},
 			fmt.Errorf("fake manageReplicasErr"),
-			[]extensions.ReplicaSetCondition{
+			[]apps.ReplicaSetCondition{
 				{
-					Type: extensions.ReplicaSetReplicaFailure,
+					Type: apps.ReplicaSetReplicaFailure,
 					Status: v1.ConditionTrue,
 					Reason: "FailedDelete",
 					Message: "fake manageReplicasErr",
@@ -215,9 +215,9 @@ func TestCalculateStatusConditions(t *testing.T) {
 				newPod("pod1", replicaFailureRS, v1.PodRunning, nil, true),
 			},
 			fmt.Errorf("fake manageReplicasErr"),
-			[]extensions.ReplicaSetCondition{
+			[]apps.ReplicaSetCondition{
 				{
-					Type: extensions.ReplicaSetReplicaFailure,
+					Type: apps.ReplicaSetReplicaFailure,
 					Status: v1.ConditionTrue,
 				},
 			},
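To close, a hypothetical wiring sketch (not part of this patch) showing what a caller of NewReplicaSetController looks like after the move: the ReplicaSet informer now comes from the shared factory's Apps().V1() group, mirroring testNewReplicaSetControllerFromClient above. The kubeconfig handling is illustrative boilerplate.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func main() {
	// Hypothetical setup: load whatever kubeconfig sits at the default path.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	stopCh := make(chan struct{})
	defer close(stopCh)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	rsc := replicaset.NewReplicaSetController(
		factory.Apps().V1().ReplicaSets(), // was Extensions().V1beta1() before this patch
		factory.Core().V1().Pods(),
		client,
		replicaset.BurstReplicas,
	)

	factory.Start(stopCh)
	rsc.Run(4, stopCh) // blocks until stopCh is closed
}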