diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 74462342012..64c82a719a2 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -11,7 +11,6 @@ go_library( "cloudproviders.go", "controllermanager.go", "core.go", - "extensions.go", "import_known_versions.go", "plugins.go", "policy.go", diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index 143367e3e9d..7525f174bbe 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/controller/daemon" + "k8s.io/kubernetes/pkg/controller/deployment" "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/pkg/controller/statefulset" ) @@ -73,3 +74,20 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) { ).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop) return true, nil } + +func startDeploymentController(ctx ControllerContext) (bool, error) { + if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}] { + return false, nil + } + dc, err := deployment.NewDeploymentController( + ctx.InformerFactory.Apps().V1().Deployments(), + ctx.InformerFactory.Apps().V1().ReplicaSets(), + ctx.InformerFactory.Core().V1().Pods(), + ctx.ClientBuilder.ClientOrDie("deployment-controller"), + ) + if err != nil { + return true, fmt.Errorf("error creating Deployment controller: %v", err) + } + go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop) + return true, nil +} diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go deleted file mode 100644 index 6cca85bab6e..00000000000 --- a/cmd/kube-controller-manager/app/extensions.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package app implements a server that runs a set of active -// components. This includes replication controllers, service endpoints and -// nodes. 
-// -package app - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/controller/deployment" -) - -func startDeploymentController(ctx ControllerContext) (bool, error) { - if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] { - return false, nil - } - dc, err := deployment.NewDeploymentController( - ctx.InformerFactory.Extensions().V1beta1().Deployments(), - ctx.InformerFactory.Extensions().V1beta1().ReplicaSets(), - ctx.InformerFactory.Core().V1().Pods(), - ctx.ClientBuilder.ClientOrDie("deployment-controller"), - ) - if err != nil { - return true, fmt.Errorf("error creating Deployment controller: %v", err) - } - go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop) - return true, nil -} diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index 3c9438c63fe..d2d5ed451f2 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -19,8 +19,8 @@ go_test( "//pkg/controller/testutil:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -64,7 +64,6 @@ go_library( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/controller_ref_manager.go b/pkg/controller/controller_ref_manager.go index 21d7aa302ea..6cf2ac18946 100644 --- a/pkg/controller/controller_ref_manager.go +++ b/pkg/controller/controller_ref_manager.go @@ -23,7 +23,6 @@ import ( "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -301,18 +300,18 @@ func NewReplicaSetControllerRefManager( // If the error is nil, either the reconciliation succeeded, or no // reconciliation was necessary. The list of ReplicaSets that you now own is // returned. 
-func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, error) { - var claimed []*extensions.ReplicaSet +func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) { + var claimed []*apps.ReplicaSet var errlist []error match := func(obj metav1.Object) bool { return m.Selector.Matches(labels.Set(obj.GetLabels())) } adopt := func(obj metav1.Object) error { - return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet)) + return m.AdoptReplicaSet(obj.(*apps.ReplicaSet)) } release := func(obj metav1.Object) error { - return m.ReleaseReplicaSet(obj.(*extensions.ReplicaSet)) + return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet)) } for _, rs := range sets { @@ -330,7 +329,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep // AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns // the error if the patching fails. -func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error { +func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error { if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err) } @@ -345,7 +344,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. -func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error { +func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) diff --git a/pkg/controller/controller_ref_manager_test.go b/pkg/controller/controller_ref_manager_test.go index 4e6acb7cf38..fe878176430 100644 --- a/pkg/controller/controller_ref_manager_test.go +++ b/pkg/controller/controller_ref_manager_test.go @@ -20,8 +20,8 @@ import ( "reflect" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" @@ -52,7 +52,7 @@ func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Po }, } if owner != nil { - pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, v1beta1.SchemeGroupVersion.WithKind("Fake"))} + pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))} } return pod } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index cc3f5828008..fb00a7567ad 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -25,8 +25,8 @@ import ( "sync/atomic" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -818,18 +818,18 @@ func IsPodActive(p *v1.Pod) bool { } // 
FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods. -func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet { - activeFilter := func(rs *extensions.ReplicaSet) bool { +func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet { + activeFilter := func(rs *apps.ReplicaSet) bool { return rs != nil && *(rs.Spec.Replicas) > 0 } return FilterReplicaSets(replicaSets, activeFilter) } -type filterRS func(rs *extensions.ReplicaSet) bool +type filterRS func(rs *apps.ReplicaSet) bool // FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn). -func FilterReplicaSets(RSes []*extensions.ReplicaSet, filterFn filterRS) []*extensions.ReplicaSet { - var filtered []*extensions.ReplicaSet +func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet { + var filtered []*apps.ReplicaSet for i := range RSes { if filterFn(RSes[i]) { filtered = append(filtered, RSes[i]) @@ -859,7 +859,7 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool { } // ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. -type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet +type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -872,7 +872,7 @@ func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { // ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from old to new replica sets. -type ReplicaSetsBySizeOlder []*extensions.ReplicaSet +type ReplicaSetsBySizeOlder []*apps.ReplicaSet func (o ReplicaSetsBySizeOlder) Len() int { return len(o) } func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -885,7 +885,7 @@ func (o ReplicaSetsBySizeOlder) Less(i, j int) bool { // ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from new to old replica sets. 
-type ReplicaSetsBySizeNewer []*extensions.ReplicaSet +type ReplicaSetsBySizeNewer []*apps.ReplicaSet func (o ReplicaSetsBySizeNewer) Len() int { return len(o) } func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index e6ecef86b3c..857b8b46fef 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -27,8 +27,8 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -122,8 +122,8 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica } } -func newReplicaSet(name string, replicas int) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func newReplicaSet(name string, replicas int) *apps.ReplicaSet { + return &apps.ReplicaSet{ TypeMeta: metav1.TypeMeta{APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -131,7 +131,7 @@ func newReplicaSet(name string, replicas int) *extensions.ReplicaSet { Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func() *int32 { i := int32(replicas); return &i }(), Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: v1.PodTemplateSpec{ @@ -417,7 +417,7 @@ func TestSortingActivePods(t *testing.T) { } func TestActiveReplicaSetsFiltering(t *testing.T) { - var replicaSets []*extensions.ReplicaSet + var replicaSets []*apps.ReplicaSet replicaSets = append(replicaSets, newReplicaSet("zero", 0)) replicaSets = append(replicaSets, nil) replicaSets = append(replicaSets, newReplicaSet("foo", 1)) diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD index da5d39a8348..05339b46622 100644 --- a/pkg/controller/deployment/BUILD +++ b/pkg/controller/deployment/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -32,13 +33,13 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", @@ -64,13 +65,13 @@ go_test( 
"//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", "//pkg/apis/core/install:go_default_library", - "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/policy/install:go_default_library", "//pkg/apis/rbac/install:go_default_library", "//pkg/apis/settings/install:go_default_library", "//pkg/apis/storage/install:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index ecff7d5142d..8b14efddb22 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -27,21 +27,21 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" - extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + appslisters "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -60,7 +60,7 @@ const ( ) // controllerKind contains the schema.GroupVersionKind for this controller type. -var controllerKind = extensions.SchemeGroupVersion.WithKind("Deployment") +var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment") // DeploymentController is responsible for synchronizing Deployment objects stored // in the system with actual running replica sets and pods. @@ -73,12 +73,12 @@ type DeploymentController struct { // To allow injection of syncDeployment for testing. syncHandler func(dKey string) error // used for unit testing - enqueueDeployment func(deployment *extensions.Deployment) + enqueueDeployment func(deployment *apps.Deployment) // dLister can list/get deployments from the shared informer's store - dLister extensionslisters.DeploymentLister + dLister appslisters.DeploymentLister // rsLister can list/get replica sets from the shared informer's store - rsLister extensionslisters.ReplicaSetLister + rsLister appslisters.ReplicaSetLister // podLister can list/get pods from the shared informer's store podLister corelisters.PodLister @@ -97,7 +97,7 @@ type DeploymentController struct { } // NewDeploymentController creates a new DeploymentController. 
-func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) { +func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) @@ -164,27 +164,27 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) { } func (dc *DeploymentController) addDeployment(obj interface{}) { - d := obj.(*extensions.Deployment) + d := obj.(*apps.Deployment) glog.V(4).Infof("Adding deployment %s", d.Name) dc.enqueueDeployment(d) } func (dc *DeploymentController) updateDeployment(old, cur interface{}) { - oldD := old.(*extensions.Deployment) - curD := cur.(*extensions.Deployment) + oldD := old.(*apps.Deployment) + curD := cur.(*apps.Deployment) glog.V(4).Infof("Updating deployment %s", oldD.Name) dc.enqueueDeployment(curD) } func (dc *DeploymentController) deleteDeployment(obj interface{}) { - d, ok := obj.(*extensions.Deployment) + d, ok := obj.(*apps.Deployment) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) return } - d, ok = tombstone.Obj.(*extensions.Deployment) + d, ok = tombstone.Obj.(*apps.Deployment) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Deployment %#v", obj)) return @@ -196,7 +196,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) { // addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created. func (dc *DeploymentController) addReplicaSet(obj interface{}) { - rs := obj.(*extensions.ReplicaSet) + rs := obj.(*apps.ReplicaSet) if rs.DeletionTimestamp != nil { // On a restart of the controller manager, it's possible for an object to @@ -230,7 +230,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) { // getDeploymentsForReplicaSet returns a list of Deployments that potentially // match a ReplicaSet. -func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.ReplicaSet) []*extensions.Deployment { +func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment { deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs) if err != nil || len(deployments) == 0 { return nil @@ -250,11 +250,11 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.Repli // updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet // is updated and wakes them up. If anything in the ReplicaSets has changed, we need to -// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet +// awaken both the old and new deployments. old and cur must be *apps.ReplicaSet // types. func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) { - curRS := cur.(*extensions.ReplicaSet) - oldRS := old.(*extensions.ReplicaSet) + curRS := cur.(*apps.ReplicaSet) + oldRS := old.(*apps.ReplicaSet) if curRS.ResourceVersion == oldRS.ResourceVersion { // Periodic resync will send update events for all known replica sets. 
// Two different versions of the same replica set will always have different RVs. @@ -298,10 +298,10 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) { } // deleteReplicaSet enqueues the deployment that manages a ReplicaSet when -// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or +// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or // a DeletionFinalStateUnknown marker item. func (dc *DeploymentController) deleteReplicaSet(obj interface{}) { - rs, ok := obj.(*extensions.ReplicaSet) + rs, ok := obj.(*apps.ReplicaSet) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains @@ -313,7 +313,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) return } - rs, ok = tombstone.Obj.(*extensions.ReplicaSet) + rs, ok = tombstone.Obj.(*apps.ReplicaSet) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ReplicaSet %#v", obj)) return @@ -354,9 +354,9 @@ func (dc *DeploymentController) deletePod(obj interface{}) { } } glog.V(4).Infof("Pod %s deleted.", pod.Name) - if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == extensions.RecreateDeploymentStrategyType { + if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType { // Sync if this Deployment now has no more Pods. - rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.ExtensionsV1beta1())) + rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1())) if err != nil { return } @@ -374,7 +374,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) { } } -func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) { +func (dc *DeploymentController) enqueue(deployment *apps.Deployment) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -384,7 +384,7 @@ func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) { dc.queue.Add(key) } -func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deployment) { +func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -395,7 +395,7 @@ func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deploy } // enqueueAfter will enqueue a deployment after the provided amount of time. -func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, after time.Duration) { +func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -406,16 +406,16 @@ func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, } // getDeploymentForPod returns the deployment managing the given Pod. 
-func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Deployment { +func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment { // Find the owning replica set - var rs *extensions.ReplicaSet + var rs *apps.ReplicaSet var err error controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { // No controller owns this Pod. return nil } - if controllerRef.Kind != extensions.SchemeGroupVersion.WithKind("ReplicaSet").Kind { + if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind { // Not a pod owned by a replica set. return nil } @@ -436,7 +436,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Dep // resolveControllerRef returns the controller referenced by a ControllerRef, // or nil if the ControllerRef could not be resolved to a matching controller // of the correct Kind. -func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.Deployment { +func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment { // We can't look up by UID, so look up by Name and then verify UID. // Don't even try to look up by Name if it's the wrong Kind. if controllerRef.Kind != controllerKind.Kind { @@ -494,7 +494,7 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) { // getReplicaSetsForDeployment uses ControllerRefManager to reconcile // ControllerRef by adopting and orphaning. // It returns the list of ReplicaSets that this Deployment should manage. -func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deployment) ([]*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) { // List all ReplicaSets to find those we own but that no longer match our // selector. They will be orphaned by ClaimReplicaSets(). rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything()) @@ -508,7 +508,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing ReplicaSets (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -525,7 +525,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy // // It returns a map from ReplicaSet UID to a list of Pods controlled by that RS, // according to the Pod's ControllerRef. -func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment, rsList []*extensions.ReplicaSet) (map[types.UID]*v1.PodList, error) { +func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) { // Get all Pods that potentially belong to this Deployment. selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) if err != nil { @@ -586,7 +586,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. 
A non-empty selector is required.") if d.Status.ObservedGeneration < d.Generation { d.Status.ObservedGeneration = d.Generation - dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } return nil } @@ -625,7 +625,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { // rollback is not re-entrant in case the underlying replica sets are updated with a new // revision so we should ensure that we won't proceed to update replica sets until we // make sure that the deployment has cleaned up its rollback spec in subsequent enqueues. - if d.Spec.RollbackTo != nil { + if getRollbackTo(d) != nil { return dc.rollback(d, rsList, podMap) } @@ -638,9 +638,9 @@ func (dc *DeploymentController) syncDeployment(key string) error { } switch d.Spec.Strategy.Type { - case extensions.RecreateDeploymentStrategyType: + case apps.RecreateDeploymentStrategyType: return dc.rolloutRecreate(d, rsList, podMap) - case extensions.RollingUpdateDeploymentStrategyType: + case apps.RollingUpdateDeploymentStrategyType: return dc.rolloutRolling(d, rsList, podMap) } return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type) diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index a75f8daf8ee..6b289fe0d14 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -20,6 +20,7 @@ import ( "strconv" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +39,6 @@ import ( _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/certificates/install" _ "k8s.io/kubernetes/pkg/apis/core/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" _ "k8s.io/kubernetes/pkg/apis/policy/install" _ "k8s.io/kubernetes/pkg/apis/rbac/install" _ "k8s.io/kubernetes/pkg/apis/settings/install" @@ -52,14 +52,14 @@ var ( noTimestamp = metav1.Time{} ) -func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *apps.ReplicaSet { + return &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, CreationTimestamp: timestamp, Namespace: metav1.NamespaceDefault, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func() *int32 { i := int32(replicas); return &i }(), Selector: &metav1.LabelSelector{MatchLabels: selector}, Template: v1.PodTemplateSpec{}, @@ -67,27 +67,27 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1. 
} } -func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet { +func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *apps.ReplicaSet { rs := rs(name, specReplicas, selector, noTimestamp) - rs.Status = extensions.ReplicaSetStatus{ + rs.Status = apps.ReplicaSetStatus{ Replicas: int32(statusReplicas), } return rs } -func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment { - d := extensions.Deployment{ - TypeMeta: metav1.TypeMeta{APIVersion: "extensions/v1beta1"}, +func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment { + d := apps.Deployment{ + TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: name, Namespace: metav1.NamespaceDefault, Annotations: make(map[string]string), }, - Spec: extensions.DeploymentSpec{ - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RollingUpdateDeploymentStrategyType, - RollingUpdate: &extensions.RollingUpdateDeployment{ + Spec: apps.DeploymentSpec{ + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(), MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(), }, @@ -118,8 +118,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu return &d } -func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet { + return &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, UID: uuid.NewUUID(), @@ -127,7 +127,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi Labels: d.Spec.Selector.MatchLabels, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: d.Spec.Selector, Replicas: func() *int32 { i := int32(replicas); return &i }(), Template: d.Spec.Template, @@ -135,7 +135,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi } } -func getKey(d *extensions.Deployment, t *testing.T) string { +func getKey(d *apps.Deployment, t *testing.T) string { if key, err := controller.KeyFunc(d); err != nil { t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err) return "" @@ -149,8 +149,8 @@ type fixture struct { client *fake.Clientset // Objects to put in the store. - dLister []*extensions.Deployment - rsLister []*extensions.ReplicaSet + dLister []*apps.Deployment + rsLister []*apps.ReplicaSet podLister []*v1.Pod // Actions expected to happen on the client. 
Objects from here are also @@ -159,23 +159,23 @@ type fixture struct { objects []runtime.Object } -func (f *fixture) expectGetDeploymentAction(d *extensions.Deployment) { +func (f *fixture) expectGetDeploymentAction(d *apps.Deployment) { action := core.NewGetAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d.Name) f.actions = append(f.actions, action) } -func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) { +func (f *fixture) expectUpdateDeploymentStatusAction(d *apps.Deployment) { action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d) action.Subresource = "status" f.actions = append(f.actions, action) } -func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) { +func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) { action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d) f.actions = append(f.actions, action) } -func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) { +func (f *fixture) expectCreateRSAction(rs *apps.ReplicaSet) { f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs)) } @@ -189,7 +189,7 @@ func newFixture(t *testing.T) *fixture { func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory, error) { f.client = fake.NewSimpleClientset(f.objects...) informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc()) - c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client) + c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), f.client) if err != nil { return nil, nil, err } @@ -198,10 +198,10 @@ func (f *fixture) newController() (*DeploymentController, informers.SharedInform c.rsListerSynced = alwaysReady c.podListerSynced = alwaysReady for _, d := range f.dLister { - informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d) + informers.Apps().V1().Deployments().Informer().GetIndexer().Add(d) } for _, rs := range f.rsLister { - informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) + informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs) } for _, pod := range f.podLister { informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) @@ -344,20 +344,19 @@ func TestReentrantRollback(t *testing.T) { f := newFixture(t) d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - - d.Spec.RollbackTo = &extensions.RollbackConfig{Revision: 0} d.Annotations = map[string]string{util.RevisionAnnotation: "2"} + setRollbackTo(d, &extensions.RollbackConfig{Revision: 0}) f.dLister = append(f.dLister, d) rs1 := newReplicaSet(d, "deploymentrs-old", 0) rs1.Annotations = map[string]string{util.RevisionAnnotation: "1"} one := int64(1) rs1.Spec.Template.Spec.TerminationGracePeriodSeconds = &one - rs1.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + rs1.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash" rs2 := newReplicaSet(d, "deploymentrs-new", 1) rs2.Annotations = map[string]string{util.RevisionAnnotation: "2"} - rs2.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + rs2.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = 
"hash" f.rsLister = append(f.rsLister, rs1, rs2) f.objects = append(f.objects, d, rs1, rs2) @@ -375,7 +374,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) { f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs := newReplicaSet(foo, "foo-1", 1) pod := generatePodFromRS(rs) @@ -388,7 +387,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) { t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -408,7 +407,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) { f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-1", 1) pod1 := generatePodFromRS(rs1) @@ -424,7 +423,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) { t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -445,7 +444,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-2", 2) rs2.OwnerReferences = nil @@ -460,7 +459,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -481,7 +480,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-2", 2) rs2.OwnerReferences = nil @@ -499,7 +498,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -972,7 +971,7 @@ func bumpResourceVersion(obj metav1.Object) { } // generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template -func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod { +func generatePodFromRS(rs *apps.ReplicaSet) *v1.Pod { trueVar := true return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/deployment/progress.go b/pkg/controller/deployment/progress.go index 1cd25ed1fff..cfe35ebab4d 100644 --- a/pkg/controller/deployment/progress.go +++ 
b/pkg/controller/deployment/progress.go @@ -23,8 +23,8 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller/deployment/util" ) @@ -32,18 +32,18 @@ import ( // cases this helper will run that cannot be prevented from the scaling detection, // for example a resync of the deployment after it was scaled up. In those cases, // we shouldn't try to estimate any progress. -func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { +func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) // If there is no progressDeadlineSeconds set, remove any Progressing condition. if d.Spec.ProgressDeadlineSeconds == nil { - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } // If there is only one replica set that is active then that means we are not running // a new rollout and this is a resync where we don't need to estimate any progress. // In such a case, we should simply not estimate any progress for this deployment. - currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason // Check for progress only if there is a progress deadline set and the latest rollout // hasn't completed yet. @@ -56,7 +56,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) util.SetDeploymentCondition(&newStatus, *condition) case util.DeploymentProgressing(d, &newStatus): @@ -66,7 +66,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg) // Update the current Progressing condition or add a new one if it doesn't exist. // If a Progressing condition with status=true already exists, we should update // everything but lastTransitionTime. 
SetDeploymentCondition already does that but @@ -78,7 +78,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if currentCond.Status == v1.ConditionTrue { condition.LastTransitionTime = currentCond.LastTransitionTime } - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } util.SetDeploymentCondition(&newStatus, *condition) @@ -89,7 +89,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg) util.SetDeploymentCondition(&newStatus, *condition) } } @@ -100,7 +100,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe // There will be only one ReplicaFailure condition on the replica set. util.SetDeploymentCondition(&newStatus, replicaFailureCond[0]) } else { - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure) } // Do not update if there is nothing new to add. @@ -112,17 +112,17 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // getReplicaFailures will convert replica failure conditions from replica sets // to deployment conditions. -func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition { - var conditions []extensions.DeploymentCondition +func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition { + var conditions []apps.DeploymentCondition if newRS != nil { for _, c := range newRS.Status.Conditions { - if c.Type != extensions.ReplicaSetReplicaFailure { + if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) @@ -141,7 +141,7 @@ func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaS } for _, c := range rs.Status.Conditions { - if c.Type != extensions.ReplicaSetReplicaFailure { + if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) @@ -156,8 +156,8 @@ var nowFn = func() time.Time { return time.Now() } // requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress // check. It returns the time after the deployment will be requeued for the progress check, 0 if it // will be requeued now, or -1 if it does not need to be requeued. 
-func (dc *DeploymentController) requeueStuckDeployment(d *extensions.Deployment, newStatus extensions.DeploymentStatus) time.Duration { - currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) +func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration { + currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) // Can't estimate progress if there is no deadline in the spec or progressing condition in the current status. if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil { return time.Duration(-1) diff --git a/pkg/controller/deployment/progress_test.go b/pkg/controller/deployment/progress_test.go index 978b21469fb..da0aa2d67df 100644 --- a/pkg/controller/deployment/progress_test.go +++ b/pkg/controller/deployment/progress_test.go @@ -20,16 +20,16 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller/deployment/util" ) -func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) extensions.DeploymentStatus { - return extensions.DeploymentStatus{ +func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) apps.DeploymentStatus { + return apps.DeploymentStatus{ Replicas: replicas, UpdatedReplicas: updatedReplicas, AvailableReplicas: availableReplicas, @@ -37,16 +37,16 @@ func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) ext } // assumes the returned deployment is always observed - not needed to be tested here. -func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []extensions.DeploymentCondition) *extensions.Deployment { - d := &extensions.Deployment{ +func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment { + d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "progress-test", }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ ProgressDeadlineSeconds: pds, Replicas: &replicas, - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RecreateDeploymentStrategyType, + Strategy: apps.DeploymentStrategy{ + Type: apps.RecreateDeploymentStrategyType, }, }, Status: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas), @@ -56,9 +56,9 @@ func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, av } // helper to create RS with given availableReplicas -func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *extensions.ReplicaSet { +func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *apps.ReplicaSet { rs := rs(name, specReplicas, nil, metav1.Time{}) - rs.Status = extensions.ReplicaSetStatus{ + rs.Status = apps.ReplicaSetStatus{ Replicas: int32(statusReplicas), AvailableReplicas: int32(availableReplicas), } @@ -67,16 +67,16 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl func TestRequeueStuckDeployment(t *testing.T) { pds := int32(60) - failed := []extensions.DeploymentCondition{ + failed := []apps.DeploymentCondition{ { - Type: extensions.DeploymentProgressing, + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: util.TimedOutReason, }, } - 
stuck := []extensions.DeploymentCondition{ + stuck := []apps.DeploymentCondition{ { - Type: extensions.DeploymentProgressing, + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, LastUpdateTime: metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC), }, @@ -84,8 +84,8 @@ func TestRequeueStuckDeployment(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - status extensions.DeploymentStatus + d *apps.Deployment + status apps.DeploymentStatus nowFn func() time.Time expected time.Duration }{ @@ -178,20 +178,20 @@ func TestRequeueStuckDeployment(t *testing.T) { func TestSyncRolloutStatus(t *testing.T) { pds := int32(60) testTime := metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC) - failedTimedOut := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + failedTimedOut := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: util.TimedOutReason, } - newRSAvailable := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + newRSAvailable := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: util.NewRSAvailableReason, LastUpdateTime: testTime, LastTransitionTime: testTime, } - replicaSetUpdated := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + replicaSetUpdated := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: util.ReplicaSetUpdatedReason, LastUpdateTime: testTime, @@ -200,10 +200,10 @@ func TestSyncRolloutStatus(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - allRSs []*extensions.ReplicaSet - newRS *extensions.ReplicaSet - conditionType extensions.DeploymentConditionType + d *apps.Deployment + allRSs []*apps.ReplicaSet + newRS *apps.ReplicaSet + conditionType apps.DeploymentConditionType conditionStatus v1.ConditionStatus conditionReason string lastUpdate metav1.Time @@ -211,15 +211,15 @@ func TestSyncRolloutStatus(t *testing.T) { }{ { name: "General: remove Progressing condition and do not estimate progress if deployment has no Progress Deadline", - d: currentDeployment(nil, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(nil, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), }, { name: "General: do not estimate progress of deployment with only one active ReplicaSet", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{newRSAvailable}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)}, - conditionType: extensions.DeploymentProgressing, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{newRSAvailable}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)}, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, lastUpdate: testTime, @@ -227,83 +227,83 @@ func TestSyncRolloutStatus(t *testing.T) { }, { name: "DeploymentProgressing: dont update lastTransitionTime if deployment already has Progressing=True", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: 
[]*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, lastTransition: testTime, }, { name: "DeploymentProgressing: update everything if deployment has Progressing=False", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, }, { name: "DeploymentProgressing: create Progressing condition if it does not exist", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, }, { name: "DeploymentComplete: dont update lastTransitionTime if deployment already has Progressing=True", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, lastTransition: testTime, }, { name: "DeploymentComplete: update everything if deployment has Progressing=False", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentComplete: create Progressing condition if it does not exist", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentComplete: defend against NPE when newRS=nil", - d: currentDeployment(&pds, 0, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)}, - conditionType: extensions.DeploymentProgressing, + d: currentDeployment(&pds, 0, 3, 3, 3, 
[]apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)}, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentTimedOut: update status if rollout exceeds Progress Deadline", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionFalse, conditionReason: util.TimedOutReason, }, { name: "DeploymentTimedOut: do not update status if deployment has existing timedOut condition", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionFalse, conditionReason: util.TimedOutReason, lastUpdate: testTime, diff --git a/pkg/controller/deployment/recreate.go b/pkg/controller/deployment/recreate.go index b6f01ef5460..04403978d5b 100644 --- a/pkg/controller/deployment/recreate.go +++ b/pkg/controller/deployment/recreate.go @@ -17,15 +17,15 @@ limitations under the License. package deployment import ( + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/deployment/util" ) // rolloutRecreate implements the logic for recreating a replica set. -func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down. newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { @@ -74,7 +74,7 @@ func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList } // scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate". -func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { scaled := false for i := range oldRSs { rs := oldRSs[i] @@ -95,7 +95,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext } // oldPodsRunning returns whether there are old pods running or any of the old ReplicaSets thinks that it runs pods. 
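The Recreate path below hinges on oldPodsRunning: the controller must not scale up the new ReplicaSet while any old pod is still in a non-terminal phase. A minimal sketch of that phase check against k8s.io/api/core/v1; the helper name is illustrative and not part of this change:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// anyPodNotTerminated reports whether any pod in the given lists has not yet
// reached a terminal phase; Succeeded and Failed pods no longer block a
// Recreate rollout.
func anyPodNotTerminated(podLists ...*v1.PodList) bool {
	for _, list := range podLists {
		if list == nil {
			continue
		}
		for _, pod := range list.Items {
			switch pod.Status.Phase {
			case v1.PodSucceeded, v1.PodFailed:
				// Terminal pods are ignored.
			default:
				return true
			}
		}
	}
	return false
}

func main() {
	running := &v1.PodList{Items: []v1.Pod{{Status: v1.PodStatus{Phase: v1.PodRunning}}}}
	done := &v1.PodList{Items: []v1.Pod{{Status: v1.PodStatus{Phase: v1.PodSucceeded}}}}
	fmt.Println(anyPodNotTerminated(running)) // true
	fmt.Println(anyPodNotTerminated(done))    // false
}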
-func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool { +func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool { if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 { return true } @@ -123,7 +123,7 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe } // scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate". -func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment) return scaled, err } diff --git a/pkg/controller/deployment/recreate_test.go b/pkg/controller/deployment/recreate_test.go index d81435c759e..dc9d3b04aa4 100644 --- a/pkg/controller/deployment/recreate_test.go +++ b/pkg/controller/deployment/recreate_test.go @@ -20,8 +20,8 @@ import ( "fmt" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" @@ -33,7 +33,7 @@ import ( func TestScaleDownOldReplicaSets(t *testing.T) { tests := []struct { oldRSSizes []int - d *extensions.Deployment + d *apps.Deployment }{ { oldRSSizes: []int{3}, @@ -45,7 +45,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) { t.Logf("running scenario %d", i) test := tests[i] - var oldRSs []*extensions.ReplicaSet + var oldRSs []*apps.ReplicaSet var expected []runtime.Object for n, size := range test.oldRSSizes { @@ -58,14 +58,14 @@ func TestScaleDownOldReplicaSets(t *testing.T) { rsCopy.Spec.Replicas = &zero expected = append(expected, rsCopy) - if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) { + if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) { t.Errorf("broken test - original and expected RS have the same size") } } kc := fake.NewSimpleClientset(expected...) 
informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc()) - c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc) + c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc) if err != nil { t.Fatalf("error creating Deployment controller: %v", err) } @@ -86,8 +86,8 @@ func TestOldPodsRunning(t *testing.T) { tests := []struct { name string - newRS *extensions.ReplicaSet - oldRSs []*extensions.ReplicaSet + newRS *apps.ReplicaSet + oldRSs []*apps.ReplicaSet podMap map[types.UID]*v1.PodList hasOldPodsRunning bool @@ -98,23 +98,23 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with running pods", - oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")}, + oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")}, podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}), hasOldPodsRunning: true, }, { name: "old RSs without pods but with non-zero status replicas", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)}, hasOldPodsRunning: true, }, { name: "old RSs without pods or non-zero status replicas", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, hasOldPodsRunning: false, }, { name: "old RSs with zero status replicas but pods in terminal state are present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -135,7 +135,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas but pod in unknown phase present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -151,7 +151,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas with pending pod present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -167,7 +167,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas with running pod present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -183,7 +183,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas but pods in terminal state and pending are present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -225,7 +225,7 @@ func TestOldPodsRunning(t *testing.T) { } } -func rsWithUID(uid string) *extensions.ReplicaSet { +func rsWithUID(uid string) *apps.ReplicaSet { d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0) rs.UID = types.UID(uid) diff --git 
a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go index 826185afc33..97e3b3027ec 100644 --- a/pkg/controller/deployment/rollback.go +++ b/pkg/controller/deployment/rollback.go @@ -18,9 +18,11 @@ package deployment import ( "fmt" + "strconv" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" @@ -28,17 +30,17 @@ import ( ) // rollback the deployment to the specified revision. In any case cleanup the rollback spec. -func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true) if err != nil { return err } allRSs := append(allOldRSs, newRS) - toRevision := &d.Spec.RollbackTo.Revision + rollbackTo := getRollbackTo(d) // If rollback revision is 0, rollback to the last revision - if *toRevision == 0 { - if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 { + if rollbackTo.Revision == 0 { + if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 { // If we still can't find the last revision, gives up rollback dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.") // Gives up rollback @@ -51,14 +53,14 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err) continue } - if v == *toRevision { + if v == rollbackTo.Revision { glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v) // rollback by copying podTemplate.Spec from the replica set // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call // no-op if the spec matches current deployment's podTemplate.Spec performedRollback, err := dc.rollbackToTemplate(d, rs) if performedRollback && err == nil { - dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision)) + dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision)) } return err } @@ -71,7 +73,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext // rollbackToTemplate compares the templates of the provided deployment and replica set and // updates the deployment with the replica set template in case they are different. It also // cleans up the rollback spec so subsequent requeues of the deployment won't end up in here. 
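rollbackToTemplate (below) decides whether a rollback is a no-op by comparing pod templates with deploymentutil.EqualIgnoreHash, which disregards only the controller-owned pod-template-hash label (apps.DefaultDeploymentUniqueLabelKey). A simplified sketch of that comparison over plain label maps; the helper name is illustrative:

package main

import (
	"fmt"
	"reflect"
)

// podTemplateHashLabel mirrors apps.DefaultDeploymentUniqueLabelKey.
const podTemplateHashLabel = "pod-template-hash"

// equalIgnoreHash treats two label sets as equal if they differ only in the
// pod-template-hash label that the deployment controller stamps onto the
// ReplicaSets it creates or adopts.
func equalIgnoreHash(deploymentLabels, rsLabels map[string]string) bool {
	stripped := make(map[string]string, len(rsLabels))
	for k, v := range rsLabels {
		if k == podTemplateHashLabel {
			continue
		}
		stripped[k] = v
	}
	return reflect.DeepEqual(deploymentLabels, stripped)
}

func main() {
	d := map[string]string{"app": "foo"}
	rs := map[string]string{"app": "foo", podTemplateHashLabel: "6d4b75cb6d"}
	fmt.Println(equalIgnoreHash(d, rs)) // true: only the hash label differs
}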
-func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) { +func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) { performedRollback := false if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) { glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec) @@ -98,20 +100,49 @@ func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs return performedRollback, dc.updateDeploymentAndClearRollbackTo(d) } -func (dc *DeploymentController) emitRollbackWarningEvent(d *extensions.Deployment, reason, message string) { +func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message) } -func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment, message string) { +func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) { dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message) } // updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment // It is assumed that the caller will have updated the deployment template appropriately (in case // we want to rollback). -func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error { +func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error { glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name) - d.Spec.RollbackTo = nil - _, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d) + setRollbackTo(d, nil) + _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d) return err } + +// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped. +func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig { + // Extract the annotation used for round-tripping the deprecated RollbackTo field. + revision := d.Annotations[apps.DeprecatedRollbackTo] + if revision == "" { + return nil + } + revision64, err := strconv.ParseInt(revision, 10, 64) + if err != nil { + // If it's invalid, ignore it. + return nil + } + return &extensions.RollbackConfig{ + Revision: revision64, + } +} + +// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped. +func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) { + if rollbackTo == nil { + delete(d.Annotations, apps.DeprecatedRollbackTo) + return + } + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10) +} diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go index 598928366d7..132981fa3e6 100644 --- a/pkg/controller/deployment/rolling.go +++ b/pkg/controller/deployment/rolling.go @@ -21,8 +21,8 @@ import ( "sort" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/integer" "k8s.io/kubernetes/pkg/controller" @@ -30,7 +30,7 @@ import ( ) // rolloutRolling implements the logic for rolling a new replica set. 
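Since apps/v1 Deployment has no spec.rollbackTo field, the getRollbackTo/setRollbackTo helpers above round-trip the requested revision through the apps.DeprecatedRollbackTo annotation as a plain integer string. A self-contained sketch of that decoding, using an annotation map in place of the API object:

package main

import (
	"fmt"
	"strconv"
)

// deprecatedRollbackTo mirrors apps.DeprecatedRollbackTo.
const deprecatedRollbackTo = "deprecated.deployment.rollback.to"

// rollbackRevision extracts the requested rollback revision from a
// deployment's annotations; ok is false when the annotation is missing or
// cannot be parsed, in which case no rollback is pending.
func rollbackRevision(annotations map[string]string) (int64, bool) {
	raw, found := annotations[deprecatedRollbackTo]
	if !found || raw == "" {
		return 0, false
	}
	rev, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return 0, false
	}
	return rev, true
}

func main() {
	ann := map[string]string{deprecatedRollbackTo: "3"}
	if rev, ok := rollbackRevision(ann); ok {
		fmt.Printf("roll back to revision %d\n", rev) // revision 0 would mean "previous revision"
	}
}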
-func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true) if err != nil { return err @@ -67,7 +67,7 @@ func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList return dc.syncRolloutStatus(allRSs, newRS, d) } -func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) { // Scaling not required. return false, nil @@ -85,7 +85,7 @@ func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.Repl return scaled, err } -func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs) if oldPodsCount == 0 { // Can't scale down further @@ -154,7 +154,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep } // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted. -func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) { +func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) { sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) // Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order // such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will @@ -191,7 +191,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate". // Need check maxUnavailable to ensure availability -func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) { maxUnavailable := deploymentutil.MaxUnavailable(*deployment) // Check if we can scale down. 
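The reconcile functions above size the new and old ReplicaSets from maxSurge and maxUnavailable, which the deploymentutil helpers resolve from either an integer or a percentage of .spec.replicas (surge rounding up, unavailability rounding down, as far as the fencepost helpers go). A simplified, self-contained sketch of that arithmetic:

package main

import (
	"fmt"
	"math"
)

// resolvePercent applies a percentage to the desired replica count; roundUp
// picks the fencepost direction (up for maxSurge, down for maxUnavailable).
func resolvePercent(percent int, desired int32, roundUp bool) int32 {
	v := float64(percent) / 100 * float64(desired)
	if roundUp {
		return int32(math.Ceil(v))
	}
	return int32(math.Floor(v))
}

func main() {
	desired := int32(10)
	maxSurge := resolvePercent(25, desired, true)        // 3
	maxUnavailable := resolvePercent(25, desired, false) // 2
	// The rollout may run up to desired+maxSurge pods in total while keeping
	// at least desired-maxUnavailable of them available.
	fmt.Println(desired+maxSurge, desired-maxUnavailable) // 13 8
}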
diff --git a/pkg/controller/deployment/rolling_test.go b/pkg/controller/deployment/rolling_test.go index 5f870e40b5f..b0feb42b40b 100644 --- a/pkg/controller/deployment/rolling_test.go +++ b/pkg/controller/deployment/rolling_test.go @@ -19,7 +19,7 @@ package deployment import ( "testing" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" @@ -82,7 +82,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { t.Logf("executing scenario %d", i) newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) - allRSs := []*extensions.ReplicaSet{newRS, oldRS} + allRSs := []*apps.ReplicaSet{newRS, oldRS} maxUnavailable := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"}) fake := fake.Clientset{} @@ -109,7 +109,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { t.Errorf("expected 1 action during scale, got: %v", fake.Actions()) continue } - updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet) + updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*apps.ReplicaSet) if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a { t.Errorf("expected update to %d replicas, got %d", e, a) } @@ -187,8 +187,8 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) { newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS) oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS) - oldRSs := []*extensions.ReplicaSet{oldRS} - allRSs := []*extensions.ReplicaSet{oldRS, newRS} + oldRSs := []*apps.ReplicaSet{oldRS} + allRSs := []*apps.ReplicaSet{oldRS, newRS} maxSurge := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector) fakeClientset := fake.Clientset{} @@ -255,7 +255,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) { t.Logf("executing scenario %d", i) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPods) - oldRSs := []*extensions.ReplicaSet{oldRS} + oldRSs := []*apps.ReplicaSet{oldRS} maxSurge := intstr.FromInt(2) maxUnavailable := intstr.FromInt(2) deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil) @@ -330,8 +330,8 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing t.Logf("executing scenario %d", i) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPods) - allRSs := []*extensions.ReplicaSet{oldRS} - oldRSs := []*extensions.ReplicaSet{oldRS} + allRSs := []*apps.ReplicaSet{oldRS} + oldRSs := []*apps.ReplicaSet{oldRS} maxSurge := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"}) fakeClientset := fake.Clientset{} @@ -371,7 +371,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing t.Errorf("expected an update action") continue } - updated := updateAction.GetObject().(*extensions.ReplicaSet) + updated := updateAction.GetObject().(*apps.ReplicaSet) if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a { t.Errorf("expected update to 
%d replicas, got %d", e, a) } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 42891c1b340..a49938bba9b 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -23,8 +23,8 @@ import ( "strconv" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -35,7 +35,7 @@ import ( ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. -func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return err @@ -47,7 +47,7 @@ func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList // sync is responsible for reconciling deployments on scaling events or when they // are paused. -func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return err @@ -59,7 +59,7 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi } // Clean up the deployment when it's paused and no rollback is in flight. - if d.Spec.Paused && d.Spec.RollbackTo == nil { + if d.Spec.Paused && getRollbackTo(d) == nil { if err := dc.cleanupDeployment(oldRSs, d); err != nil { return err } @@ -72,11 +72,11 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition. // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments // that were paused for longer than progressDeadlineSeconds. -func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error { +func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error { if d.Spec.ProgressDeadlineSeconds == nil { return nil } - cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) if cond != nil && cond.Reason == deploymentutil.TimedOutReason { // If we have reported lack of progress, do not overwrite it with a paused condition. 
return nil @@ -85,11 +85,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) needsUpdate := false if d.Spec.Paused && !pausedCondExists { - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } else if !d.Spec.Paused && pausedCondExists { - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } @@ -99,7 +99,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) } var err error - d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) return err } @@ -115,7 +115,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) // // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. -func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) { _, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList) // Get new replica set with the updated revision number @@ -132,7 +132,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. 
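The pod-template-hash mentioned above is computed by controller.ComputeHash over the full PodTemplateSpec, folding in status.collisionCount when set; the result feeds both the new ReplicaSet's pod-template-hash label and its name (the name additionally goes through rand.SafeEncodeString). A toy sketch of the idea with hash/fnv over a pre-serialized template string (the serialization itself is assumed, not shown):

package main

import (
	"fmt"
	"hash/fnv"
)

// templateHash derives a deterministic suffix from a serialized pod template
// and an optional collision count. Bumping the collision count changes the
// hash, which is how the controller escapes a name collision with a
// different pre-existing ReplicaSet.
func templateHash(serializedTemplate string, collisionCount *int32) uint32 {
	h := fnv.New32a()
	h.Write([]byte(serializedTemplate))
	if collisionCount != nil {
		fmt.Fprintf(h, "-%d", *collisionCount)
	}
	return h.Sum32()
}

func main() {
	count := int32(1)
	fmt.Printf("foo-%d\n", templateHash("template-v1", nil))
	fmt.Printf("foo-%d\n", templateHash("template-v1", &count)) // different suffix after a collision
}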
-func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) { existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList) // Calculate the max revision number among all old RSes @@ -152,7 +152,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds - return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) + return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } // Should use the revision in existingNewRS's annotation, since it set by before @@ -160,17 +160,17 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // If no other Progressing condition has been recorded and we need to estimate the progress // of this deployment then it is likely that old users started caring about progress. In that // case we need to take into account the first time we noticed their new replica set. - cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) if d.Spec.ProgressDeadlineSeconds != nil && cond == nil { msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if needsUpdate { var err error - if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil { + if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil { return nil, err } } @@ -184,19 +184,19 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // new ReplicaSet does not exist, create one. newRSTemplate := *d.Spec.Template.DeepCopy() podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)) - newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) + newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
- newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) + newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Create new ReplicaSet - newRS := extensions.ReplicaSet{ + newRS := apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash), Namespace: d.Namespace, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: new(int32), MinReadySeconds: d.Spec.MinReadySeconds, Selector: newRSSelector, @@ -216,7 +216,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. alreadyExists := false - createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS) + createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the Deployment. case errors.IsAlreadyExists(err): @@ -248,7 +248,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis *d.Status.CollisionCount++ // Update the collisionCount for the Deployment and let it requeue by returning the original // error. - _, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) if dErr == nil { glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount) } @@ -256,12 +256,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis case err != nil: msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) if d.Spec.ProgressDeadlineSeconds != nil { - cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) + cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *cond) // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. 
Related issue: https://github.com/kubernetes/kubernetes/issues/18568 - _, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err @@ -273,12 +273,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision) if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil { msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if needsUpdate { - _, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } return createdRS, err } @@ -288,7 +288,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. -func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error { +func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error { // If there is only one active replica set then we should scale that up to the full count of the // deployment. If there is no active replica set, then we should scale up the newest replica set. 
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { @@ -386,7 +386,7 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS * return nil } -func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) { // No need to scale if *(rs.Spec.Replicas) == newScale { return false, rs, nil @@ -401,7 +401,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep return scaled, newRS, err } -func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) { sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale @@ -413,7 +413,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc rsCopy := rs.DeepCopy() *(rsCopy.Spec.Replicas) = newScale deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) - rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) + rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) @@ -425,13 +425,13 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. -func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { +func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } // Avoid deleting replica set with deletion timestamp set - aliveFilter := func(rs *extensions.ReplicaSet) bool { + aliveFilter := func(rs *apps.ReplicaSet) bool { return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil } cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter) @@ -451,7 +451,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe continue } glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) - if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { + if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. 
return err @@ -462,7 +462,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary -func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { +func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) if reflect.DeepEqual(d.Status, newStatus) { @@ -471,12 +471,12 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets. -func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus { +func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus { availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) unavailableReplicas := totalReplicas - availableReplicas @@ -486,11 +486,11 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS unavailableReplicas = 0 } - status := extensions.DeploymentStatus{ + status := apps.DeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. 
ObservedGeneration: deployment.Generation, Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs), - UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}), + UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}), ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, @@ -504,10 +504,10 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS } if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) { - minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") + minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") deploymentutil.SetDeploymentCondition(&status, *minAvailability) } else { - noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") + noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") deploymentutil.SetDeploymentCondition(&status, *noMinAvailability) } @@ -519,7 +519,7 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS // // rsList should come from getReplicaSetsForDeployment(d). // podMap should come from getPodMapForDeployment(d, rsList). 
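isScalingEvent (below) looks at the deployment.kubernetes.io/desired-replicas annotation the controller records on each active ReplicaSet and reports a scaling event when any of them disagrees with the deployment's current .spec.replicas. A loose, self-contained sketch of that check with illustrative names:

package main

import (
	"fmt"
	"strconv"
)

// desiredReplicasAnnotation mirrors deploymentutil.DesiredReplicasAnnotation.
const desiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"

// hasScalingEvent reports whether any ReplicaSet still records a desired
// replica count that differs from the deployment's current .spec.replicas,
// i.e. the deployment was scaled rather than rolled out.
func hasScalingEvent(rsAnnotations []map[string]string, deploymentReplicas int32) bool {
	for _, ann := range rsAnnotations {
		raw, ok := ann[desiredReplicasAnnotation]
		if !ok {
			continue
		}
		desired, err := strconv.ParseInt(raw, 10, 32)
		if err != nil {
			continue
		}
		if int32(desired) != deploymentReplicas {
			return true
		}
	}
	return false
}

func main() {
	rss := []map[string]string{{desiredReplicasAnnotation: "3"}}
	fmt.Println(hasScalingEvent(rss, 5)) // true: spec.replicas moved from 3 to 5
	fmt.Println(hasScalingEvent(rss, 3)) // false
}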
-func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) { +func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return false, err diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go index 6f5cc96b344..4cd4c0ab2d9 100644 --- a/pkg/controller/deployment/sync_test.go +++ b/pkg/controller/deployment/sync_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" @@ -41,7 +41,7 @@ func TestScale(t *testing.T) { oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC) olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC) - var updatedTemplate = func(replicas int) *extensions.Deployment { + var updatedTemplate = func(replicas int) *apps.Deployment { d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"}) d.Spec.Template.Labels["another"] = "label" return d @@ -49,14 +49,14 @@ func TestScale(t *testing.T) { tests := []struct { name string - deployment *extensions.Deployment - oldDeployment *extensions.Deployment + deployment *apps.Deployment + oldDeployment *apps.Deployment - newRS *extensions.ReplicaSet - oldRSs []*extensions.ReplicaSet + newRS *apps.ReplicaSet + oldRSs []*apps.ReplicaSet - expectedNew *extensions.ReplicaSet - expectedOld []*extensions.ReplicaSet + expectedNew *apps.ReplicaSet + expectedOld []*apps.ReplicaSet wasntUpdated map[string]bool desiredReplicasAnnotations map[string]int32 @@ -67,10 +67,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{}, + oldRSs: []*apps.ReplicaSet{}, expectedNew: rs("foo-v1", 12, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{}, + expectedOld: []*apps.ReplicaSet{}, }, { name: "normal scaling event: 10 -> 5", @@ -78,10 +78,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{}, + oldRSs: []*apps.ReplicaSet{}, expectedNew: rs("foo-v1", 5, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{}, + expectedOld: []*apps.ReplicaSet{}, }, { name: "proportional scaling: 5 -> 10", @@ -89,10 +89,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, }, { name: "proportional scaling: 5 -> 3", @@ -100,10 +100,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 1, nil, newTimestamp), - 
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)}, }, { name: "proportional scaling: 9 -> 4", @@ -111,10 +111,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil), newRS: rs("foo-v2", 8, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)}, }, { name: "proportional scaling: 7 -> 10", @@ -122,10 +122,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 3, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, }, { name: "proportional scaling: 13 -> 8", @@ -133,10 +133,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales up the new replica set. { @@ -145,10 +145,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, }, // Scales down the older replica set. { @@ -157,10 +157,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up the latest replica set first. 
{ @@ -169,10 +169,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil), newRS: nil, - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: nil, - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales down to zero { @@ -181,10 +181,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil), newRS: rs("foo-v3", 3, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 0, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up from zero { @@ -193,10 +193,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil), newRS: rs("foo-v3", 0, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 6, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true}, }, // Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 ) @@ -208,10 +208,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true}, desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)}, @@ -222,10 +222,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil), newRS: rs("foo-v2", 6, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 11, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)}, }, { name: "change both surge and size", @@ -233,10 +233,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil), 
newRS: rs("foo-v2", 5, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 22, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)}, }, { name: "change both size and template", @@ -244,25 +244,25 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}), newRS: nil, - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: nil, - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, }, { name: "saturated but broken new replica set does not affect old pods", deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), - newRS: func() *extensions.ReplicaSet { + newRS: func() *apps.ReplicaSet { rs := rs("foo-v2", 2, nil, newTimestamp) rs.Status.AvailableReplicas = 0 return rs }(), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, }, } @@ -313,7 +313,7 @@ func TestScale(t *testing.T) { } // Get all the UPDATE actions and update nameToSize with all the updated sizes. for _, action := range fake.Actions() { - rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) + rs := action.(testclient.UpdateAction).GetObject().(*apps.ReplicaSet) if !test.wasntUpdated[rs.Name] { nameToSize[rs.Name] = *(rs.Spec.Replicas) } @@ -345,12 +345,12 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { alreadyDeleted.DeletionTimestamp = &now tests := []struct { - oldRSs []*extensions.ReplicaSet + oldRSs []*apps.ReplicaSet revisionHistoryLimit int32 expectedDeletions int }{ { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 0, selector), newRSWithStatus("foo-3", 0, 0, selector), @@ -360,7 +360,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { }, { // Only delete the replica set with Spec.Replicas = Status.Replicas = 0. 
- oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 1, selector), newRSWithStatus("foo-3", 1, 0, selector), @@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 0, selector), }, @@ -379,7 +379,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { expectedDeletions: 2, }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 1, 1, selector), newRSWithStatus("foo-2", 1, 1, selector), }, @@ -387,7 +387,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { expectedDeletions: 0, }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ alreadyDeleted, }, revisionHistoryLimit: 0, @@ -401,7 +401,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { fake := &fake.Clientset{} informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc()) - controller, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake) + controller, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), fake) if err != nil { t.Fatalf("error creating Deployment controller: %v", err) } @@ -411,7 +411,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { controller.rsListerSynced = alwaysReady controller.podListerSynced = alwaysReady for _, rs := range test.oldRSs { - informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) + informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs) } stopCh := make(chan struct{}) diff --git a/pkg/controller/deployment/util/BUILD b/pkg/controller/deployment/util/BUILD index eabed9df65a..e3bd8440119 100644 --- a/pkg/controller/deployment/util/BUILD +++ b/pkg/controller/deployment/util/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/controller:go_default_library", "//pkg/util/labels:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -29,10 +29,10 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], @@ -48,8 +48,8 @@ go_test( deps = [ "//pkg/controller:go_default_library", "//pkg/util/hash:go_default_library", 
+ "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d5bbabfdd79..a435e5eacef 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -25,8 +25,8 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/types" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" - extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" "k8s.io/client-go/util/integer" internalextensions "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller" @@ -98,8 +98,8 @@ const ( ) // NewDeploymentCondition creates a new deployment condition. -func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition { - return &extensions.DeploymentCondition{ +func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition { + return &apps.DeploymentCondition{ Type: condType, Status: status, LastUpdateTime: metav1.Now(), @@ -110,7 +110,7 @@ func NewDeploymentCondition(condType extensions.DeploymentConditionType, status } // GetDeploymentCondition returns the condition with the provided type. -func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition { +func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition { for i := range status.Conditions { c := status.Conditions[i] if c.Type == condType { @@ -122,7 +122,7 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi // SetDeploymentCondition updates the deployment to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. -func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) { +func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) { currentCond := GetDeploymentCondition(*status, condition.Type) if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { return @@ -136,13 +136,13 @@ func SetDeploymentCondition(status *extensions.DeploymentStatus, condition exten } // RemoveDeploymentCondition removes the deployment condition with the provided type. 
-func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) { +func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) { status.Conditions = filterOutCondition(status.Conditions, condType) } // filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. -func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition { - var newConditions []extensions.DeploymentCondition +func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition { + var newConditions []apps.DeploymentCondition for _, c := range conditions { if c.Type == condType { continue @@ -154,9 +154,9 @@ func filterOutCondition(conditions []extensions.DeploymentCondition, condType ex // ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition. // Useful for promoting replica set failure conditions into deployments. -func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentConditionType(cond.Type), +func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentConditionType(cond.Type), Status: cond.Status, LastTransitionTime: cond.LastTransitionTime, LastUpdateTime: cond.LastTransitionTime, @@ -166,7 +166,7 @@ func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extens } // SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool { +func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool { updated := false if deployment.Annotations == nil { @@ -181,7 +181,7 @@ func SetDeploymentRevision(deployment *extensions.Deployment, revision string) b } // MaxRevision finds the highest revision in the replica sets -func MaxRevision(allRSs []*extensions.ReplicaSet) int64 { +func MaxRevision(allRSs []*apps.ReplicaSet) int64 { max := int64(0) for _, rs := range allRSs { if v, err := Revision(rs); err != nil { @@ -195,7 +195,7 @@ func MaxRevision(allRSs []*extensions.ReplicaSet) int64 { } // LastRevision finds the second max revision number in all replica sets (the last revision) -func LastRevision(allRSs []*extensions.ReplicaSet) int64 { +func LastRevision(allRSs []*apps.ReplicaSet) int64 { max, secMax := int64(0), int64(0) for _, rs := range allRSs { if v, err := Revision(rs); err != nil { @@ -226,7 +226,7 @@ func Revision(obj runtime.Object) (int64, error) { // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if replica set's annotation is changed. 
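// Illustrative sketch, not part of the patch: how the apps/v1 condition
// helpers above (NewDeploymentCondition, SetDeploymentCondition,
// GetDeploymentCondition, RemoveDeploymentCondition) can be called after the
// migration. The reason and message strings are made-up example values, and
// the "deploymentutil" alias for this package is an assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func main() {
	status := &apps.DeploymentStatus{}

	// Build an apps/v1 condition and attach it to the status.
	cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, "ReplicaSetUpdated", "rolling out a new replica set")
	deploymentutil.SetDeploymentCondition(status, *cond)

	// Look the condition up by type; nil means the type is absent.
	if c := deploymentutil.GetDeploymentCondition(*status, apps.DeploymentProgressing); c != nil {
		fmt.Println(c.Reason)
	}

	// Drop it again.
	deploymentutil.RemoveDeploymentCondition(status, apps.DeploymentProgressing)
}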
-func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool { +func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool { // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS) // Then, update replica set's revision annotation @@ -283,6 +283,7 @@ var annotationsToSkip = map[string]bool{ RevisionHistoryAnnotation: true, DesiredReplicasAnnotation: true, MaxReplicasAnnotation: true, + apps.DeprecatedRollbackTo: true, } // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key @@ -295,7 +296,7 @@ func skipCopyAnnotation(key string) bool { // copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations, // and returns true if replica set's annotation is changed. // Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { +func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { rsAnnotationsChanged := false if rs.Annotations == nil { rs.Annotations = make(map[string]string) @@ -316,7 +317,7 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs // SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations. // This action should be done if and only if the deployment is rolling back to this rs. // Note that apply and revision annotations are not changed. -func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) { +func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) { deployment.Annotations = getSkippedAnnotations(deployment.Annotations) for k, v := range rollbackToRS.Annotations { if !skipCopyAnnotation(k) { @@ -337,7 +338,7 @@ func getSkippedAnnotations(annotations map[string]string) map[string]string { // FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active // replica set. If there are more active replica sets, then we should proportionally scale them. 
-func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet { +func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet { if newRS == nil && len(oldRSs) == 0 { return nil } @@ -360,15 +361,15 @@ func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.Repli } // GetDesiredReplicasAnnotation returns the number of desired replicas -func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { +func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { return getIntFromAnnotation(rs, DesiredReplicasAnnotation) } -func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { +func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { return getIntFromAnnotation(rs, MaxReplicasAnnotation) } -func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int32, bool) { +func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) { annotationValue, ok := rs.Annotations[annotationKey] if !ok { return int32(0), false @@ -382,7 +383,7 @@ func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int3 } // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations -func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool { +func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { updated := false if rs.Annotations == nil { rs.Annotations = make(map[string]string) @@ -401,7 +402,7 @@ func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxRepli } // AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated -func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool { +func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { if rs.Annotations == nil { return true } @@ -417,7 +418,7 @@ func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, m } // MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. -func MaxUnavailable(deployment extensions.Deployment) int32 { +func MaxUnavailable(deployment apps.Deployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { return int32(0) } @@ -430,7 +431,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 { } // MinAvailable returns the minimum available pods of a given deployment -func MinAvailable(deployment *extensions.Deployment) int32 { +func MinAvailable(deployment *apps.Deployment) int32 { if !IsRollingUpdate(deployment) { return int32(0) } @@ -438,7 +439,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 { } // MaxSurge returns the maximum surge pods a rolling deployment can take. -func MaxSurge(deployment extensions.Deployment) int32 { +func MaxSurge(deployment apps.Deployment) int32 { if !IsRollingUpdate(&deployment) { return int32(0) } @@ -450,7 +451,7 @@ func MaxSurge(deployment extensions.Deployment) int32 { // GetProportion will estimate the proportion for the provided replica set using 1. the current size // of the parent deployment, 2. the replica count that needs be added on the replica sets of the // deployment, and 3. the total replicas added in the replica sets of the deployment so far. 
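// Illustrative sketch, not part of the patch: resolving the rolling-update
// bounds of an apps/v1 Deployment with the MaxSurge and MaxUnavailable
// helpers above. The replica count and the "25%" / 1 values are arbitrary
// example inputs; the "deploymentutil" alias for this package is an assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func main() {
	replicas := int32(10)
	maxSurge := intstr.FromString("25%")
	maxUnavailable := intstr.FromInt(1)

	d := apps.Deployment{
		Spec: apps.DeploymentSpec{
			Replicas: &replicas,
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{
					MaxSurge:       &maxSurge,
					MaxUnavailable: &maxUnavailable,
				},
			},
		},
	}

	// 25% of 10 rounds up to 3 surge pods; at most 1 pod may be unavailable.
	fmt.Println(deploymentutil.MaxSurge(d), deploymentutil.MaxUnavailable(d))
}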
-func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { +func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { return int32(0) } @@ -472,7 +473,7 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen // getReplicaSetFraction estimates the fraction of replicas a replica set can have in // 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 { +func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 { // If we are scaling down to zero then the fraction of this replica set is its whole size (negative) if *(d.Spec.Replicas) == int32(0) { return -*(rs.Spec.Replicas) @@ -497,7 +498,7 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in // GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. // The third returned value is the new replica set, and it may be nil if it doesn't exist yet. -func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { +func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, nil, err @@ -509,7 +510,7 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex // GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { +func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, err @@ -520,7 +521,7 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. // Returns nil if the new replica set doesn't exist yet. 
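// Illustrative sketch, not part of the patch: GetAllReplicaSets and
// GetOldReplicaSets above now take an apps/v1 client (e.g. clientset.AppsV1())
// instead of ExtensionsV1beta1(). The fake clientset and the deployment object
// are stand-ins for the example; the "deploymentutil" alias is an assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func main() {
	client := fake.NewSimpleClientset()

	d := &apps.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"},
		Spec: apps.DeploymentSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}},
		},
	}

	// Old ReplicaSets with pods, all old ReplicaSets, and the new ReplicaSet
	// (nil here, since the fake cluster is empty).
	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(d, client.AppsV1())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(oldRSs), len(allOldRSs), newRS == nil)
}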
-func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) (*extensions.ReplicaSet, error) { +func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, err @@ -529,13 +530,13 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.Ext } // RsListFromClient returns an rsListFunc that wraps the given client. -func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc { - return func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { +func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { rsList, err := c.ReplicaSets(namespace).List(options) if err != nil { return nil, err } - var ret []*extensions.ReplicaSet + var ret []*apps.ReplicaSet for i := range rsList.Items { ret = append(ret, &rsList.Items[i]) } @@ -544,14 +545,14 @@ func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc } // TODO: switch this to full namespacers -type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) +type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error) type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) // ListReplicaSets returns a slice of RSes the given deployment targets. // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), // because only the controller itself should do that. // However, it does filter out anything whose ControllerRef doesn't match. -func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([]*extensions.ReplicaSet, error) { +func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) { // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. namespace := deployment.Namespace @@ -565,7 +566,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([ return nil, err } // Only include those whose ControllerRef matches the Deployment. - owned := make([]*extensions.ReplicaSet, 0, len(all)) + owned := make([]*apps.ReplicaSet, 0, len(all)) for _, rs := range all { if metav1.IsControlledBy(rs, deployment) { owned = append(owned, rs) @@ -603,7 +604,7 @@ func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSLis // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), // because only the controller itself should do that. // However, it does filter out anything whose ControllerRef doesn't match. 
-func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) { +func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) { namespace := deployment.Namespace selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { @@ -640,13 +641,13 @@ func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() // Remove hash labels from template.Labels before comparing - delete(t1Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey) - delete(t2Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey) + delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) } // FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) *extensions.ReplicaSet { +func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet { sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) for i := range rsList { if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { @@ -663,9 +664,9 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet) { - var requiredRSs []*extensions.ReplicaSet - var allRSs []*extensions.ReplicaSet +func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) { + var requiredRSs []*apps.ReplicaSet + var allRSs []*apps.ReplicaSet newRS := FindNewReplicaSet(deployment, rsList) for _, rs := range rsList { // Filter out new replica set @@ -681,17 +682,17 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions. } // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. -func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment { +func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment { deployment.Spec.Template.ObjectMeta = template.ObjectMeta deployment.Spec.Template.Spec = template.Spec deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey) + apps.DefaultDeploymentUniqueLabelKey) return deployment } // GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets. 
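// Illustrative sketch, not part of the patch: EqualIgnoreHash and
// FindNewReplicaSet above now strip the apps/v1 pod-template-hash label
// (apps.DefaultDeploymentUniqueLabelKey) before comparing templates. The pod
// templates below are minimal stand-ins; the "deploymentutil" alias is an
// assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func podTemplate(hash string) v1.PodTemplateSpec {
	labels := map[string]string{"app": "nginx"}
	if hash != "" {
		labels[apps.DefaultDeploymentUniqueLabelKey] = hash
	}
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: labels},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "nginx", Image: "nginx:1.13"}},
		},
	}
}

func main() {
	a := podTemplate("hash-1")
	b := podTemplate("hash-2")
	// Only the pod-template-hash label differs, so the templates are treated
	// as equal when matching a ReplicaSet to its Deployment.
	fmt.Println(deploymentutil.EqualIgnoreHash(&a, &b)) // true
}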
-func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -702,7 +703,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { } // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalActualReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -713,7 +714,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i } // GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets. -func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalReadyReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -724,7 +725,7 @@ func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) in } // GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets. -func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalAvailableReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -735,13 +736,13 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet } // IsRollingUpdate returns true if the strategy type is a rolling update. -func IsRollingUpdate(deployment *extensions.Deployment) bool { - return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType +func IsRollingUpdate(deployment *apps.Deployment) bool { + return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType } // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old pods are running. -func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && @@ -752,7 +753,7 @@ func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions // current with the new status of the deployment that the controller is observing. More specifically, // when new pods are scaled up or become ready or available, or old pods are scaled down, then we // consider the deployment is progressing. 
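// Illustrative sketch, not part of the patch: DeploymentComplete above
// compares an apps/v1 Deployment spec against a freshly computed status.
// The replica numbers and generations are arbitrary example values; the
// "deploymentutil" alias is an assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func main() {
	replicas := int32(3)
	d := &apps.Deployment{
		ObjectMeta: metav1.ObjectMeta{Generation: 2},
		Spec:       apps.DeploymentSpec{Replicas: &replicas},
	}
	newStatus := &apps.DeploymentStatus{
		ObservedGeneration: 2,
		Replicas:           3,
		UpdatedReplicas:    3,
		AvailableReplicas:  3,
	}
	// All desired replicas are updated and available and the observed
	// generation has caught up, so the rollout counts as complete.
	fmt.Println(deploymentutil.DeploymentComplete(d, newStatus)) // true
}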
-func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { oldStatus := deployment.Status // Old replicas that need to be scaled down @@ -771,7 +772,7 @@ var nowFn = func() time.Time { return time.Now() } // DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress // is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already // exists. -func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { if deployment.Spec.ProgressDeadlineSeconds == nil { return false } @@ -779,7 +780,7 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions // Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress. // If it's already set with a TimedOutReason reason, we have already timed out, no need to check // again. - condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing) + condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing) if condition == nil { return false } @@ -817,9 +818,9 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. // 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { +func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) { switch deployment.Spec.Strategy.Type { - case extensions.RollingUpdateDeploymentStrategyType: + case apps.RollingUpdateDeploymentStrategyType: // Check if we can scale up. maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) if err != nil { @@ -837,7 +838,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Do not exceed the number of desired replicas. scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas)))) return *(newRS.Spec.Replicas) + scaleUpCount, nil - case extensions.RecreateDeploymentStrategyType: + case apps.RecreateDeploymentStrategyType: return *(deployment.Spec.Replicas), nil default: return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) @@ -848,7 +849,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Both the deployment and the replica set have to believe this replica set can own all of the desired // replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet // need to be available. 
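// Illustrative sketch, not part of the patch: how NewRSNewReplicas above sizes
// the new ReplicaSet during a rolling update on apps/v1 types. The replica
// counts and the maxSurge of 2 are arbitrary example inputs; the
// "deploymentutil" alias is an assumption.
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func rsWithReplicas(replicas int32) *apps.ReplicaSet {
	return &apps.ReplicaSet{Spec: apps.ReplicaSetSpec{Replicas: &replicas}}
}

func main() {
	desired := int32(10)
	maxSurge := intstr.FromInt(2)
	d := &apps.Deployment{
		Spec: apps.DeploymentSpec{
			Replicas: &desired,
			Strategy: apps.DeploymentStrategy{
				Type:          apps.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &apps.RollingUpdateDeployment{MaxSurge: &maxSurge},
			},
		},
	}

	oldRS := rsWithReplicas(8)
	newRS := rsWithReplicas(2)

	// 10 pods exist and 12 are allowed (10 desired + 2 surge), so the new
	// ReplicaSet may grow from 2 to 4 replicas.
	n, err := deploymentutil.NewRSNewReplicas(d, []*apps.ReplicaSet{oldRS, newRS}, newRS)
	fmt.Println(n, err) // 4 <nil>
}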
-func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { +func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { if rs == nil { return false } @@ -864,7 +865,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b // WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // Returns error if polling timesout. -func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { +func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. return wait.PollImmediate(interval, timeout, func() (bool, error) { deployment, err := getDeploymentFunc() diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index 0f71f8ae763..1d90e848d0d 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -25,8 +25,8 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -53,7 +53,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl } func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { - rsList, ok := obj.(*extensions.ReplicaSetList) + rsList, ok := obj.(*apps.ReplicaSetList) fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { name := action.(core.GetAction).GetName() if ok { @@ -71,7 +71,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset { fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet) + obj := action.(core.UpdateAction).GetObject().(*apps.ReplicaSet) return true, obj, nil }) return fakeClient @@ -85,13 +85,13 @@ func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset { return fakeClient } -func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet { - return extensions.ReplicaSet{ +func generateRSWithLabel(labels map[string]string, image string) apps.ReplicaSet { + return apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName("replicaset"), Labels: labels, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: &metav1.LabelSelector{MatchLabels: labels}, Template: v1.PodTemplateSpec{ @@ -113,10 +113,10 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl } } -func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference { +func newDControllerRef(d *apps.Deployment) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ - APIVersion: "extensions/v1beta1", + APIVersion: "apps/v1", Kind: 
"Deployment", Name: d.GetName(), UID: d.GetUID(), @@ -125,16 +125,16 @@ func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference { } // generateRS creates a replica set, with the input deployment's template as its template -func generateRS(deployment extensions.Deployment) extensions.ReplicaSet { +func generateRS(deployment apps.Deployment) apps.ReplicaSet { template := deployment.Spec.Template.DeepCopy() - return extensions.ReplicaSet{ + return apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ UID: randomUID(), Name: names.SimpleNameGenerator.GenerateName("replicaset"), Labels: template.Labels, OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: new(int32), Template: *template, Selector: &metav1.LabelSelector{MatchLabels: template.Labels}, @@ -147,15 +147,15 @@ func randomUID() types.UID { } // generateDeployment creates a deployment, with the input image as its template -func generateDeployment(image string) extensions.Deployment { +func generateDeployment(image string) apps.Deployment { podLabels := map[string]string{"name": image} terminationSec := int64(30) - return extensions.Deployment{ + return apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: image, Annotations: make(map[string]string), }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: &metav1.LabelSelector{MatchLabels: podLabels}, Template: v1.PodTemplateSpec{ @@ -188,14 +188,14 @@ func TestGetNewRS(t *testing.T) { tests := []struct { Name string objs []runtime.Object - expected *extensions.ReplicaSet + expected *apps.ReplicaSet }{ { "No new ReplicaSet", []runtime.Object{ &v1.PodList{}, - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), generateRS(generateDeployment("bar")), }, @@ -207,8 +207,8 @@ func TestGetNewRS(t *testing.T) { "Has new ReplicaSet", []runtime.Object{ &v1.PodList{}, - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), generateRS(generateDeployment("bar")), generateRS(generateDeployment("abc")), @@ -228,7 +228,7 @@ func TestGetNewRS(t *testing.T) { fakeClient = addListRSReactor(fakeClient, test.objs[1]) fakeClient = addUpdatePodsReactor(fakeClient) fakeClient = addUpdateRSReactor(fakeClient) - rs, err := GetNewReplicaSet(&newDeployment, fakeClient.ExtensionsV1beta1()) + rs, err := GetNewReplicaSet(&newDeployment, fakeClient.AppsV1()) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -262,13 +262,13 @@ func TestGetOldRSs(t *testing.T) { tests := []struct { Name string objs []runtime.Object - expected []*extensions.ReplicaSet + expected []*apps.ReplicaSet }{ { "No old ReplicaSets", []runtime.Object{ - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), newRS, generateRS(generateDeployment("bar")), @@ -280,8 +280,8 @@ func TestGetOldRSs(t *testing.T) { { "Has old ReplicaSet", []runtime.Object{ - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ oldRS2, oldRS, existedRS, @@ -291,7 +291,7 @@ func TestGetOldRSs(t *testing.T) { }, }, }, - []*extensions.ReplicaSet{&oldRS, &oldRS2}, + []*apps.ReplicaSet{&oldRS, &oldRS2}, }, } @@ 
-301,7 +301,7 @@ func TestGetOldRSs(t *testing.T) { fakeClient = addListRSReactor(fakeClient, test.objs[0]) fakeClient = addGetRSReactor(fakeClient, test.objs[0]) fakeClient = addUpdateRSReactor(fakeClient) - _, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.ExtensionsV1beta1()) + _, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.AppsV1()) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -340,56 +340,56 @@ func TestEqualIgnoreHash(t *testing.T) { }{ { "Same spec, same labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), true, }, { "Same spec, only pod-template-hash label value is different", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), true, }, { "Same spec, the former doesn't have pod-template-hash label", generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), true, }, { "Same spec, the label is different, the former doesn't have pod-template-hash label, same number of labels", generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2"}), false, }, { "Same spec, the label is different, the latter doesn't have pod-template-hash label, same number of labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}), generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), false, }, { "Same spec, the label is different, and the pod-template-hash label value is the same", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, 
map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), false, }, { "Different spec, same labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), false, }, { "Different spec, different pod-template-hash label value", - generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), false, }, { "Different spec, the former doesn't have pod-template-hash label", generatePodTemplateSpec("foo-1", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), false, }, { @@ -431,11 +431,11 @@ func TestFindNewReplicaSet(t *testing.T) { deployment := generateDeployment("nginx") newRS := generateRS(deployment) - newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash" newRS.CreationTimestamp = later newRSDup := generateRS(deployment) - newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash" + newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash" newRSDup.CreationTimestamp = now oldDeployment := generateDeployment("nginx") @@ -445,26 +445,26 @@ func TestFindNewReplicaSet(t *testing.T) { tests := []struct { Name string - deployment extensions.Deployment - rsList []*extensions.ReplicaSet - expected *extensions.ReplicaSet + deployment apps.Deployment + rsList []*apps.ReplicaSet + expected *apps.ReplicaSet }{ { Name: "Get new ReplicaSet with the same template as Deployment spec but different pod-template-hash value", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, 
&oldRS}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS}, expected: &newRS, }, { Name: "Get the oldest new ReplicaSet when there are more than one ReplicaSet with the same template", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, &oldRS, &newRSDup}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS, &newRSDup}, expected: &newRSDup, }, { Name: "Get nil new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&oldRS}, expected: nil, }, } @@ -486,11 +486,11 @@ func TestFindOldReplicaSets(t *testing.T) { deployment := generateDeployment("nginx") newRS := generateRS(deployment) *(newRS.Spec.Replicas) = 1 - newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash" newRS.CreationTimestamp = later newRSDup := generateRS(deployment) - newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash" + newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash" newRSDup.CreationTimestamp = now oldDeployment := generateDeployment("nginx") @@ -501,37 +501,37 @@ func TestFindOldReplicaSets(t *testing.T) { tests := []struct { Name string - deployment extensions.Deployment - rsList []*extensions.ReplicaSet + deployment apps.Deployment + rsList []*apps.ReplicaSet podList *v1.PodList - expected []*extensions.ReplicaSet - expectedRequire []*extensions.ReplicaSet + expected []*apps.ReplicaSet + expectedRequire []*apps.ReplicaSet }{ { Name: "Get old ReplicaSets", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, &oldRS}, - expected: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS}, + expected: []*apps.ReplicaSet{&oldRS}, expectedRequire: nil, }, { Name: "Get old ReplicaSets with no new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS}, - expected: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&oldRS}, + expected: []*apps.ReplicaSet{&oldRS}, expectedRequire: nil, }, { Name: "Get old ReplicaSets with two new ReplicaSets, only the oldest new ReplicaSet is seen as new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS, &newRS, &newRSDup}, - expected: []*extensions.ReplicaSet{&oldRS, &newRS}, - expectedRequire: []*extensions.ReplicaSet{&newRS}, + rsList: []*apps.ReplicaSet{&oldRS, &newRS, &newRSDup}, + expected: []*apps.ReplicaSet{&oldRS, &newRS}, + expectedRequire: []*apps.ReplicaSet{&newRS}, }, { Name: "Get empty old ReplicaSets", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS}, + rsList: []*apps.ReplicaSet{&newRS}, expected: nil, expectedRequire: nil, }, @@ -554,7 +554,7 @@ func TestFindOldReplicaSets(t *testing.T) { } // equal compares the equality of two ReplicaSet slices regardless of their ordering -func equal(rss1, rss2 []*extensions.ReplicaSet) bool { +func equal(rss1, rss2 []*apps.ReplicaSet) bool { if reflect.DeepEqual(rss1, rss2) { return true } @@ -583,19 +583,19 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) { tests := []struct { Name string - sets []*extensions.ReplicaSet + sets []*apps.ReplicaSet expectedCount int32 expectedActual int32 }{ { "1:2 Replicas", - []*extensions.ReplicaSet{&rs1}, + []*apps.ReplicaSet{&rs1}, 1, 2, }, { "3:5 Replicas", - []*extensions.ReplicaSet{&rs1, &rs2}, + []*apps.ReplicaSet{&rs1, &rs2}, 3, 5, }, @@ -679,7 +679,7 @@ func TestResolveFenceposts(t *testing.T) { func TestNewRSNewReplicas(t *testing.T) { tests := []struct { Name string - strategyType 
extensions.DeploymentStrategyType + strategyType apps.DeploymentStrategyType depReplicas int32 newRSReplicas int32 maxSurge int @@ -687,17 +687,17 @@ func TestNewRSNewReplicas(t *testing.T) { }{ { "can not scale up - to newRSReplicas", - extensions.RollingUpdateDeploymentStrategyType, + apps.RollingUpdateDeploymentStrategyType, 1, 5, 1, 5, }, { "scale up - to depReplicas", - extensions.RollingUpdateDeploymentStrategyType, + apps.RollingUpdateDeploymentStrategyType, 6, 2, 10, 6, }, { "recreate - to depReplicas", - extensions.RecreateDeploymentStrategyType, + apps.RecreateDeploymentStrategyType, 3, 1, 1, 3, }, } @@ -709,8 +709,8 @@ func TestNewRSNewReplicas(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { *(newDeployment.Spec.Replicas) = test.depReplicas - newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType} - newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ + newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType} + newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i) return &x @@ -721,7 +721,7 @@ func TestNewRSNewReplicas(t *testing.T) { }(test.maxSurge), } *(newRC.Spec.Replicas) = test.newRSReplicas - rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC) + rs, err := NewRSNewReplicas(&newDeployment, []*apps.ReplicaSet{&rs5}, &newRC) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -733,33 +733,33 @@ func TestNewRSNewReplicas(t *testing.T) { } var ( - condProgressing = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + condProgressing = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: "ForSomeReason", } } - condProgressing2 = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + condProgressing2 = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: "BecauseItIs", } } - condAvailable = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentAvailable, + condAvailable = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentAvailable, Status: v1.ConditionTrue, Reason: "AwesomeController", } } - status = func() *extensions.DeploymentStatus { - return &extensions.DeploymentStatus{ - Conditions: []extensions.DeploymentCondition{condProgressing(), condAvailable()}, + status = func() *apps.DeploymentStatus { + return &apps.DeploymentStatus{ + Conditions: []apps.DeploymentCondition{condProgressing(), condAvailable()}, } } ) @@ -770,8 +770,8 @@ func TestGetCondition(t *testing.T) { tests := []struct { name string - status extensions.DeploymentStatus - condType extensions.DeploymentConditionType + status apps.DeploymentStatus + condType apps.DeploymentConditionType expected bool }{ @@ -779,7 +779,7 @@ func TestGetCondition(t *testing.T) { name: "condition exists", status: *exampleStatus, - condType: extensions.DeploymentAvailable, + condType: apps.DeploymentAvailable, expected: true, }, @@ -787,7 +787,7 @@ func TestGetCondition(t *testing.T) { name: "condition does not exist", status: *exampleStatus, - condType: 
extensions.DeploymentReplicaFailure, + condType: apps.DeploymentReplicaFailure, expected: false, }, @@ -808,23 +808,23 @@ func TestSetCondition(t *testing.T) { tests := []struct { name string - status *extensions.DeploymentStatus - cond extensions.DeploymentCondition + status *apps.DeploymentStatus + cond apps.DeploymentCondition - expectedStatus *extensions.DeploymentStatus + expectedStatus *apps.DeploymentStatus }{ { name: "set for the first time", - status: &extensions.DeploymentStatus{}, + status: &apps.DeploymentStatus{}, cond: condAvailable(), - expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condAvailable()}}, + expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condAvailable()}}, }, { name: "simple set", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, cond: condAvailable(), expectedStatus: status(), @@ -832,10 +832,10 @@ func TestSetCondition(t *testing.T) { { name: "overwrite", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, cond: condProgressing2(), - expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing2()}}, + expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing2()}}, }, } @@ -853,32 +853,32 @@ func TestRemoveCondition(t *testing.T) { tests := []struct { name string - status *extensions.DeploymentStatus - condType extensions.DeploymentConditionType + status *apps.DeploymentStatus + condType apps.DeploymentConditionType - expectedStatus *extensions.DeploymentStatus + expectedStatus *apps.DeploymentStatus }{ { name: "remove from empty status", - status: &extensions.DeploymentStatus{}, - condType: extensions.DeploymentProgressing, + status: &apps.DeploymentStatus{}, + condType: apps.DeploymentProgressing, - expectedStatus: &extensions.DeploymentStatus{}, + expectedStatus: &apps.DeploymentStatus{}, }, { name: "simple remove", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, - condType: extensions.DeploymentProgressing, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, + condType: apps.DeploymentProgressing, - expectedStatus: &extensions.DeploymentStatus{}, + expectedStatus: &apps.DeploymentStatus{}, }, { name: "doesn't remove anything", status: status(), - condType: extensions.DeploymentReplicaFailure, + condType: apps.DeploymentReplicaFailure, expectedStatus: status(), }, @@ -895,19 +895,19 @@ func TestRemoveCondition(t *testing.T) { } func TestDeploymentComplete(t *testing.T) { - deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *extensions.Deployment { - return &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *apps.Deployment { + return &apps.Deployment{ + Spec: apps.DeploymentSpec{ Replicas: &desired, - Strategy: extensions.DeploymentStrategy{ - RollingUpdate: &extensions.RollingUpdateDeployment{ + Strategy: apps.DeploymentStrategy{ + RollingUpdate: &apps.RollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)), 
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)), }, - Type: extensions.RollingUpdateDeploymentStrategyType, + Type: apps.RollingUpdateDeploymentStrategyType, }, }, - Status: extensions.DeploymentStatus{ + Status: apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, AvailableReplicas: available, @@ -918,7 +918,7 @@ func TestDeploymentComplete(t *testing.T) { tests := []struct { name string - d *extensions.Deployment + d *apps.Deployment expected bool }{ @@ -972,9 +972,9 @@ func TestDeploymentComplete(t *testing.T) { } func TestDeploymentProgressing(t *testing.T) { - deployment := func(current, updated, ready, available int32) *extensions.Deployment { - return &extensions.Deployment{ - Status: extensions.DeploymentStatus{ + deployment := func(current, updated, ready, available int32) *apps.Deployment { + return &apps.Deployment{ + Status: apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, ReadyReplicas: ready, @@ -982,8 +982,8 @@ func TestDeploymentProgressing(t *testing.T) { }, } } - newStatus := func(current, updated, ready, available int32) extensions.DeploymentStatus { - return extensions.DeploymentStatus{ + newStatus := func(current, updated, ready, available int32) apps.DeploymentStatus { + return apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, ReadyReplicas: ready, @@ -994,8 +994,8 @@ func TestDeploymentProgressing(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - newStatus extensions.DeploymentStatus + d *apps.Deployment + newStatus apps.DeploymentStatus expected bool }{ @@ -1075,13 +1075,13 @@ func TestDeploymentTimedOut(t *testing.T) { timeFn := func(min, sec int) time.Time { return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC) } - deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) extensions.Deployment { - return extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) apps.Deployment { + return apps.Deployment{ + Spec: apps.DeploymentSpec{ ProgressDeadlineSeconds: pds, }, - Status: extensions.DeploymentStatus{ - Conditions: []extensions.DeploymentCondition{ + Status: apps.DeploymentStatus{ + Conditions: []apps.DeploymentCondition{ { Type: condType, Status: status, @@ -1096,7 +1096,7 @@ func TestDeploymentTimedOut(t *testing.T) { tests := []struct { name string - d extensions.Deployment + d apps.Deployment nowFn func() time.Time expected bool @@ -1104,28 +1104,28 @@ func TestDeploymentTimedOut(t *testing.T) { { name: "no progressDeadlineSeconds specified - no timeout", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: false, }, { name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: true, }, { name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)), + d: 
deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: false, }, { name: "previous status was a complete deployment", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}), expected: false, }, } @@ -1141,23 +1141,23 @@ func TestDeploymentTimedOut(t *testing.T) { } func TestMaxUnavailable(t *testing.T) { - deployment := func(replicas int32, maxUnavailable intstr.IntOrString) extensions.Deployment { - return extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(replicas int32, maxUnavailable intstr.IntOrString) apps.Deployment { + return apps.Deployment{ + Spec: apps.DeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(replicas), - Strategy: extensions.DeploymentStrategy{ - RollingUpdate: &extensions.RollingUpdateDeployment{ + Strategy: apps.DeploymentStrategy{ + RollingUpdate: &apps.RollingUpdateDeployment{ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)), MaxUnavailable: &maxUnavailable, }, - Type: extensions.RollingUpdateDeploymentStrategyType, + Type: apps.RollingUpdateDeploymentStrategyType, }, }, } } tests := []struct { name string - deployment extensions.Deployment + deployment apps.Deployment expected int32 }{ { @@ -1182,10 +1182,10 @@ func TestMaxUnavailable(t *testing.T) { }, { name: "maxUnavailable with Recreate deployment strategy", - deployment: extensions.Deployment{ - Spec: extensions.DeploymentSpec{ - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RecreateDeploymentStrategyType, + deployment: apps.Deployment{ + Spec: apps.DeploymentSpec{ + Strategy: apps.DeploymentStrategy{ + Type: apps.RecreateDeploymentStrategyType, }, }, }, @@ -1285,14 +1285,14 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { tests := []struct { name string - replicaSet *extensions.ReplicaSet + replicaSet *apps.ReplicaSet expected bool }{ { name: "test Annotations nil", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1300,13 +1300,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test desiredReplicas update", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1314,13 +1314,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test maxReplicas update", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1328,13 +1328,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test needn't update", - replicaSet: 
&extensions.ReplicaSet{
+			replicaSet: &apps.ReplicaSet{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:        "hello",
 					Namespace:   "test",
 					Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas},
 				},
-				Spec: extensions.ReplicaSetSpec{
+				Spec: apps.ReplicaSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
 				},
 			},
diff --git a/pkg/controller/deployment/util/replicaset_util.go b/pkg/controller/deployment/util/replicaset_util.go
index 83e68a0a209..bf05a59278a 100644
--- a/pkg/controller/deployment/util/replicaset_util.go
+++ b/pkg/controller/deployment/util/replicaset_util.go
@@ -19,21 +19,21 @@ package util
 import (
 	"github.com/golang/glog"
 
-	extensions "k8s.io/api/extensions/v1beta1"
+	apps "k8s.io/api/apps/v1"
 	errorsutil "k8s.io/apimachinery/pkg/util/errors"
-	unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
-	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
+	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
+	appslisters "k8s.io/client-go/listers/apps/v1"
 	"k8s.io/client-go/util/retry"
 )
 
 // TODO: use client library instead when it starts to support update retries
 // see https://github.com/kubernetes/kubernetes/issues/21479
-type updateRSFunc func(rs *extensions.ReplicaSet) error
+type updateRSFunc func(rs *apps.ReplicaSet) error
 
 // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
 // The returned bool value can be used to tell if the RS is actually updated.
-func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
-	var rs *extensions.ReplicaSet
+func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
+	var rs *apps.ReplicaSet
 	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
 		var err error