mirror of https://github.com/k3s-io/kubernetes.git
cmd/kube-controller-manager
@@ -27,12 +27,12 @@ import (
    "time"

    "github.com/golang/glog"
-   "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/unversioned"
-   "k8s.io/kubernetes/pkg/apis/extensions"
+   "k8s.io/kubernetes/pkg/api/v1"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/client/cache"
-   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-   unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+   v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/deployment/util"

@@ -91,14 +91,14 @@ func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    // TODO: remove the wrapper when every clients have moved to use the clientset.
-   eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")})
+   eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})

    if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
    }
    dc := &DeploymentController{
        client: client,
-       eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
+       eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
    }
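Note: a minimal sketch of the versioned event-recorder wiring this hunk switches to. It only rearranges calls that appear in the diff above; "client" is assumed to be a release_1_5 clientset and the package aliases match the new imports.

func newRecorder(client clientset.Interface) record.EventRecorder {
    eventBroadcaster := record.NewBroadcaster()
    // Mirror events to the logs.
    eventBroadcaster.StartLogging(glog.Infof)
    // Write events through the versioned core client rather than the
    // internal-version client.
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})
    return eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"})
}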
@@ -220,7 +220,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
    }
    // A number of things could affect the old deployment: labels changing,
    // pod template changing, etc.
-   if !api.Semantic.DeepEqual(oldRS, curRS) {
+   if !v1.Semantic.DeepEqual(oldRS, curRS) {
        if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
            dc.enqueueDeployment(oldD)
        }

@@ -333,7 +333,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {

    everything := unversioned.LabelSelector{}
    if reflect.DeepEqual(d.Spec.Selector, &everything) {
-       dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
+       dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
        if d.Status.ObservedGeneration < d.Generation {
            d.Status.ObservedGeneration = d.Generation
            dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)

@@ -347,7 +347,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {

    // Handle overlapping deployments by deterministically avoid syncing deployments that fight over ReplicaSets.
    if err = dc.handleOverlap(d); err != nil {
-       dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectorOverlap", err.Error())
+       dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectorOverlap", err.Error())
        return nil
    }

@@ -20,11 +20,11 @@ import (
    "fmt"
    "testing"

-   "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/unversioned"
+   "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/apimachinery/registered"
-   "k8s.io/kubernetes/pkg/apis/extensions"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+   "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/controller"

@@ -41,15 +41,15 @@ var (

func rs(name string, replicas int, selector map[string]string, timestamp unversioned.Time) *extensions.ReplicaSet {
    return &extensions.ReplicaSet{
-       ObjectMeta: api.ObjectMeta{
+       ObjectMeta: v1.ObjectMeta{
            Name: name,
            CreationTimestamp: timestamp,
-           Namespace: api.NamespaceDefault,
+           Namespace: v1.NamespaceDefault,
        },
        Spec: extensions.ReplicaSetSpec{
-           Replicas: int32(replicas),
+           Replicas: func() *int32 { i := int32(replicas); return &i }(),
            Selector: &unversioned.LabelSelector{MatchLabels: selector},
-           Template: api.PodTemplateSpec{},
+           Template: v1.PodTemplateSpec{},
        },
    }
}

@@ -65,24 +65,27 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
    d := extensions.Deployment{
        TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()},
-       ObjectMeta: api.ObjectMeta{
+       ObjectMeta: v1.ObjectMeta{
            UID: uuid.NewUUID(),
            Name: name,
-           Namespace: api.NamespaceDefault,
+           Namespace: v1.NamespaceDefault,
        },
        Spec: extensions.DeploymentSpec{
            Strategy: extensions.DeploymentStrategy{
                Type: extensions.RollingUpdateDeploymentStrategyType,
-               RollingUpdate: &extensions.RollingUpdateDeployment{},
+               RollingUpdate: &extensions.RollingUpdateDeployment{
+                   MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
+                   MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
+               },
            },
-           Replicas: int32(replicas),
+           Replicas: func() *int32 { i := int32(replicas); return &i }(),
            Selector: &unversioned.LabelSelector{MatchLabels: selector},
-           Template: api.PodTemplateSpec{
-               ObjectMeta: api.ObjectMeta{
+           Template: v1.PodTemplateSpec{
+               ObjectMeta: v1.ObjectMeta{
                    Labels: selector,
                },
-               Spec: api.PodSpec{
-                   Containers: []api.Container{
+               Spec: v1.PodSpec{
+                   Containers: []v1.Container{
                        {
                            Image: "foo/bar",
                        },

@@ -93,22 +96,22 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
        },
    }
    if maxSurge != nil {
-       d.Spec.Strategy.RollingUpdate.MaxSurge = *maxSurge
+       d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
    }
    if maxUnavailable != nil {
-       d.Spec.Strategy.RollingUpdate.MaxUnavailable = *maxUnavailable
+       d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
    }
    return &d
}

func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
    return &extensions.ReplicaSet{
-       ObjectMeta: api.ObjectMeta{
+       ObjectMeta: v1.ObjectMeta{
            Name: name,
-           Namespace: api.NamespaceDefault,
+           Namespace: v1.NamespaceDefault,
        },
        Spec: extensions.ReplicaSetSpec{
-           Replicas: int32(replicas),
+           Replicas: func() *int32 { i := int32(replicas); return &i }(),
            Template: d.Spec.Template,
        },
    }
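Note: the inline closures in the test hunks above exist because v1beta1 spec fields such as ReplicaSetSpec.Replicas are *int32 rather than int32, and Go has no pointer literals. A tiny helper (hypothetical, not part of this commit) expresses the same thing once:

func int32Ptr(i int32) *int32 { return &i }

// e.g. Spec: extensions.ReplicaSetSpec{Replicas: int32Ptr(int32(replicas))}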
@@ -130,7 +133,7 @@ type fixture struct {
    // Objects to put in the store.
    dLister []*extensions.Deployment
    rsLister []*extensions.ReplicaSet
-   podLister []*api.Pod
+   podLister []*v1.Pod

    // Actions expected to happen on the client. Objects from here are also
    // preloaded into NewSimpleFake.

@@ -161,7 +164,7 @@ func newFixture(t *testing.T) *fixture {

func (f *fixture) run(deploymentName string) {
    f.client = fake.NewSimpleClientset(f.objects...)
-   informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc())
+   informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
    c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
    c.eventRecorder = &record.FakeRecorder{}
    c.dListerSynced = alwaysReady

@@ -234,7 +237,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
    fake := &fake.Clientset{}
-   informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
+   informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
    controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
    controller.eventRecorder = &record.FakeRecorder{}
    controller.dListerSynced = alwaysReady

@@ -20,8 +20,8 @@ import (
    "fmt"
    "reflect"

-   "k8s.io/kubernetes/pkg/api"
-   "k8s.io/kubernetes/pkg/apis/extensions"
+   "k8s.io/kubernetes/pkg/api/v1"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/controller/deployment/util"
)

@@ -95,14 +95,14 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
        // Update the deployment conditions with a message for the new replica set that
        // was successfully deployed. If the condition already exists, we ignore this update.
        msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name)
-       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.NewRSAvailableReason, msg)
+       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
        util.SetDeploymentCondition(&newStatus, *condition)

    case util.DeploymentProgressing(d, &newStatus):
        // If there is any progress made, continue by not checking if the deployment failed. This
        // behavior emulates the rolling updater progressDeadline check.
        msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name)
-       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
+       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
        // Update the current Progressing condition or add a new one if it doesn't exist.
        // If a Progressing condition with status=true already exists, we should update
        // everything but lastTransitionTime. SetDeploymentCondition already does that but

@@ -111,7 +111,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
        // update with the same reason and change just lastUpdateTime iff we notice any
        // progress. That's why we handle it here.
        if currentCond != nil {
-           if currentCond.Status == api.ConditionTrue {
+           if currentCond.Status == v1.ConditionTrue {
                condition.LastTransitionTime = currentCond.LastTransitionTime
            }
            util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)

@@ -122,7 +122,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
        // Update the deployment with a timeout condition. If the condition already exists,
        // we ignore this update.
        msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name)
-       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, util.TimedOutReason, msg)
+       condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
        util.SetDeploymentCondition(&newStatus, *condition)
    }
}

@@ -19,7 +19,7 @@ package deployment
import (
    "fmt"

-   "k8s.io/kubernetes/pkg/apis/extensions"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/client/retry"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/util/wait"

@@ -82,7 +82,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext
    for i := range oldRSs {
        rs := oldRSs[i]
        // Scaling not required.
-       if rs.Spec.Replicas == 0 {
+       if *(rs.Spec.Replicas) == 0 {
            continue
        }
        scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)

@@ -104,7 +104,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
        rs := oldRSs[i]
        desiredGeneration := rs.Generation
        observedGeneration := rs.Status.ObservedGeneration
-       specReplicas := rs.Spec.Replicas
+       specReplicas := *(rs.Spec.Replicas)
        statusReplicas := rs.Status.Replicas

        if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {

@@ -113,13 +113,13 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
                return false, err
            }

-           specReplicas = replicaSet.Spec.Replicas
+           specReplicas = *(replicaSet.Spec.Replicas)
            statusReplicas = replicaSet.Status.Replicas
            observedGeneration = replicaSet.Status.ObservedGeneration

            // TODO: We also need to wait for terminating replicas to actually terminate.
            // See https://github.com/kubernetes/kubernetes/issues/32567
-           return observedGeneration >= desiredGeneration && replicaSet.Spec.Replicas == 0 && replicaSet.Status.Replicas == 0, nil
+           return observedGeneration >= desiredGeneration && *(replicaSet.Spec.Replicas) == 0 && replicaSet.Status.Replicas == 0, nil
        }); err != nil {
            if err == wait.ErrWaitTimeout {
                err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
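Note: the new code dereferences rs.Spec.Replicas unconditionally, relying on API defaulting to have populated the pointer before the controller sees the object. A defensive variant (illustrative only, not what the commit does) would be:

func replicasOf(rs *extensions.ReplicaSet) int32 {
    if rs.Spec.Replicas == nil {
        return 0
    }
    return *rs.Spec.Replicas
}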
@@ -133,6 +133,6 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.

// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
-   scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment)
+   scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
    return scaled, err
}

@@ -21,8 +21,8 @@ import (
    "reflect"

    "github.com/golang/glog"
-   "k8s.io/kubernetes/pkg/api"
-   "k8s.io/kubernetes/pkg/apis/extensions"
+   "k8s.io/kubernetes/pkg/api/v1"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

@@ -91,11 +91,11 @@ func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deploy
}

func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) {
-   dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, reason, message)
+   dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, reason, message)
}

func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) {
-   dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, deploymentutil.RollbackDone, message)
+   dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}

// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment

@@ -21,7 +21,7 @@ import (
    "sort"

    "github.com/golang/glog"
-   "k8s.io/kubernetes/pkg/apis/extensions"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/controller"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    "k8s.io/kubernetes/pkg/util/integer"

@@ -62,13 +62,13 @@ func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment
}

func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
-   if newRS.Spec.Replicas == deployment.Spec.Replicas {
+   if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
        // Scaling not required.
        return false, nil
    }
-   if newRS.Spec.Replicas > deployment.Spec.Replicas {
+   if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
        // Scale down.
-       scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment)
+       scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
        return scaled, err
    }
    newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
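Note: the dereferences here matter for correctness, not just style. With *int32 fields, newRS.Spec.Replicas == deployment.Spec.Replicas would compare the pointers themselves, which are almost never equal, so every pass would look like a pending scale. Comparing the pointed-to values is what the rewrite does:

if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
    // replica counts are in sync; no scaling required
}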
@@ -120,8 +120,8 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
    // * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
    // * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
    // allow the new replica set to be scaled up by 5.
-   minAvailable := deployment.Spec.Replicas - maxUnavailable
-   newRSUnavailablePodCount := newRS.Spec.Replicas - newRS.Status.AvailableReplicas
+   minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
+   newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
    maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
    if maxScaledDown <= 0 {
        return false, nil

@@ -158,20 +158,20 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
        if totalScaledDown >= maxCleanupCount {
            break
        }
-       if targetRS.Spec.Replicas == 0 {
+       if *(targetRS.Spec.Replicas) == 0 {
            // cannot scale down this replica set.
            continue
        }
        glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
-       if targetRS.Spec.Replicas == targetRS.Status.AvailableReplicas {
+       if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
            // no unhealthy replicas found, no scaling required.
            continue
        }

-       scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-targetRS.Status.AvailableReplicas)))
-       newReplicasCount := targetRS.Spec.Replicas - scaledDownCount
-       if newReplicasCount > targetRS.Spec.Replicas {
-           return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
+       scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
+       newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
+       if newReplicasCount > *(targetRS.Spec.Replicas) {
+           return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
        }
        _, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
        if err != nil {

@@ -189,7 +189,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
    maxUnavailable := deploymentutil.MaxUnavailable(*deployment)

    // Check if we can scale down.
-   minAvailable := deployment.Spec.Replicas - maxUnavailable
+   minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
    // Find the number of available pods.
    availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
    if availablePodCount <= minAvailable {

@@ -207,15 +207,15 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
            // No further scaling required.
            break
        }
-       if targetRS.Spec.Replicas == 0 {
+       if *(targetRS.Spec.Replicas) == 0 {
            // cannot scale down this ReplicaSet.
            continue
        }
        // Scale down.
-       scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown)))
-       newReplicasCount := targetRS.Spec.Replicas - scaleDownCount
-       if newReplicasCount > targetRS.Spec.Replicas {
-           return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
+       scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
+       newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
+       if newReplicasCount > *(targetRS.Spec.Replicas) {
+           return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
        }
        _, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
        if err != nil {

@@ -19,8 +19,8 @@ package deployment
import (
    "testing"

-   "k8s.io/kubernetes/pkg/apis/extensions"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+   "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/util/intstr"

@@ -110,7 +110,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
            continue
        }
        updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
-       if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a {
+       if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
            t.Errorf("expected update to %d replicas, got %d", e, a)
        }
    }

@@ -372,7 +372,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
            continue
        }
        updated := updateAction.GetObject().(*extensions.ReplicaSet)
-       if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
+       if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
            t.Errorf("expected update to %d replicas, got %d", e, a)
        }
    }

@@ -24,9 +24,11 @@ import (

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/api/errors"
    "k8s.io/kubernetes/pkg/api/unversioned"
-   "k8s.io/kubernetes/pkg/apis/extensions"
+   "k8s.io/kubernetes/pkg/api/v1"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
    "k8s.io/kubernetes/pkg/controller"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    utilerrors "k8s.io/kubernetes/pkg/util/errors"

@@ -80,11 +82,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)

    needsUpdate := false
    if d.Spec.Paused && !pausedCondExists {
-       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
+       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
        deploymentutil.SetDeploymentCondition(&d.Status, *condition)
        needsUpdate = true
    } else if !d.Spec.Paused && pausedCondExists {
-       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
+       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
        deploymentutil.SetDeploymentCondition(&d.Status, *condition)
        needsUpdate = true
    }

@@ -126,10 +128,14 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext
}

// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced.
-func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *api.PodList, error) {
+func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
    rsList, err := deploymentutil.ListReplicaSets(deployment,
-       func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) {
-           return dc.rsLister.ReplicaSets(namespace).List(options.LabelSelector)
+       func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
+           parsed, err := labels.Parse(options.LabelSelector)
+           if err != nil {
+               return nil, err
+           }
+           return dc.rsLister.ReplicaSets(namespace).List(parsed)
        })
    if err != nil {
        return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err)
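Note: versioned v1.ListOptions carries the label selector as a string, while the cache listers still take a labels.Selector, hence the parse step this and the following hunks add. A sketch of the round trip (selector is assumed to be a labels.Selector):

options := v1.ListOptions{LabelSelector: selector.String()}
parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
    return nil, err
}
rsList, err := rsLister.ReplicaSets(namespace).List(parsed)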
@@ -201,12 +207,16 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
    if err != nil {
        return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
    }
-   options := api.ListOptions{LabelSelector: selector}
-   pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector)
+   options := v1.ListOptions{LabelSelector: selector.String()}
+   parsed, err := labels.Parse(options.LabelSelector)
+   if err != nil {
+       return nil, err
+   }
+   pods, err := dc.podLister.Pods(namespace).List(parsed)
    if err != nil {
        return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err)
    }
-   podList := api.PodList{Items: make([]api.Pod, 0, len(pods))}
+   podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
    for i := range pods {
        podList.Items = append(podList.Items, *pods[i])
    }

@@ -253,11 +263,15 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
    return updatedRS, nil
}

-func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*api.PodList, error) {
+func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
    return deploymentutil.ListPods(deployment,
-       func(namespace string, options api.ListOptions) (*api.PodList, error) {
-           pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector)
-           result := api.PodList{Items: make([]api.Pod, 0, len(pods))}
+       func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
+           parsed, err := labels.Parse(options.LabelSelector)
+           if err != nil {
+               return nil, err
+           }
+           pods, err := dc.podLister.Pods(namespace).List(parsed)
+           result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
            for i := range pods {
                result.Items = append(result.Items, *pods[i])
            }

@@ -307,7 +321,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
        cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
        if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil {
            msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
-           condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
+           condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
            deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
            updateConditions = true
        }

@@ -333,13 +347,13 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme

    // Create new ReplicaSet
    newRS := extensions.ReplicaSet{
-       ObjectMeta: api.ObjectMeta{
+       ObjectMeta: v1.ObjectMeta{
            // Make the name deterministic, to ensure idempotence
            Name: deployment.Name + "-" + fmt.Sprintf("%d", podTemplateSpecHash),
            Namespace: namespace,
        },
        Spec: extensions.ReplicaSetSpec{
-           Replicas: 0,
+           Replicas: func(i int32) *int32 { return &i }(0),
            MinReadySeconds: deployment.Spec.MinReadySeconds,
            Selector: newRSSelector,
            Template: newRSTemplate,

@@ -351,7 +365,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
        return nil, err
    }

-   newRS.Spec.Replicas = newReplicasCount
+   *(newRS.Spec.Replicas) = newReplicasCount
    // Set new replica set's annotation
    deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false)
    createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS)

@@ -365,7 +379,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
    case err != nil:
        msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
        if deployment.Spec.ProgressDeadlineSeconds != nil {
-           cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
+           cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
            deploymentutil.SetDeploymentCondition(&deployment.Status, *cond)
            // We don't really care about this error at this point, since we have a bigger issue to report.
            // TODO: Update the rest of the Deployment status, too. We may need to do this every time we

@@ -375,17 +389,17 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
            // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
            _, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment)
        }
-       dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
+       dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
        return nil, err
    }
    if newReplicasCount > 0 {
-       dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
+       dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
    }

    deploymentutil.SetDeploymentRevision(deployment, newRevision)
    if deployment.Spec.ProgressDeadlineSeconds != nil {
        msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
-       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
+       condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
        deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
    }
    _, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)

@@ -401,10 +415,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
    // If there is only one active replica set then we should scale that up to the full count of the
    // deployment. If there is no active replica set, then we should scale up the newest replica set.
    if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
-       if activeOrLatest.Spec.Replicas == deployment.Spec.Replicas {
+       if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
            return nil
        }
-       _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, deployment.Spec.Replicas, deployment)
+       _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
        return err
    }

@@ -427,8 +441,8 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
    allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

    allowedSize := int32(0)
-   if deployment.Spec.Replicas > 0 {
-       allowedSize = deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
+   if *(deployment.Spec.Replicas) > 0 {
+       allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
    }

    // Number of additional replicas that can be either added or removed from the total

@@ -465,10 +479,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
        if deploymentReplicasToAdd != 0 {
            proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)

-           nameToSize[rs.Name] = rs.Spec.Replicas + proportion
+           nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
            deploymentReplicasAdded += proportion
        } else {
-           nameToSize[rs.Name] = rs.Spec.Replicas
+           nameToSize[rs.Name] = *(rs.Spec.Replicas)
        }
    }

@@ -497,11 +511,11 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *

func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
    // No need to scale
-   if rs.Spec.Replicas == newScale {
+   if *(rs.Spec.Replicas) == newScale {
        return false, rs, nil
    }
    var scalingOperation string
-   if rs.Spec.Replicas < newScale {
+   if *(rs.Spec.Replicas) < newScale {
        scalingOperation = "up"
    } else {
        scalingOperation = "down"

@@ -517,14 +531,14 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
    }
    rsCopy := objCopy.(*extensions.ReplicaSet)

-   sizeNeedsUpdate := rsCopy.Spec.Replicas != newScale
-   annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, deployment.Spec.Replicas, deployment.Spec.Replicas+deploymentutil.MaxSurge(*deployment))
+   sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale
+   annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))

    if sizeNeedsUpdate || annotationsNeedUpdate {
-       rsCopy.Spec.Replicas = newScale
+       *(rsCopy.Spec.Replicas) = newScale
        rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
        if err == nil && sizeNeedsUpdate {
-           dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
+           dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
        }
    }
    return rs, err
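Note: *(rsCopy.Spec.Replicas) = newScale writes the new count through the existing pointer, which is safe here because rsCopy is a deep copy. Assigning a fresh pointer (illustrative alternative, not what the commit does) sidesteps any question of aliasing:

newReplicas := newScale
rsCopy.Spec.Replicas = &newReplicas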
@@ -549,7 +563,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
    for i := int32(0); i < diff; i++ {
        rs := oldRSs[i]
        // Avoid delete replica set with non-zero replica counts
-       if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration {
+       if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
            continue
        }
        if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {

@@ -579,11 +593,11 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet,
    availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
    totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

-   if availableReplicas >= deployment.Spec.Replicas-deploymentutil.MaxUnavailable(*deployment) {
-       minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
+   if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
+       minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
        deploymentutil.SetDeploymentCondition(&deployment.Status, *minAvailability)
    } else {
-       noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
+       noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
        deploymentutil.SetDeploymentCondition(&deployment.Status, *noMinAvailability)
    }

@@ -611,7 +625,7 @@ func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool,
        if !ok {
            continue
        }
-       if desired != d.Spec.Replicas {
+       if desired != *(d.Spec.Replicas) {
            return true, nil
        }
    }

@@ -21,8 +21,8 @@ import (
    "time"

    "k8s.io/kubernetes/pkg/api/unversioned"
-   "k8s.io/kubernetes/pkg/apis/extensions"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+   "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/client/record"
    testclient "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/controller"

@@ -261,7 +261,7 @@ func TestScale(t *testing.T) {
        }

        if test.newRS != nil {
-           desiredReplicas := test.oldDeployment.Spec.Replicas
+           desiredReplicas := *(test.oldDeployment.Spec.Replicas)
            if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
                desiredReplicas = desired
            }

@@ -272,7 +272,7 @@ func TestScale(t *testing.T) {
            if rs == nil {
                continue
            }
-           desiredReplicas := test.oldDeployment.Spec.Replicas
+           desiredReplicas := *(test.oldDeployment.Spec.Replicas)
            if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
                desiredReplicas = desired
            }

@@ -289,22 +289,22 @@ func TestScale(t *testing.T) {
        // no update action for it.
        nameToSize := make(map[string]int32)
        if test.newRS != nil {
-           nameToSize[test.newRS.Name] = test.newRS.Spec.Replicas
+           nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
        }
        for i := range test.oldRSs {
            rs := test.oldRSs[i]
-           nameToSize[rs.Name] = rs.Spec.Replicas
+           nameToSize[rs.Name] = *(rs.Spec.Replicas)
        }
        // Get all the UPDATE actions and update nameToSize with all the updated sizes.
        for _, action := range fake.Actions() {
            rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
            if !test.wasntUpdated[rs.Name] {
-               nameToSize[rs.Name] = rs.Spec.Replicas
+               nameToSize[rs.Name] = *(rs.Spec.Replicas)
            }
        }

-       if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != nameToSize[test.newRS.Name] {
-           t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, nameToSize[test.newRS.Name])
+       if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
+           t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
            continue
        }
        if len(test.expectedOld) != len(test.oldRSs) {

@@ -314,8 +314,8 @@ func TestScale(t *testing.T) {
        for n := range test.oldRSs {
            rs := test.oldRSs[n]
            expected := test.expectedOld[n]
-           if expected.Spec.Replicas != nameToSize[rs.Name] {
-               t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, expected.Spec.Replicas, nameToSize[rs.Name])
+           if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
+               t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
            }
        }
    }

@@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
    for i := range tests {
        test := tests[i]
        fake := &fake.Clientset{}
-       informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
+       informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
        controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)

        controller.eventRecorder = &record.FakeRecorder{}

@@ -29,8 +29,10 @@ import (
    "k8s.io/kubernetes/pkg/api/annotations"
    "k8s.io/kubernetes/pkg/api/meta"
    "k8s.io/kubernetes/pkg/api/unversioned"
-   "k8s.io/kubernetes/pkg/apis/extensions"
-   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+   "k8s.io/kubernetes/pkg/api/v1"
+   internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
+   extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/runtime"

@@ -108,7 +110,7 @@ const (
)

// NewDeploymentCondition creates a new deployment condition.
-func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
+func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
    return &extensions.DeploymentCondition{
        Type: condType,
        Status: status,
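Note: after the signature change, callers pass v1.ConditionStatus constants instead of api.* ones, while the condition type remains an extensions type. Illustrative call (the reason and message strings are made up for the example):

cond := NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, "ExampleReason", "example message")
SetDeploymentCondition(&deployment.Status, *cond)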
@@ -266,7 +268,7 @@ func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten
        }
    }
    // If the new replica set is about to be created, we need to add replica annotations to it.
-   if !exists && SetReplicasAnnotations(newRS, deployment.Spec.Replicas, deployment.Spec.Replicas+MaxSurge(*deployment)) {
+   if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
        annotationChanged = true
    }
    return annotationChanged

@@ -404,7 +406,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
        return int32(0)
    }
    // Error caught by validation
-   _, maxUnavailable, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
+   _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
    return maxUnavailable
}

@@ -413,7 +415,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 {
    if !IsRollingUpdate(deployment) {
        return int32(0)
    }
-   return deployment.Spec.Replicas - MaxUnavailable(*deployment)
+   return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
}

// MaxSurge returns the maximum surge pods a rolling deployment can take.

@@ -422,7 +424,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
        return int32(0)
    }
    // Error caught by validation
-   maxSurge, _, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
+   maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
    return maxSurge
}
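Note: in v1beta1, RollingUpdateDeployment.MaxSurge and MaxUnavailable are already *intstr.IntOrString, so ResolveFenceposts now receives the pointers directly instead of addresses of value fields. Illustrative call matching the new form:

maxSurge, maxUnavailable, err := ResolveFenceposts(
    deployment.Spec.Strategy.RollingUpdate.MaxSurge,
    deployment.Spec.Strategy.RollingUpdate.MaxUnavailable,
    *(deployment.Spec.Replicas))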
@@ -430,7 +432,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
// of the parent deployment, 2. the replica count that needs be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
-   if rs == nil || rs.Spec.Replicas == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
+   if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
        return int32(0)
    }

@@ -453,11 +455,11 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 {
    // If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
-   if d.Spec.Replicas == int32(0) {
-       return -rs.Spec.Replicas
+   if *(d.Spec.Replicas) == int32(0) {
+       return -*(rs.Spec.Replicas)
    }

-   deploymentReplicas := d.Spec.Replicas + MaxSurge(d)
+   deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
    annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
    if !ok {
        // If we cannot find the annotation then fallback to the current deployment size. Note that this

@@ -469,8 +471,8 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in

    // We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
    // will never be zero here.
-   newRSsize := (float64(rs.Spec.Replicas * deploymentReplicas)) / float64(annotatedReplicas)
-   return integer.RoundToInt32(newRSsize) - rs.Spec.Replicas
+   newRSsize := (float64(*(rs.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas)
+   return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}

// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
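Note: a worked example of the fraction math above, with illustrative numbers: if *(rs.Spec.Replicas) = 5, annotatedReplicas = 10, and *(d.Spec.Replicas) + MaxSurge(d) = 20, then newRSsize = 5 * 20 / 10 = 10 and the returned fraction is RoundToInt32(10) - 5 = +5, i.e. the replica set grows in proportion to its share of the annotated total.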
@@ -523,7 +525,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface)
|
||||
// listReplicaSets lists all RSes the given deployment targets with the given client interface.
|
||||
func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
|
||||
return ListReplicaSets(deployment,
|
||||
func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) {
|
||||
func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
|
||||
rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -537,16 +539,16 @@ func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) (
|
||||
}
|
||||
|
||||
// listReplicaSets lists all Pods the given deployment targets with the given client interface.
|
||||
func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) {
|
||||
func listPods(deployment *extensions.Deployment, c clientset.Interface) (*v1.PodList, error) {
|
||||
return ListPods(deployment,
|
||||
func(namespace string, options api.ListOptions) (*api.PodList, error) {
|
||||
func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
|
||||
return c.Core().Pods(namespace).List(options)
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: switch this to full namespacers
|
||||
type rsListFunc func(string, api.ListOptions) ([]*extensions.ReplicaSet, error)
|
||||
type podListFunc func(string, api.ListOptions) (*api.PodList, error)
|
||||
type rsListFunc func(string, v1.ListOptions) ([]*extensions.ReplicaSet, error)
|
||||
type podListFunc func(string, v1.ListOptions) (*v1.PodList, error)
|
||||
|
||||
// ListReplicaSets returns a slice of RSes the given deployment targets.
|
||||
func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) {
|
||||
@@ -558,18 +560,18 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options := api.ListOptions{LabelSelector: selector}
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
return getRSList(namespace, options)
|
||||
}
|
||||
|
||||
// ListPods returns a list of pods the given deployment targets.
|
||||
func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) {
|
||||
func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*v1.PodList, error) {
|
||||
namespace := deployment.Namespace
|
||||
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options := api.ListOptions{LabelSelector: selector}
|
||||
options := v1.ListOptions{LabelSelector: selector.String()}
|
||||
return getPodList(namespace, options)
|
||||
}
|
||||
|
||||
@@ -577,7 +579,7 @@ func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.P
|
||||
// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes
|
||||
// (e.g. the addition of a new field will cause the hash code to change)
|
||||
// Note that we assume input podTemplateSpecs contain non-empty labels
|
||||
func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {
|
||||
func equalIgnoreHash(template1, template2 v1.PodTemplateSpec) (bool, error) {
|
||||
// First, compare template.Labels (ignoring hash)
|
||||
labels1, labels2 := template1.Labels, template2.Labels
|
||||
// The podTemplateSpec must have a non-empty label so that label selectors can find them.
|
||||
@@ -597,7 +599,7 @@ func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {
|
||||
|
||||
// Then, compare the templates without comparing their labels
|
||||
template1.Labels, template2.Labels = nil, nil
|
||||
result := api.Semantic.DeepEqual(template1, template2)
|
||||
result := v1.Semantic.DeepEqual(template1, template2)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -620,7 +622,7 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R
|
||||
|
||||
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes.
|
||||
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
|
||||
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
|
||||
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *v1.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
|
||||
// Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList.
|
||||
// All pods and replica sets are labeled with pod-template-hash to prevent overlapping
|
||||
oldRSs := map[string]*extensions.ReplicaSet{}
|
||||
@@ -679,19 +681,19 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na
|
||||
return false, err
|
||||
}
|
||||
return rs.Status.ObservedGeneration >= desiredGeneration &&
|
||||
rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil
|
||||
rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
|
||||
})
|
||||
}
|
||||
|
||||
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
|
||||
// The returned bool value can be used to tell if all pods are actually labeled.
|
||||
func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
|
||||
func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
|
||||
allPodsLabeled := true
|
||||
for _, pod := range podList.Items {
|
||||
// Only label the pod that doesn't already have the new hash
|
||||
if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
|
||||
if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
|
||||
func(podToUpdate *api.Pod) error {
|
||||
func(podToUpdate *v1.Pod) error {
|
||||
// Precondition: the pod doesn't contain the new hash in its label.
|
||||
if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
|
||||
return errors.ErrPreconditionViolated
|
||||
@@ -713,9 +715,9 @@ func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c client
}

// GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec {
func GetNewReplicaSetTemplate(deployment *extensions.Deployment) v1.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{
newRSTemplate := v1.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec,
}
@@ -726,8 +728,23 @@ func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplate
return newRSTemplate
}

// TODO: remove the duplicate
// GetNewInternalReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
func GetNewInternalReplicaSetTemplate(deployment *internalextensions.Deployment) api.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec,
}
newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
deployment.Spec.Template.ObjectMeta.Labels,
internalextensions.DefaultDeploymentUniqueLabelKey,
podutil.GetInternalPodTemplateSpecHash(newRSTemplate))
return newRSTemplate
}

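Both template builders attach the pod-template-hash label through labelsutil.CloneAndAddLabel, which copies the label map before adding the key so the deployment's own labels are never mutated. A rough sketch of that copy-then-add behavior, using only the standard library (this is illustrative, not the real labelsutil implementation):

package main

import "fmt"

// cloneAndAddLabel copies the map so the caller's labels stay untouched,
// then adds the hash key; nil input yields a one-entry map.
func cloneAndAddLabel(labels map[string]string, key, value string) map[string]string {
	newLabels := make(map[string]string, len(labels)+1)
	for k, v := range labels {
		newLabels[k] = v
	}
	newLabels[key] = value
	return newLabels
}

func main() {
	orig := map[string]string{"app": "nginx"}
	labeled := cloneAndAddLabel(orig, "pod-template-hash", "12345")
	fmt.Println(orig)    // map[app:nginx] — unchanged
	fmt.Println(labeled) // map[app:nginx pod-template-hash:12345]
}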
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment {
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
@@ -741,7 +758,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReplicas += rs.Spec.Replicas
totalReplicas += *(rs.Spec.Replicas)
}
}
return totalReplicas
@@ -772,7 +789,7 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet
// IsPodAvailable return true if the pod is available.
// TODO: Remove this once we start using replica set status for calculating available pods
// for a deployment.
func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now time.Time) bool {
if !controller.IsPodActive(pod) {
return false
}
@@ -780,7 +797,7 @@ func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
// If so, this pod is ready
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
glog.V(4).Infof("Comparing pod %s/%s ready condition last transition time %s + minReadySeconds %d with now %s.", pod.Namespace, pod.Name, c.LastTransitionTime.String(), minReadySeconds, now.String())
// 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available):
// 1. minReadySeconds == 0, or
@@ -802,8 +819,8 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
// DeploymentComplete considers a deployment to be complete once its desired replicas equals its
// updatedReplicas and it doesn't violate minimum availability.
func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == deployment.Spec.Replicas &&
newStatus.AvailableReplicas >= deployment.Spec.Replicas-MaxUnavailable(*deployment)
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas >= *(deployment.Spec.Replicas)-MaxUnavailable(*deployment)
}

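Concretely: with spec.replicas = 5 and maxUnavailable = 1, DeploymentComplete holds once updatedReplicas == 5 and availableReplicas >= 4. A self-contained sketch of the same check with the API types reduced to plain integers:

package main

import "fmt"

// deploymentComplete mirrors the check above: desired is spec.replicas,
// maxUnavailable comes from the rolling-update strategy.
func deploymentComplete(desired, updated, available, maxUnavailable int32) bool {
	return updated == desired && available >= desired-maxUnavailable
}

func main() {
	fmt.Println(deploymentComplete(5, 5, 4, 1)) // true: all updated, 4/5 available
	fmt.Println(deploymentComplete(5, 5, 3, 1)) // false: violates minimum availability
}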
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
@@ -857,24 +874,24 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
switch deployment.Spec.Strategy.Type {
case extensions.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true)
maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
return 0, err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := deployment.Spec.Replicas + int32(maxSurge)
maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods {
// Cannot scale up.
return newRS.Spec.Replicas, nil
return *(newRS.Spec.Replicas), nil
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas)))
return newRS.Spec.Replicas + scaleUpCount, nil
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case extensions.RecreateDeploymentStrategyType:
return deployment.Spec.Replicas, nil
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
}
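The RollingUpdate branch caps total pods across all ReplicaSets at spec.replicas + maxSurge, and additionally never scales the new ReplicaSet past spec.replicas itself. A self-contained sketch of that arithmetic, with the controller types reduced to plain integers:

package main

import "fmt"

// newRSReplicas mirrors the branch above: the new RS may grow until the
// total pod count reaches desired+maxSurge, and never past desired.
func newRSReplicas(desired, maxSurge, currentTotal, newRSCurrent int32) int32 {
	maxTotal := desired + maxSurge
	if currentTotal >= maxTotal {
		return newRSCurrent // cannot scale up
	}
	scaleUp := maxTotal - currentTotal
	if room := desired - newRSCurrent; scaleUp > room {
		scaleUp = room // do not exceed the number of desired replicas
	}
	return newRSCurrent + scaleUp
}

func main() {
	// desired=10, maxSurge=3, 11 pods exist in total, new RS currently has 4:
	// maxTotal=13, so only 2 more pods are allowed.
	fmt.Println(newRSReplicas(10, 3, 11, 4)) // 6
}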
@@ -892,7 +909,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
if err != nil {
return false
}
return rs.Spec.Replicas == deployment.Spec.Replicas && int32(desired) == deployment.Spec.Replicas
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas)
}

// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.

@@ -26,8 +26,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -74,22 +75,22 @@ func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {

func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.UpdateAction).GetObject().(*api.Pod)
obj := action.(core.UpdateAction).GetObject().(*v1.Pod)
return true, obj, nil
})
return fakeClient
}

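The reactor helpers above intercept actions on the fake clientset by verb and resource and hand back a canned object instead of reaching a real API server. A stripped-down sketch of the same dispatch idea, using hypothetical local types rather than the real fake-clientset machinery:

package main

import "fmt"

// action is a hypothetical stand-in for core.Action: a verb and a resource.
type action struct{ verb, resource string }

// reactor mirrors the shape above: it reports whether it handled the
// action and, if so, what object to return.
type reactor func(a action) (handled bool, ret interface{}, err error)

type fakeClient struct{ reactors []reactor }

func (c *fakeClient) addReactor(verb, resource string, fn func(action) (bool, interface{}, error)) {
	c.reactors = append(c.reactors, func(a action) (bool, interface{}, error) {
		if a.verb != verb || a.resource != resource {
			return false, nil, nil // not ours; let the next reactor try
		}
		return fn(a)
	})
}

func (c *fakeClient) invoke(a action) (interface{}, error) {
	for _, r := range c.reactors {
		if handled, ret, err := r(a); handled {
			return ret, err
		}
	}
	return nil, fmt.Errorf("no reactor for %s %s", a.verb, a.resource)
}

func main() {
	c := &fakeClient{}
	c.addReactor("update", "pods", func(a action) (bool, interface{}, error) {
		return true, "updated-pod", nil // echo a canned object, as the test does
	})
	obj, err := c.invoke(action{"update", "pods"})
	fmt.Println(obj, err) // updated-pod <nil>
}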
func newPod(now time.Time, ready bool, beforeSec int) api.Pod {
conditionStatus := api.ConditionFalse
func newPod(now time.Time, ready bool, beforeSec int) v1.Pod {
conditionStatus := v1.ConditionFalse
if ready {
conditionStatus = api.ConditionTrue
conditionStatus = v1.ConditionTrue
}
return api.Pod{
Status: api.PodStatus{
Conditions: []api.PodCondition{
return v1.Pod{
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Type: v1.PodReady,
LastTransitionTime: unversioned.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)),
Status: conditionStatus,
},
@@ -99,27 +100,27 @@ func newPod(now time.Time, ready bool, beforeSec int) api.Pod {
}

// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
func generatePodFromRS(rs extensions.ReplicaSet) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{
func generatePodFromRS(rs extensions.ReplicaSet) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: rs.Labels,
},
Spec: rs.Spec.Template.Spec,
}
}

func generatePod(labels map[string]string, image string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{
func generatePod(labels map[string]string, image string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
},
@@ -128,24 +129,24 @@ func generatePod(labels map[string]string, image string) api.Pod {

func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
ObjectMeta: v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: labels,
},
Spec: extensions.ReplicaSetSpec{
Replicas: 1,
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: labels},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
},
@@ -158,11 +159,12 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
template := GetNewReplicaSetTemplate(&deployment)
return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
ObjectMeta: v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: template.Labels,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(0); return &i }(),
Template: template,
Selector: &unversioned.LabelSelector{MatchLabels: template.Labels},
},
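The new Replicas literal, func(i int32) *int32 { return &i }(1), is an immediately-invoked closure: Go constants are not addressable, so &1 does not compile and the value must first be bound to an addressable variable. The idiom in isolation, assuming nothing beyond the language itself:

package main

import "fmt"

func main() {
	// &1 is illegal: constants are not addressable. The closure gives the
	// constant an addressable home (its parameter) and returns its address.
	replicas := func(i int32) *int32 { return &i }(1)
	fmt.Println(*replicas) // 1

	// The zero-argument variant used by generateRS works the same way.
	zero := func() *int32 { i := int32(0); return &i }()
	fmt.Println(*zero) // 0
}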
@@ -174,29 +176,29 @@ func generateDeployment(image string) extensions.Deployment {
podLabels := map[string]string{"name": image}
terminationSec := int64(30)
return extensions.Deployment{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: image,
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: podLabels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
DNSPolicy: api.DNSClusterFirst,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &terminationSec,
RestartPolicy: api.RestartPolicyAlways,
SecurityContext: &api.PodSecurityContext{},
RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &v1.PodSecurityContext{},
},
},
},
@@ -215,7 +217,7 @@ func TestGetNewRC(t *testing.T) {
{
"No new ReplicaSet",
[]runtime.Object{
&api.PodList{},
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")),
@@ -228,7 +230,7 @@ func TestGetNewRC(t *testing.T) {
{
"Has new ReplicaSet",
[]runtime.Object{
&api.PodList{},
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")),
@@ -253,7 +255,7 @@ func TestGetNewRC(t *testing.T) {
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
}
if !api.Semantic.DeepEqual(rs, test.expected) {
if !v1.Semantic.DeepEqual(rs, test.expected) {
t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs)
}
}
@@ -262,25 +264,25 @@ func TestGetNewRC(t *testing.T) {
func TestGetOldRCs(t *testing.T) {
newDeployment := generateDeployment("nginx")
newRS := generateRS(newDeployment)
newRS.Status.FullyLabeledReplicas = newRS.Spec.Replicas
newRS.Status.FullyLabeledReplicas = *(newRS.Spec.Replicas)
newPod := generatePodFromRS(newRS)

// create 2 old deployments and related replica sets/pods, with the same labels but different template
oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
oldPod := generatePodFromRS(oldRS)
oldDeployment2 := generateDeployment("nginx")
oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2"
oldRS2 := generateRS(oldDeployment2)
oldRS2.Status.FullyLabeledReplicas = oldRS2.Spec.Replicas
oldRS2.Status.FullyLabeledReplicas = *(oldRS2.Spec.Replicas)
oldPod2 := generatePodFromRS(oldRS2)

// create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment
existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo")
existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo")
existedRS.Status.FullyLabeledReplicas = existedRS.Spec.Replicas
existedRS.Status.FullyLabeledReplicas = *(existedRS.Spec.Replicas)

tests := []struct {
test string
@@ -290,8 +292,8 @@ func TestGetOldRCs(t *testing.T) {
{
"No old ReplicaSets",
[]runtime.Object{
&api.PodList{
Items: []api.Pod{
&v1.PodList{
Items: []v1.Pod{
generatePod(newDeployment.Spec.Template.Labels, "foo"),
generatePod(newDeployment.Spec.Template.Labels, "bar"),
newPod,
@@ -310,8 +312,8 @@ func TestGetOldRCs(t *testing.T) {
{
"Has old ReplicaSet",
[]runtime.Object{
&api.PodList{
Items: []api.Pod{
&v1.PodList{
Items: []v1.Pod{
oldPod,
oldPod2,
generatePod(map[string]string{"name": "bar"}, "bar"),
@@ -359,14 +361,14 @@ func TestGetOldRCs(t *testing.T) {
}
}

func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) api.PodTemplateSpec {
return api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Name: name,
Annotations: annotations,
Labels: labels,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
},
}
@@ -375,7 +377,7 @@ func generatePodTemplateSpec(name, nodeName string, annotations, labels map[stri
func TestEqualIgnoreHash(t *testing.T) {
tests := []struct {
test string
former, latter api.PodTemplateSpec
former, latter v1.PodTemplateSpec
expected bool
}{
{
@@ -429,7 +431,7 @@ func TestEqualIgnoreHash(t *testing.T) {
}

for _, test := range tests {
runTest := func(t1, t2 api.PodTemplateSpec, reversed bool) {
runTest := func(t1, t2 v1.PodTemplateSpec, reversed bool) {
// Set up
t1Copy, err := api.Scheme.DeepCopy(t1)
if err != nil {
@@ -472,7 +474,7 @@ func TestFindNewReplicaSet(t *testing.T) {
oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)

tests := []struct {
test string
@@ -508,7 +510,7 @@ func TestFindOldReplicaSets(t *testing.T) {
oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
newPod := generatePodFromRS(newRS)
oldPod := generatePodFromRS(oldRS)

@@ -516,15 +518,15 @@ func TestFindOldReplicaSets(t *testing.T) {
test string
deployment extensions.Deployment
rsList []*extensions.ReplicaSet
podList *api.PodList
podList *v1.PodList
expected []*extensions.ReplicaSet
}{
{
test: "Get old ReplicaSets",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
podList: &api.PodList{
Items: []api.Pod{
podList: &v1.PodList{
Items: []v1.Pod{
newPod,
oldPod,
},
@@ -535,8 +537,8 @@ func TestFindOldReplicaSets(t *testing.T) {
test: "Get old ReplicaSets with no new ReplicaSet",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&oldRS},
podList: &api.PodList{
Items: []api.Pod{
podList: &v1.PodList{
Items: []v1.Pod{
oldPod,
},
},
@@ -546,8 +548,8 @@ func TestFindOldReplicaSets(t *testing.T) {
test: "Get empty old ReplicaSets",
deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS},
podList: &api.PodList{
Items: []api.Pod{
podList: &v1.PodList{
Items: []v1.Pod{
newPod,
},
},
@@ -584,10 +586,10 @@ func equal(rss1, rss2 []*extensions.ReplicaSet) bool {

func TestGetReplicaCountForReplicaSets(t *testing.T) {
rs1 := generateRS(generateDeployment("foo"))
rs1.Spec.Replicas = 1
*(rs1.Spec.Replicas) = 1
rs1.Status.Replicas = 2
rs2 := generateRS(generateDeployment("bar"))
rs2.Spec.Replicas = 2
*(rs2.Spec.Replicas) = 2
rs2.Status.Replicas = 3

tests := []struct {
@@ -715,16 +717,16 @@ func TestNewRSNewReplicas(t *testing.T) {
newDeployment := generateDeployment("nginx")
newRC := generateRS(newDeployment)
rs5 := generateRS(newDeployment)
rs5.Spec.Replicas = 5
*(rs5.Spec.Replicas) = 5

for _, test := range tests {
newDeployment.Spec.Replicas = test.depReplicas
*(newDeployment.Spec.Replicas) = test.depReplicas
newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(1),
MaxSurge: intstr.FromInt(test.maxSurge),
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(test.maxSurge),
}
newRC.Spec.Replicas = test.newRSReplicas
*(newRC.Spec.Replicas) = test.newRSReplicas
rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC)
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
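The same value-to-pointer migration hits MaxSurge and MaxUnavailable, which change from intstr.IntOrString values to pointers, hence the immediately-invoked closures above. A sketch of a small named helper the tests could define instead, assuming the pkg/util/intstr import already present in this file (the helper is hypothetical, not part of this commit):

// newIntOrString wraps intstr.FromInt so the pointer fields can be
// built in one call; FromInt returns a value, so it is first bound to
// an addressable local before its address is taken.
func newIntOrString(i int) *intstr.IntOrString {
	x := intstr.FromInt(i)
	return &x
}

// Usage, mirroring the strategy literal above:
//   MaxUnavailable: newIntOrString(1),
//   MaxSurge:       newIntOrString(test.maxSurge),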
@@ -739,7 +741,7 @@ var (
condProgressing = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
Status: api.ConditionFalse,
Status: v1.ConditionFalse,
Reason: "ForSomeReason",
}
}
@@ -747,7 +749,7 @@ var (
condProgressing2 = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "BecauseItIs",
}
}
@@ -755,7 +757,7 @@ var (
condAvailable = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{
Type: extensions.DeploymentAvailable,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "AwesomeController",
}
}
@@ -775,7 +777,7 @@ func TestGetCondition(t *testing.T) {

status extensions.DeploymentStatus
condType extensions.DeploymentConditionType
condStatus api.ConditionStatus
condStatus v1.ConditionStatus
condReason string

expected bool
@@ -897,10 +899,11 @@ func TestDeploymentComplete(t *testing.T) {
deployment := func(desired, current, updated, available, maxUnavailable int32) *extensions.Deployment {
return &extensions.Deployment{
Spec: extensions.DeploymentSpec{
Replicas: desired,
Replicas: &desired,
Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(int(maxUnavailable)),
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
MaxSurge: func() *intstr.IntOrString { x := intstr.FromInt(0); return &x }(),
},
Type: extensions.RollingUpdateDeploymentStrategyType,
},
@@ -1047,7 +1050,7 @@ func TestDeploymentTimedOut(t *testing.T) {
timeFn := func(min, sec int) time.Time {
return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC)
}
deployment := func(condType extensions.DeploymentConditionType, status api.ConditionStatus, pds *int32, from time.Time) extensions.Deployment {
deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, pds *int32, from time.Time) extensions.Deployment {
return extensions.Deployment{
Spec: extensions.DeploymentSpec{
ProgressDeadlineSeconds: pds,
@@ -1075,21 +1078,21 @@ func TestDeploymentTimedOut(t *testing.T) {
{
name: "no progressDeadlineSeconds specified - no timeout",

d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, null, timeFn(1, 9)),
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, null, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",

d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 9)),
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: true,
},
{
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s",

d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 11)),
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 11)),
nowFn: func() time.Time { return timeFn(1, 20) },
expected: false,
},