Fix the rest of the code

Author: Clayton Coleman
Date: 2016-04-27 00:35:14 -04:00
parent 8d0187add2
commit fdb110c859
129 changed files with 625 additions and 663 deletions
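The change is mechanical but easy to get wrong: replica counts in the extensions API types move from int to int32, so every controller computation, helper signature, and test comparison in this diff either changes type or converts at the boundary. A minimal, self-contained sketch of that boundary rule (the struct and names here are illustrative, not the real API types):

package main

import "fmt"

// illustrative stand-in for the API type: Replicas becomes int32 in
// this commit, while len() and test expectations stay plain int, so
// casts sit exactly where the two meet
type replicaSetSpec struct {
	Replicas int32 // was int before this commit
}

func main() {
	oldRSs := []replicaSetSpec{{Replicas: 3}, {Replicas: 2}}
	total := int32(len(oldRSs)) // int -> int32 entering API arithmetic
	fmt.Println(total + oldRSs[0].Replicas)
	expected := 3
	fmt.Println(expected == int(oldRSs[0].Replicas)) // int32 -> int at a test check
}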


@@ -1065,12 +1065,12 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
}
// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
-func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int) ([]*extensions.ReplicaSet, int, error) {
+func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
// be deleted first and won't increase unavailability.
-totalScaledDown := 0
+totalScaledDown := int32(0)
for i, targetRS := range oldRSs {
if totalScaledDown >= maxCleanupCount {
break
@@ -1088,7 +1088,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
continue
}
-scaledDownCount := integer.IntMin(maxCleanupCount-totalScaledDown, targetRS.Spec.Replicas-readyPodCount)
+scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-readyPodCount)))
newReplicasCount := targetRS.Spec.Replicas - scaledDownCount
if newReplicasCount > targetRS.Spec.Replicas {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
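Both scale-down sites in this file wrap integer.IntMin in casts because the helper only exists for int. A hypothetical int32 variant, shown purely as a sketch and not part of pkg/util/integer at this commit, would remove the round-trip:

// hypothetical helper, not in pkg/util/integer here: it would replace
// the int32(integer.IntMin(int(a), int(b))) round-trips in this diff
// with a direct int32Min(a, b)
func int32Min(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}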
@@ -1105,7 +1105,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// Need to check maxUnavailable to ensure availability
-func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) {
+func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) {
_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
if err != nil {
return 0, err
@@ -1126,7 +1126,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
-totalScaledDown := 0
+totalScaledDown := int32(0)
totalScaleDownCount := readyPodCount - minAvailable
for _, targetRS := range oldRSs {
if totalScaledDown >= totalScaleDownCount {
@@ -1138,7 +1138,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
continue
}
// Scale down.
-scaleDownCount := integer.IntMin(targetRS.Spec.Replicas, totalScaleDownCount-totalScaledDown)
+scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := targetRS.Spec.Replicas - scaleDownCount
if newReplicasCount > targetRS.Spec.Replicas {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
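For intuition about the budget this function enforces, here is the availability arithmetic with illustrative values (not taken from the diff):

// illustrative numbers for the totalScaleDownCount computed above:
//   replicas = 10, maxUnavailable = 2  => minAvailable = 10 - 2 = 8
//   readyPodCount = 10                 => totalScaleDownCount = 10 - 8 = 2
// so at most two old replicas may be scaled down before the deployment
// would dip below the availability floor set by maxUnavailable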
@@ -1180,7 +1180,7 @@ func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extension
}
func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
-diff := len(oldRSs) - *deployment.Spec.RevisionHistoryLimit
+diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit
if diff <= 0 {
return nil
}
@@ -1189,7 +1189,7 @@ func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.Repli
var errList []error
// TODO: This should be parallelized.
-for i := 0; i < diff; i++ {
+for i := int32(0); i < diff; i++ {
rs := oldRSs[i]
// Avoid deleting replica sets with non-zero replica counts
if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration {
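The cast on len(oldRSs) above exists because RevisionHistoryLimit is now a *int32. Setting the field from a literal therefore goes through a typed pointer; a small sketch, where deployment stands for any *extensions.Deployment in scope:

// sketch under the post-change types: RevisionHistoryLimit is *int32,
// so callers create a typed pointer rather than assigning a plain int
limit := int32(2)
deployment.Spec.RevisionHistoryLimit = &limit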
@@ -1223,7 +1223,7 @@ func (dc *DeploymentController) updateDeploymentStatus(allRSs []*extensions.Repl
return err
}
-func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int, err error) {
+func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int32, err error) {
totalActualReplicas = deploymentutil.GetActualReplicaCountForReplicaSets(allRSs)
updatedReplicas = deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS})
minReadySeconds := deployment.Spec.MinReadySeconds
@@ -1237,7 +1237,7 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet,
return
}
-func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
+func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
// No need to scale
if rs.Spec.Replicas == newScale {
return false, rs, nil
@@ -1257,7 +1257,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep
return true, newRS, err
}
-func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int) (*extensions.ReplicaSet, error) {
+func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32) (*extensions.ReplicaSet, error) {
// TODO: Using client for now, update to use store when it is ready.
// NOTE: This mutates the ReplicaSet passed in. Not sure if that's a good idea.
rs.Spec.Replicas = newScale
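The NOTE above flags that scaleReplicaSet mutates its argument. Purely as a sketch of the alternative it alludes to (this commit does not change that behavior), a copy-based variant would look like:

// hypothetical non-mutating variant of the assignment above; Spec is a
// struct field, so a shallow copy of the ReplicaSet is enough to keep
// the caller's Replicas value untouched
func withScale(rs *extensions.ReplicaSet, newScale int32) *extensions.ReplicaSet {
	copied := *rs
	copied.Spec.Replicas = newScale
	return &copied
}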


@@ -39,7 +39,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
Name: name,
},
Spec: exp.ReplicaSetSpec{
-Replicas: replicas,
+Replicas: int32(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: api.PodTemplateSpec{},
},
@@ -49,7 +49,7 @@ func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet {
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *exp.ReplicaSet {
rs := rs(name, specReplicas, selector)
rs.Status = exp.ReplicaSetStatus{
-Replicas: statusReplicas,
+Replicas: int32(statusReplicas),
}
return rs
}
@@ -60,7 +60,7 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr
Name: name,
},
Spec: exp.DeploymentSpec{
-Replicas: replicas,
+Replicas: int32(replicas),
Strategy: exp.DeploymentStrategy{
Type: exp.RollingUpdateDeploymentStrategyType,
RollingUpdate: &exp.RollingUpdateDeployment{
@@ -75,6 +75,11 @@ func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOr
var alwaysReady = func() bool { return true }
func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
+var v *int32
+if revisionHistoryLimit != nil {
+v = new(int32)
+*v = int32(*revisionHistoryLimit)
+}
d := exp.Deployment{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()},
ObjectMeta: api.ObjectMeta{
@@ -88,7 +93,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
Type: exp.RollingUpdateDeploymentStrategyType,
RollingUpdate: &exp.RollingUpdateDeployment{},
},
-Replicas: replicas,
+Replicas: int32(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
@@ -105,7 +110,7 @@ func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment {
},
},
},
-RevisionHistoryLimit: revisionHistoryLimit,
+RevisionHistoryLimit: v,
},
}
return &d
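The five added lines in newDeployment are an inline *int to *int32 conversion. The same logic as a reusable helper, hypothetical and shown only for clarity:

// hypothetical helper equivalent to the added var v block above: it
// converts the test's optional int into the *int32 the API field now is
func int32PtrFromInt(i *int) *int32 {
	if i == nil {
		return nil
	}
	v := int32(*i)
	return &v
}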
@@ -118,7 +123,7 @@ func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet
Namespace: api.NamespaceDefault,
},
Spec: exp.ReplicaSetSpec{
-Replicas: replicas,
+Replicas: int32(replicas),
Template: d.Spec.Template,
},
}
@@ -211,7 +216,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*exp.ReplicaSet)
-if e, a := test.expectedNewReplicas, updated.Spec.Replicas; e != a {
+if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
@@ -470,12 +475,12 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
-_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, test.maxCleanupCount)
+_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, int32(test.maxCleanupCount))
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
-if cleanupCount != test.cleanupCountExpected {
+if int(cleanupCount) != test.cleanupCountExpected {
t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
continue
}
@@ -598,7 +603,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
continue
}
updated := updateAction.GetObject().(*exp.ReplicaSet)
-if e, a := test.expectedOldReplicas, updated.Spec.Replicas; e != a {
+if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
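The test-side pattern throughout this file mirrors the controller changes: expectations stay plain ints in the test tables, and the int32 API field is converted once at the comparison site, as in:

// recap of the comparison idiom these tests settle on: convert the
// int32 field at the check, so test tables keep ordinary int literals
if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
	t.Errorf("expected update to %d replicas, got %d", e, a)
}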