diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go
index 4708dcf9b7a..24997bb60bf 100644
--- a/pkg/controller/deployment/rolling.go
+++ b/pkg/controller/deployment/rolling.go
@@ -93,6 +93,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
 	if err != nil {
 		return false, fmt.Errorf("could not find available pods: %v", err)
 	}
+	glog.V(4).Infof("New RS %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRSAvailablePodCount)
 	maxUnavailable := maxUnavailable(*deployment)
 
 	// Check if we can scale down. We can scale down in the following 2 cases:
@@ -172,6 +173,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
 		if err != nil {
 			return nil, totalScaledDown, fmt.Errorf("could not find available pods: %v", err)
 		}
+		glog.V(4).Infof("Found %d available pods in old RS %s/%s", availablePodCount, targetRS.Namespace, targetRS.Name)
 		if targetRS.Spec.Replicas == availablePodCount {
 			// no unhealthy replicas found, no scaling required.
 			continue
diff --git a/pkg/util/deployment/deployment.go b/pkg/util/deployment/deployment.go
index 79386167ca1..732361b871d 100644
--- a/pkg/util/deployment/deployment.go
+++ b/pkg/util/deployment/deployment.go
@@ -356,6 +356,7 @@ func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 {
 	for _, pod := range pods {
 		// TODO: Make the time.Now() as argument to allow unit test this.
 		if IsPodAvailable(&pod, minReadySeconds, time.Now()) {
+			glog.V(4).Infof("Pod %s/%s is available.", pod.Namespace, pod.Name)
 			availablePodCount++
 		}
 	}