deployment controller: use contextual logging
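
The change swaps printf-style calls on the global klog logger for a logger retrieved from the request context, passing object identity as structured key/value pairs (klog.KObj renders a namespace/name reference) instead of formatting it into the message. A minimal self-contained sketch of the pattern, assuming the caller wires a logger into ctx; the reconcile function below is illustrative, not part of this commit:

    package main

    import (
        "context"
        "flag"

        "k8s.io/klog/v2"
    )

    func reconcile(ctx context.Context) {
        // FromContext returns the logger stored in ctx (or klog's global
        // logger if none was attached), so no logger plumbing is needed.
        logger := klog.FromContext(ctx)
        // Structured form: a constant message plus key/value pairs, replacing Infof.
        logger.V(4).Info("Cleaned up unhealthy replicas from old RSes", "count", 2)
    }

    func main() {
        klog.InitFlags(nil)
        flag.Set("v", "4") // make the V(4) record above visible
        flag.Parse()

        // Callers attach a named logger to the context; FromContext picks it up.
        ctx := klog.NewContext(context.Background(), klog.Background().WithName("deployment-controller"))
        reconcile(ctx)
        klog.Flush()
    }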
@@ -85,14 +85,14 @@ func (dc *DeploymentController) reconcileNewReplicaSet(ctx context.Context, allR
 }
 
 func (dc *DeploymentController) reconcileOldReplicaSets(ctx context.Context, allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
+	logger := klog.FromContext(ctx)
 	oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
 	if oldPodsCount == 0 {
 		// Can't scale down further
 		return false, nil
 	}
 
 	allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-	klog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
+	logger.V(4).Info("New replica set", "replicaSet", klog.KObj(newRS), "availableReplicas", newRS.Status.AvailableReplicas)
 	maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
 
 	// Check if we can scale down. We can scale down in the following 2 cases:
@@ -138,7 +138,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(ctx context.Context, all
 	if err != nil {
 		return false, nil
 	}
-	klog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
+	logger.V(4).Info("Cleaned up unhealthy replicas from old RSes", "count", cleanupCount)
 
 	// Scale down old replica sets, need check maxUnavailable to ensure we can scale down
 	allRSs = append(oldRSs, newRS)
@@ -146,7 +146,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(ctx context.Context, all
 	if err != nil {
 		return false, nil
 	}
-	klog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
+	logger.V(4).Info("Scaled down old RSes", "deployment", klog.KObj(deployment), "count", scaledDownCount)
 
 	totalScaledDown := cleanupCount + scaledDownCount
 	return totalScaledDown > 0, nil
@@ -154,6 +154,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(ctx context.Context, all
 
 // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
 func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
+	logger := klog.FromContext(ctx)
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
 	// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
 	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
@@ -167,7 +168,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, ol
 			// cannot scale down this replica set.
 			continue
 		}
-		klog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
+		logger.V(4).Info("Found available pods in old RS", "replicaSet", klog.KObj(targetRS), "availableReplicas", targetRS.Status.AvailableReplicas)
 		if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
 			// no unhealthy replicas found, no scaling required.
 			continue
@@ -191,6 +192,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, ol
 // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
 // Need check maxUnavailable to ensure availability
 func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(ctx context.Context, allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
+	logger := klog.FromContext(ctx)
 	maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
 
 	// Check if we can scale down.
@@ -201,7 +203,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(ctx cont
 		// Cannot scale down.
 		return 0, nil
 	}
-	klog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
+	logger.V(4).Info("Found available pods in deployment, scaling down old RSes", "deployment", klog.KObj(deployment), "availableReplicas", availablePodCount)
 
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
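
A side benefit of reading the logger from ctx is test output capture: a unit test can inject a per-test logger instead of relying on klog's globals. A sketch using klog's ktesting package; the test body is illustrative, and a real test would pass ctx into the controller methods changed above:

    package deployment

    import (
        "testing"

        "k8s.io/klog/v2/ktesting"
    )

    func TestScaleDownLogging(t *testing.T) {
        // NewTestContext returns a logger that routes output through t.Log
        // and a context carrying it, so klog.FromContext inside the code
        // under test finds the per-test logger.
        logger, ctx := ktesting.NewTestContext(t)
        logger.Info("would call dc.scaleDownOldReplicaSetsForRollingUpdate(ctx, ...)")
        _ = ctx
    }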