Merge pull request #114687 from freddie400/migrate-hpa

Migrate pkg/controller/podautoscaler to contextual logging

Commit: d9ed2ff4b0
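
The diff below replaces global klog.Infof calls with contextual, structured logging. For orientation, here is a minimal sketch of the pattern, assuming a made-up reconcile helper and example values (only the klog/v2 calls are real; none of this code is from the PR): the start function attaches a named logger to the context with klog.NewContext and klog.LoggerWithName, and downstream code retrieves it with klog.FromContext and logs key/value pairs instead of format strings.

// Minimal sketch of the contextual-logging pattern applied in this PR.
// The reconcile helper and the values passed to it are illustrative only.
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// reconcile stands in for a controller method such as reconcileAutoscaler:
// it pulls the logger out of the context and logs structured key/value pairs
// instead of a formatted string.
func reconcile(ctx context.Context, namespace, name string, desiredReplicas int32) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Proposing desired replicas",
		"desiredReplicas", desiredReplicas,
		"HPA", klog.KRef(namespace, name))
}

func main() {
	// Mirrors startHPAController: wrap the base context with a named logger
	// so everything logged downstream is attributed to "hpa-controller".
	logger := klog.LoggerWithName(klog.Background(), "hpa-controller")
	ctx := klog.NewContext(context.Background(), logger)
	reconcile(ctx, "default", "example-hpa", 3)
}

The klog.KRef and klog.KObj helpers, both used in the diff, attach an object's namespace and name as structured fields, so log lines stay machine-parseable without the printf-style message.
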
@@ -21,11 +21,11 @@ package app
 import (
 	"context"

 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/scale"
 	"k8s.io/controller-manager/controller"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
@@ -35,6 +35,9 @@ import (
 )

 func startHPAController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
+	ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "hpa-controller"))
+
 	if !controllerContext.AvailableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
 		return nil, false, nil
 	}
@@ -43,6 +46,7 @@ func startHPAController(ctx context.Context, controllerContext ControllerContext
 }

 func startHPAControllerWithRESTClient(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 	clientConfig := controllerContext.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
 	hpaClient := controllerContext.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
@@ -63,6 +67,7 @@ func startHPAControllerWithRESTClient(ctx context.Context, controllerContext Con
 }

 func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext ControllerContext, metricsClient metrics.MetricsClient) (controller.Interface, bool, error) {
 	hpaClient := controllerContext.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
 	hpaClientConfig := controllerContext.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
@@ -178,8 +178,9 @@ func (a *HorizontalController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer a.queue.ShutDown()

-	klog.Infof("Starting HPA controller")
-	defer klog.Infof("Shutting down HPA controller")
+	logger := klog.FromContext(ctx)
+	logger.Info("Starting HPA controller")
+	defer logger.Info("Shutting down HPA controller")

 	if !cache.WaitForNamedCacheSync("HPA", ctx.Done(), a.hpaListerSynced, a.podListerSynced) {
 		return
@@ -239,7 +240,8 @@ func (a *HorizontalController) deleteHPA(obj interface{}) {
 func (a *HorizontalController) worker(ctx context.Context) {
 	for a.processNextWorkItem(ctx) {
 	}
-	klog.Infof("horizontal pod autoscaler controller worker shutting down")
+	logger := klog.FromContext(ctx)
+	logger.Info("Horizontal Pod Autoscaler controller worker shutting down")
 }

 func (a *HorizontalController) processNextWorkItem(ctx context.Context) bool {
@@ -447,9 +449,11 @@ func (a *HorizontalController) reconcileKey(ctx context.Context, key string) (de
 		return true, err
 	}

+	logger := klog.FromContext(ctx)
+
 	hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
 	if errors.IsNotFound(err) {
-		klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
+		logger.Info("Horizontal Pod Autoscaler has been deleted", "HPA", klog.KRef(namespace, name))

 		a.recommendationsLock.Lock()
 		delete(a.recommendations, key)
@@ -738,6 +742,7 @@ func (a *HorizontalController) reconcileAutoscaler(ctx context.Context, hpaShare
 	}

 	rescale := true
+	logger := klog.FromContext(ctx)

 	if scale.Spec.Replicas == 0 && minReplicas != 0 {
 		// Autoscaling is disabled for this resource
@@ -762,7 +767,11 @@ func (a *HorizontalController) reconcileAutoscaler(ctx context.Context, hpaShare
 			return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
 		}

-		klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, metricTimestamp, reference)
+		logger.V(4).Info("Proposing desired replicas",
+			"desiredReplicas", metricDesiredReplicas,
+			"metric", metricName,
+			"timestamp", metricTimestamp,
+			"scaleTarget", reference)

 		rescaleMetric := ""
 		if metricDesiredReplicas > desiredReplicas {
@@ -798,10 +807,16 @@ func (a *HorizontalController) reconcileAutoscaler(ctx context.Context, hpaShare
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
 		a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
 		a.storeScaleEvent(hpa.Spec.Behavior, key, currentReplicas, desiredReplicas)
-		klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
-			hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
+		logger.Info("Successfully rescaled",
+			"HPA", klog.KObj(hpa),
+			"currentReplicas", currentReplicas,
+			"desiredReplicas", desiredReplicas,
+			"reason", rescaleReason)
 	} else {
-		klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
+		logger.V(4).Info("Decided not to scale",
+			"scaleTarget", reference,
+			"desiredReplicas", desiredReplicas,
+			"lastScaleTime", hpa.Status.LastScaleTime)
 		desiredReplicas = currentReplicas
 	}

@@ -1286,7 +1301,8 @@ func (a *HorizontalController) updateStatus(ctx context.Context, hpa *autoscalin
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
 		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
 	}
-	klog.V(2).Infof("Successfully updated status for %s", hpa.Name)
+	logger := klog.FromContext(ctx)
+	logger.V(2).Info("Successfully updated status", "HPA", klog.KObj(hpa))
 	return nil
 }

@@ -80,7 +80,7 @@ func (c *resourceMetricsClient) GetResourceMetric(ctx context.Context, resource
 			return nil, time.Time{}, fmt.Errorf("failed to get container metrics: %v", err)
 		}
 	} else {
-		res = getPodMetrics(metrics.Items, resource)
+		res = getPodMetrics(ctx, metrics.Items, resource)
 	}
 	timestamp := metrics.Items[0].Timestamp.Time
 	return res, timestamp, nil
@@ -110,7 +110,7 @@ func getContainerMetrics(rawMetrics []metricsapi.PodMetrics, resource v1.Resourc
 	return res, nil
 }

-func getPodMetrics(rawMetrics []metricsapi.PodMetrics, resource v1.ResourceName) PodMetricsInfo {
+func getPodMetrics(ctx context.Context, rawMetrics []metricsapi.PodMetrics, resource v1.ResourceName) PodMetricsInfo {
 	res := make(PodMetricsInfo, len(rawMetrics))
 	for _, m := range rawMetrics {
 		podSum := int64(0)
@@ -119,7 +119,7 @@ func getPodMetrics(rawMetrics []metricsapi.PodMetrics, resource v1.ResourceName)
 			resValue, found := c.Usage[resource]
 			if !found {
 				missing = true
-				klog.V(2).Infof("missing resource metric %v for %s/%s", resource, m.Namespace, m.Name)
+				klog.FromContext(ctx).V(2).Info("Missing resource metric", "resourceMetric", resource, "pod", klog.KRef(m.Namespace, m.Name))
 				break
 			}
 			podSum += resValue.MilliValue()
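
One practical payoff of threading ctx through helpers such as getPodMetrics is that callers, including unit tests, can inject their own logger. The sketch below is not from this PR; sumMilli is a hypothetical stand-in helper, and the test uses k8s.io/klog/v2/ktesting, which contextual-logging migrations commonly rely on to capture per-test output.

// Hypothetical test showing why helpers now take ctx: a test-scoped logger is
// injected via the context and its output is attributed to the running test.
// Only the klog/v2 and ktesting calls are real; sumMilli is illustrative.
package metrics_test

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// sumMilli stands in for a helper like getPodMetrics: it receives ctx purely
// so it can log through the contextual logger instead of the global klog.
func sumMilli(ctx context.Context, values []int64) int64 {
	logger := klog.FromContext(ctx)
	var sum int64
	for _, v := range values {
		if v < 0 {
			logger.V(2).Info("Skipping negative sample", "value", v)
			continue
		}
		sum += v
	}
	return sum
}

func TestSumMilli(t *testing.T) {
	// ktesting puts a logger backed by t.Log into the context.
	_, ctx := ktesting.NewTestContext(t)
	if got := sumMilli(ctx, []int64{100, -5, 200}); got != 300 {
		t.Errorf("expected 300, got %d", got)
	}
}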