e2e dra: update logging

When running as part of the scheduler_perf benchmark testing, we want to print
less information by default, so we should use V to limit verbosity

Pretty-printing doesn't belong in "application" code. I am moving that into
the ktesting formatting (https://github.com/kubernetes/kubernetes/pull/116180).
This commit is contained in:
Patrick Ohly 2023-03-01 15:02:03 +01:00
parent 106fce6fae
commit 74785074c6

View File

@ -163,7 +163,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha
func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) { func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID) logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
defer func() { defer func() {
logger.Info("done", "result", prettyPrint(result), "err", err) logger.V(3).Info("done", "result", result, "err", err)
}() }()
c.mutex.Lock() c.mutex.Lock()
@ -175,9 +175,9 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
// Idempotent result - kind of. We don't check whether // Idempotent result - kind of. We don't check whether
// the parameters changed in the meantime. A real // the parameters changed in the meantime. A real
// driver would have to do that. // driver would have to do that.
logger.Info("already allocated") logger.V(3).Info("already allocated")
} else { } else {
logger.Info("starting", "selectedNode", selectedNode) logger.V(3).Info("starting", "selectedNode", selectedNode)
if c.resources.NodeLocal { if c.resources.NodeLocal {
node = selectedNode node = selectedNode
if node == "" { if node == "" {
@ -196,7 +196,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
// Pick randomly. We could also prefer the one with the least // Pick randomly. We could also prefer the one with the least
// number of allocations (even spreading) or the most (packing). // number of allocations (even spreading) or the most (packing).
node = viableNodes[rand.Intn(len(viableNodes))] node = viableNodes[rand.Intn(len(viableNodes))]
logger.Info("picked a node ourselves", "selectedNode", selectedNode) logger.V(3).Info("picked a node ourselves", "selectedNode", selectedNode)
} else if !contains(c.resources.Nodes, node) || } else if !contains(c.resources.Nodes, node) ||
c.resources.MaxAllocations > 0 && c.resources.MaxAllocations > 0 &&
c.countAllocations(node) >= c.resources.MaxAllocations { c.countAllocations(node) >= c.resources.MaxAllocations {
@ -258,11 +258,11 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
defer c.mutex.Unlock() defer c.mutex.Unlock()
if _, ok := c.allocated[claim.UID]; !ok { if _, ok := c.allocated[claim.UID]; !ok {
logger.Info("already deallocated") logger.V(3).Info("already deallocated")
return nil return nil
} }
logger.Info("done") logger.V(3).Info("done")
c.numDeallocations++ c.numDeallocations++
delete(c.allocated, claim.UID) delete(c.allocated, claim.UID)
return nil return nil
@ -270,11 +270,15 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) { func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod)) logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod))
logger.Info("starting", "claim", prettyPrintSlice(claims), "potentialNodes", potentialNodes) c.mutex.Lock()
defer c.mutex.Unlock()
logger.V(3).Info("starting", "claims", claims, "potentialNodes", potentialNodes)
defer func() { defer func() {
// UnsuitableNodes is the same for all claims. // UnsuitableNodes is the same for all claims.
logger.Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr) logger.V(3).Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
}() }()
if c.resources.MaxAllocations == 0 { if c.resources.MaxAllocations == 0 {
// All nodes are suitable. // All nodes are suitable.
return nil return nil
@ -335,23 +339,3 @@ func contains[T comparable](list []T, value T) bool {
return false return false
} }
// prettyPrint dereferences obj so that log output shows the pointed-to
// value rather than a pointer address. A nil pointer is rendered as the
// string "<nil>".
func prettyPrint[T any](obj *T) interface{} {
	if obj != nil {
		return *obj
	}
	return "<nil>"
}
// prettyPrintSlice prints the values the slice points to, not the pointers.
// Nil entries are rendered as the string "<nil>". A nil or empty input
// yields a nil result, matching the original zero-value behavior.
func prettyPrintSlice[T any](slice []*T) interface{} {
	var values []interface{}
	if len(slice) > 0 {
		// Pre-size to the known length to avoid repeated append growth.
		values = make([]interface{}, 0, len(slice))
	}
	for _, v := range slice {
		if v == nil {
			values = append(values, "<nil>")
		} else {
			values = append(values, *v)
		}
	}
	return values
}