Merge pull request #116181 from pohly/dra-test-driver-update

e2e: dra test driver update
Authored by Kubernetes Prow Robot on 2023-03-01 10:10:39 -08:00, committed by GitHub
commit fe671737ec
2 changed files with 16 additions and 36 deletions


@@ -140,10 +140,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
 	d.ctx = ctx
 	d.cleanup = append(d.cleanup, cancel)
-	// The controller is easy: we simply connect to the API server. It
-	// would be slightly nicer if we had a way to wait for all goroutines, but
-	// SharedInformerFactory has no API for that. At least we can wait
-	// for our own goroutine to stop once the context gets cancelled.
+	// The controller is easy: we simply connect to the API server.
 	d.Controller = app.NewController(d.f.ClientSet, d.Name, resources)
 	d.wg.Add(1)
 	go func() {
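For context: the harness starts the controller in its own goroutine and tracks it with a sync.WaitGroup, which is what the shortened comment alludes to; teardown cancels the context and then waits for that goroutine. A minimal, self-contained sketch of this pattern (runController is a placeholder, not the e2e driver's actual code):

```go
package main

import (
	"context"
	"sync"
)

// runController stands in for the controller's Run method; it blocks until ctx is cancelled.
func runController(ctx context.Context) {
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runController(ctx) // returns once ctx is cancelled
	}()

	// Teardown: cancel the context, then wait for our own goroutine to stop.
	cancel()
	wg.Wait()
}
```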


@@ -58,7 +58,6 @@ type ExampleController struct {
 	resources  Resources
 	driverName string
-	// mutex must be locked at the gRPC call level.
 	mutex sync.Mutex
 	// allocated maps claim.UID to the node (if network-attached) or empty (if not).
 	allocated map[types.UID]string
@@ -77,13 +76,13 @@ func NewController(clientset kubernetes.Interface, driverName string, resources
 	return c
 }
-func (c *ExampleController) Run(ctx context.Context, workers int) *ExampleController {
+func (c *ExampleController) Run(ctx context.Context, workers int) {
 	informerFactory := informers.NewSharedInformerFactory(c.clientset, 0 /* resync period */)
 	ctrl := controller.New(ctx, c.driverName, c, c.clientset, informerFactory)
 	informerFactory.Start(ctx.Done())
 	ctrl.Run(workers)
-	return c
+	// If we get here, the context was canceled and we can wait for informer factory goroutines.
+	informerFactory.Shutdown()
 }
 type parameters struct {
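The rewritten Run no longer returns the controller; it blocks until the context is canceled and then calls informerFactory.Shutdown(), which waits for the goroutines started by the shared informer factory, so the old comment about a missing wait API (removed in the first hunk) is obsolete. Below is a rough sketch of that start/run/shutdown lifecycle, using client-go's fake clientset and a sleep-then-cancel placeholder instead of the real ctrl.Run(workers):

```go
package main

import (
	"context"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// A fake clientset is enough to demonstrate the lifecycle.
	clientset := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(clientset, 0 /* resync period */)

	// Register at least one informer, then start the factory's goroutines.
	informerFactory.Core().V1().Pods().Informer()
	informerFactory.Start(ctx.Done())
	informerFactory.WaitForCacheSync(ctx.Done())

	// Stand-in for ctrl.Run(workers): block until the context is cancelled.
	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel()
	}()
	<-ctx.Done()

	// Shutdown blocks until all goroutines started by the factory have terminated.
	informerFactory.Shutdown()
}
```

Because Shutdown only returns after the informers' goroutines have exited, callers get a clean wait point without any extra bookkeeping of their own.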
@@ -164,7 +163,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha
 func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
 	logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
 	defer func() {
-		logger.Info("done", "result", prettyPrint(result), "err", err)
+		logger.V(3).Info("done", "result", result, "err", err)
 	}()
 	c.mutex.Lock()
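The logging changes in this and the following hunks switch from unconditional logger.Info calls to logger.V(3).Info, so the messages are only emitted when the verbosity threshold is at least 3 and the test driver stays quiet by default. A small sketch of that behavior with klog/v2 (the flag value and message are just for illustration):

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// klog registers its flags (including -v) on the default flag set.
	klog.InitFlags(nil)
	flag.Set("v", "2") // verbosity threshold 2 for this example
	flag.Parse()
	defer klog.Flush()

	logger := klog.Background()

	// Always emitted, regardless of -v.
	logger.Info("done", "result", "ok")

	// Only emitted when -v is 3 or higher; with -v=2 this line is dropped.
	logger.V(3).Info("done", "result", "ok")
}
```

Raising the verbosity when running the tests (for example -v=3 or higher) brings these messages back, so the information is still available for debugging.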
@@ -176,9 +175,9 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
 		// Idempotent result - kind of. We don't check whether
 		// the parameters changed in the meantime. A real
 		// driver would have to do that.
-		logger.Info("already allocated")
+		logger.V(3).Info("already allocated")
 	} else {
-		logger.Info("starting", "selectedNode", selectedNode)
+		logger.V(3).Info("starting", "selectedNode", selectedNode)
 		if c.resources.NodeLocal {
 			node = selectedNode
 			if node == "" {
@@ -197,7 +196,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
 			// Pick randomly. We could also prefer the one with the least
 			// number of allocations (even spreading) or the most (packing).
 			node = viableNodes[rand.Intn(len(viableNodes))]
-			logger.Info("picked a node ourselves", "selectedNode", selectedNode)
+			logger.V(3).Info("picked a node ourselves", "selectedNode", selectedNode)
 		} else if !contains(c.resources.Nodes, node) ||
 			c.resources.MaxAllocations > 0 &&
 				c.countAllocations(node) >= c.resources.MaxAllocations {
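The comment in this hunk mentions even spreading as an alternative to the random pick. A hedged sketch of what "prefer the node with the fewest allocations" could look like; pickLeastAllocated and its countAllocations parameter are hypothetical stand-ins, not part of the test driver:

```go
package allocation

// pickLeastAllocated returns the viable node with the fewest current
// allocations (even spreading). countAllocations is a stand-in for the
// driver's own per-node allocation counter.
func pickLeastAllocated(viableNodes []string, countAllocations func(string) int) string {
	best := ""
	bestCount := 0
	for _, node := range viableNodes {
		if n := countAllocations(node); best == "" || n < bestCount {
			best, bestCount = node, n
		}
	}
	return best
}
```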
@@ -259,11 +258,11 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
 	defer c.mutex.Unlock()
 	if _, ok := c.allocated[claim.UID]; !ok {
-		logger.Info("already deallocated")
+		logger.V(3).Info("already deallocated")
 		return nil
 	}
-	logger.Info("done")
+	logger.V(3).Info("done")
 	c.numDeallocations++
 	delete(c.allocated, claim.UID)
 	return nil
@@ -271,11 +270,15 @@ func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alp
 func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) {
 	logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod))
-	logger.Info("starting", "claim", prettyPrintSlice(claims), "potentialNodes", potentialNodes)
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	logger.V(3).Info("starting", "claims", claims, "potentialNodes", potentialNodes)
 	defer func() {
 		// UnsuitableNodes is the same for all claims.
-		logger.Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
+		logger.V(3).Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr)
 	}()
 	if c.resources.MaxAllocations == 0 {
 		// All nodes are suitable.
 		return nil
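When resources.MaxAllocations is zero the driver treats capacity as unlimited and reports no unsuitable nodes; otherwise it has to flag nodes that are already at their allocation limit. The sketch below illustrates that idea only; unsuitable and countAllocations are hypothetical helpers, not the driver's actual implementation:

```go
package allocation

// unsuitable returns the subset of potentialNodes that are already at
// capacity. countAllocations stands in for the driver's per-node counter;
// maxAllocations == 0 means "no limit", matching the early return above.
func unsuitable(potentialNodes []string, countAllocations func(string) int, maxAllocations int) []string {
	if maxAllocations == 0 {
		return nil // all nodes are suitable
	}
	var result []string
	for _, node := range potentialNodes {
		if countAllocations(node) >= maxAllocations {
			result = append(result, node)
		}
	}
	return result
}
```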
@@ -336,23 +339,3 @@ func contains[T comparable](list []T, value T) bool {
 	return false
 }
-
-func prettyPrint[T any](obj *T) interface{} {
-	if obj == nil {
-		return "<nil>"
-	}
-	return *obj
-}
-
-// prettyPrintSlice prints the values the slice points to, not the pointers.
-func prettyPrintSlice[T any](slice []*T) interface{} {
-	var values []interface{}
-	for _, v := range slice {
-		if v == nil {
-			values = append(values, "<nil>")
-		} else {
-			values = append(values, *v)
-		}
-	}
-	return values
-}
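With structured logging, the value is handed to the logging backend as a key/value pair and the backend takes care of rendering it, including nil pointers, so the generic prettyPrint and prettyPrintSlice helpers removed above are no longer needed. A small illustration with klog/v2; allocationResult and the node name are made up for the example:

```go
package main

import (
	"k8s.io/klog/v2"
)

type allocationResult struct {
	Node string
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	logger := klog.Background()

	var result *allocationResult // nil until allocation succeeds

	// The pointer is passed as-is; the backend decides how to render it,
	// whether it is nil or points to a populated struct.
	logger.Info("done", "result", result)

	result = &allocationResult{Node: "worker-1"}
	logger.Info("done", "result", result)
}
```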