chore(dra): refactor controller to adopt the mock workqueue in unit tests

googs1025 2024-10-02 21:48:47 +08:00
parent 1c67ed08f2
commit 484443ffab
2 changed files with 92 additions and 38 deletions

View File

@@ -328,7 +328,7 @@ func (ctrl *controller) Run(workers int) {
 	}
 	for i := 0; i < workers; i++ {
-		go wait.Until(ctrl.sync, 0, stopCh)
+		go wait.Until(func() { ctrl.sync(ctrl.queue) }, 0, stopCh)
 	}
 	<-stopCh
@@ -344,12 +344,12 @@ var errRequeue = errors.New("requeue")
 var errPeriodic = errors.New("periodic")
 
 // sync is the main worker.
-func (ctrl *controller) sync() {
-	key, quit := ctrl.queue.Get()
+func (ctrl *controller) sync(queue workqueue.TypedRateLimitingInterface[string]) {
+	key, quit := queue.Get()
 	if quit {
 		return
 	}
-	defer ctrl.queue.Done(key)
+	defer queue.Done(key)
 
 	logger := klog.LoggerWithValues(ctrl.logger, "key", key)
 	ctx := klog.NewContext(ctrl.ctx, logger)
@@ -358,20 +358,20 @@ func (ctrl *controller) sync() {
 	switch err {
 	case nil:
 		logger.V(5).Info("completed")
-		ctrl.queue.Forget(key)
+		queue.Forget(key)
 	case errRequeue:
 		logger.V(5).Info("requeue")
-		ctrl.queue.AddRateLimited(key)
+		queue.AddRateLimited(key)
 	case errPeriodic:
 		logger.V(5).Info("recheck periodically")
-		ctrl.queue.AddAfter(key, recheckDelay)
+		queue.AddAfter(key, recheckDelay)
 	default:
 		logger.Error(err, "processing failed")
 		if obj != nil {
 			// TODO: We don't know here *what* failed. Determine based on error?
 			ctrl.eventRecorder.Event(obj, v1.EventTypeWarning, "Failed", err.Error())
 		}
-		ctrl.queue.AddRateLimited(key)
+		queue.AddRateLimited(key)
 	}
 }
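
Passing the queue into sync is what makes the new test arrangement possible: the worker no longer reaches for ctrl.queue, so a test can hand it any implementation of workqueue.TypedRateLimitingInterface[string] and inspect that implementation afterwards. The snippet below is a minimal sketch of that idea, not code from this PR (the test below relies on the repository's Mock[string] helper instead); fakeQueue and its fields are hypothetical names for illustration only.

package controller

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// fakeQueue is a hypothetical stand-in for a rate-limiting workqueue. It hands
// out a single preloaded key and records whether the worker dropped it
// (Forget), retried it with backoff (AddRateLimited), or scheduled a recheck
// (AddAfter).
type fakeQueue struct {
	key         string
	delivered   bool
	forgotten   []string
	rateLimited []string
	delayed     map[string]time.Duration
}

// Compile-time check: fakeQueue satisfies the interface sync now accepts.
var _ workqueue.TypedRateLimitingInterface[string] = (*fakeQueue)(nil)

func (q *fakeQueue) Add(item string) { q.key, q.delivered = item, false }

func (q *fakeQueue) Len() int {
	if q.delivered {
		return 0
	}
	return 1
}

// Get returns the preloaded key exactly once and never signals shutdown.
func (q *fakeQueue) Get() (string, bool) {
	q.delivered = true
	return q.key, false
}

func (q *fakeQueue) Done(string)        {}
func (q *fakeQueue) ShutDown()          {}
func (q *fakeQueue) ShutDownWithDrain() {}
func (q *fakeQueue) ShuttingDown() bool { return false }

func (q *fakeQueue) AddAfter(item string, d time.Duration) {
	if q.delayed == nil {
		q.delayed = map[string]time.Duration{}
	}
	q.delayed[item] = d
}

func (q *fakeQueue) AddRateLimited(item string)  { q.rateLimited = append(q.rateLimited, item) }
func (q *fakeQueue) Forget(item string)          { q.forgotten = append(q.forgotten, item) }
func (q *fakeQueue) NumRequeues(item string) int { return len(q.rateLimited) }

A test could then drive a single worker pass with q := &fakeQueue{}; q.Add("claim:default/claim"); ctrl.sync(q) and assert on q.forgotten, q.rateLimited, or q.delayed. The test changes below do exactly that, just through the shared Mock[string] helper instead of a one-off fake.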

View File

@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -49,7 +50,7 @@ func TestController(t *testing.T) {
 	claim := createClaim(claimName, claimNamespace, driverName)
 	otherClaim := createClaim(claimName, claimNamespace, otherDriverName)
 	podName := "pod"
-	podKey := "schedulingCtx:default/pod"
+	podSchedulingCtxKey := "schedulingCtx:default/pod"
 	pod := createPod(podName, claimNamespace, nil)
 	podClaimName := "my-pod-claim"
 	podSchedulingCtx := createPodSchedulingContexts(pod)
@@ -125,11 +126,15 @@ func TestController(t *testing.T) {
 		pod                                  *corev1.Pod
 		schedulingCtx, expectedSchedulingCtx *resourceapi.PodSchedulingContext
 		claim, expectedClaim                 *resourceapi.ResourceClaim
-		expectedError                        string
+		expectedWorkQueueState               Mock[string]
 	}{
 		"invalid-key": {
-			key:           "claim:x/y/z",
-			expectedError: `unexpected key format: "x/y/z"`,
+			key: "claim:x/y/z",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					"claim:x/y/z": 1,
+				},
+			},
 		},
 		"not-found": {
 			key: "claim:default/claim",
@@ -154,7 +159,11 @@ func TestController(t *testing.T) {
 			claim:         withDeallocate(withAllocate(claim)),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withDeallocate(withAllocate(claim)),
-			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, our finalizer set, not allocated -> remove finalizer
@@ -170,7 +179,11 @@ func TestController(t *testing.T) {
 			claim:         withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
-			expectedError: "stop allocation: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, other finalizer set, not allocated -> do nothing
 		"deleted-finalizer-no-removal": {
@@ -191,7 +204,11 @@ func TestController(t *testing.T) {
 			claim:         withAllocate(withDeletionTimestamp(claim)),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withAllocate(withDeletionTimestamp(claim)),
-			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, finalizer not set -> do nothing
 		"deleted-no-finalizer": {
@@ -208,16 +225,23 @@ func TestController(t *testing.T) {
 		// pod with no claims -> shouldn't occur, check again anyway
 		"pod-nop": {
-			key:                   podKey,
+			key:                   podSchedulingCtxKey,
 			pod:                   pod,
 			schedulingCtx:         withSelectedNode(podSchedulingCtx),
 			expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// no potential nodes -> shouldn't occur
 		"no-nodes": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -227,7 +251,7 @@ func TestController(t *testing.T) {
 		// potential nodes -> provide unsuitable nodes
 		"info": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -236,12 +260,19 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
 			expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// potential nodes, selected node -> allocate
 		"allocate": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: withReservedFor(withAllocate(claim), pod),
 			pod:           podWithClaim,
@@ -251,11 +282,18 @@ func TestController(t *testing.T) {
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
 				expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
 			expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     "schedulingCtx:default/pod",
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// potential nodes, selected node, all unsuitable -> update unsuitable nodes
 		"is-potential-node": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -264,11 +302,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: potentialNodes}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx)), potentialNodes),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable -> update unsuitable nodes with truncation at start
 		"is-potential-node-truncate-first": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -277,11 +322,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append(maxNodes, nodeName)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append(maxNodes[1:], nodeName)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable (but in reverse order) -> update unsuitable nodes with truncation at end
 		"pod-selected-is-potential-node-truncate-last": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -290,7 +342,14 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append([]string{nodeName}, maxNodes...)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append([]string{nodeName}, maxNodes[:len(maxNodes)-1]...)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
@@ -340,16 +399,11 @@ func TestController(t *testing.T) {
 			) {
 				t.Fatal("could not sync caches")
 			}
-			_, err := ctrl.(*controller).syncKey(ctx, test.key)
-			if err != nil && test.expectedError == "" {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err == nil && test.expectedError != "" {
-				t.Fatalf("did not get expected error %q", test.expectedError)
-			}
-			if err != nil && err.Error() != test.expectedError {
-				t.Fatalf("expected error %q, got %q", test.expectedError, err.Error())
-			}
+			var workQueueState Mock[string]
+			c := ctrl.(*controller)
+			workQueueState.SyncOne(test.key, c.sync)
+			assert.Equal(t, test.expectedWorkQueueState, workQueueState)
+
 			claims, err := kubeClient.ResourceV1alpha3().ResourceClaims("").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err, "list claims")
 			var expectedClaims []resourceapi.ResourceClaim
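
The Mock[string], MockDelayedItem[string], and SyncOne helpers are not defined in this diff. Judging by how they are used above, SyncOne feeds a single key through c.sync against a mock queue and records the outcome: Failures counts keys re-queued via AddRateLimited after an error, while Later holds keys re-added via AddAfter (the 30-second entries match recheckDelay). A hypothetical shape consistent with that usage, for illustration only:

package controller

import "time"

// MockDelayedItem records one AddAfter call observed by the mock queue.
// Field names are inferred from the test above; the real helper's definition
// lives elsewhere in the repository.
type MockDelayedItem[T comparable] struct {
	Item     T
	Duration time.Duration
}

// Mock captures the work-queue state left behind by a single sync pass.
type Mock[T comparable] struct {
	Later    []MockDelayedItem[T] // keys rescheduled with AddAfter
	Failures map[T]int            // rate-limited retries per key
}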