Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-03 17:30:00 +00:00
chore(dra): refactor controller to accept a mock workqueue in unit tests
parent 1c67ed08f2 · commit 484443ffab
@@ -328,7 +328,7 @@ func (ctrl *controller) Run(workers int) {
 	}

 	for i := 0; i < workers; i++ {
-		go wait.Until(ctrl.sync, 0, stopCh)
+		go wait.Until(func() { ctrl.sync(ctrl.queue) }, 0, stopCh)
 	}

 	<-stopCh
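The heart of the change: sync no longer reads ctrl.queue itself; Run passes the queue in explicitly. Production keeps using the controller's real queue, while a test can pass anything that satisfies workqueue.TypedRateLimitingInterface[string]. For reference, the production queue is typically built with client-go's typed workqueue constructors, roughly like this (a sketch; the constructor call and the newClaimQueue name are not part of this diff):

	import "k8s.io/client-go/util/workqueue"

	// Sketch: how the real queue that Run hands to sync is usually built.
	func newClaimQueue() workqueue.TypedRateLimitingInterface[string] {
		return workqueue.NewTypedRateLimitingQueue[string](
			workqueue.DefaultTypedControllerRateLimiter[string](),
		)
	}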
@@ -344,12 +344,12 @@ var errRequeue = errors.New("requeue")
 var errPeriodic = errors.New("periodic")

 // sync is the main worker.
-func (ctrl *controller) sync() {
-	key, quit := ctrl.queue.Get()
+func (ctrl *controller) sync(queue workqueue.TypedRateLimitingInterface[string]) {
+	key, quit := queue.Get()
 	if quit {
 		return
 	}
-	defer ctrl.queue.Done(key)
+	defer queue.Done(key)

 	logger := klog.LoggerWithValues(ctrl.logger, "key", key)
 	ctx := klog.NewContext(ctrl.ctx, logger)
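The new parameter type implies the controller already stores its queue as the same typed interface; a field along these lines is assumed (the struct definition itself is outside this diff):

	type controller struct {
		// ...logger, ctx, eventRecorder, and other fields elided...
		queue workqueue.TypedRateLimitingInterface[string]
	}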
@@ -358,20 +358,20 @@ func (ctrl *controller) sync() {
 	switch err {
 	case nil:
 		logger.V(5).Info("completed")
-		ctrl.queue.Forget(key)
+		queue.Forget(key)
 	case errRequeue:
 		logger.V(5).Info("requeue")
-		ctrl.queue.AddRateLimited(key)
+		queue.AddRateLimited(key)
 	case errPeriodic:
 		logger.V(5).Info("recheck periodically")
-		ctrl.queue.AddAfter(key, recheckDelay)
+		queue.AddAfter(key, recheckDelay)
 	default:
 		logger.Error(err, "processing failed")
 		if obj != nil {
 			// TODO: We don't know here *what* failed. Determine based on error?
 			ctrl.eventRecorder.Event(obj, v1.EventTypeWarning, "Failed", err.Error())
 		}
-		ctrl.queue.AddRateLimited(key)
+		queue.AddRateLimited(key)
 	}
 }
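The test diff below depends on a Mock workqueue type with Failures, Later, and a SyncOne helper. Its implementation is not shown in this commit; judging purely from how the tests use it, a minimal stand-in satisfying workqueue.TypedRateLimitingInterface[string] could look like the following (hypothetical reconstruction, not the helper actually shipped with the commit):

	import (
		"time"

		"k8s.io/client-go/util/workqueue"
	)

	// MockDelayedItem records a single AddAfter call.
	type MockDelayedItem[T comparable] struct {
		Item     T
		Duration time.Duration
	}

	// Mock records workqueue operations instead of executing them, so a
	// test can run one sync pass and assert on the resulting queue state.
	type Mock[T comparable] struct {
		Ready    []T                  // items Get will hand out, in order
		InFlight []T                  // items returned by Get but not yet Done
		Later    []MockDelayedItem[T] // delayed requeues (AddAfter)
		Failures map[T]int            // rate-limited requeues per item
	}

	var _ workqueue.TypedRateLimitingInterface[string] = &Mock[string]{}

	func (m *Mock[T]) Add(item T) { m.Ready = append(m.Ready, item) }

	func (m *Mock[T]) Len() int { return len(m.Ready) }

	// Get hands out the next ready item; once nothing is ready it reports
	// shutdown, so a sync loop returns instead of blocking.
	func (m *Mock[T]) Get() (T, bool) {
		if len(m.Ready) == 0 {
			var zero T
			return zero, true
		}
		item := m.Ready[0]
		if len(m.Ready) == 1 {
			m.Ready = nil // stay comparable to the zero Mock[T]{}
		} else {
			m.Ready = m.Ready[1:]
		}
		m.InFlight = append(m.InFlight, item)
		return item, false
	}

	func (m *Mock[T]) Done(item T) {
		for i, inFlight := range m.InFlight {
			if inFlight == item {
				m.InFlight = append(m.InFlight[:i], m.InFlight[i+1:]...)
				if len(m.InFlight) == 0 {
					m.InFlight = nil
				}
				return
			}
		}
	}

	func (m *Mock[T]) AddAfter(item T, duration time.Duration) {
		m.Later = append(m.Later, MockDelayedItem[T]{Item: item, Duration: duration})
	}

	func (m *Mock[T]) AddRateLimited(item T) {
		if m.Failures == nil {
			m.Failures = map[T]int{}
		}
		m.Failures[item]++
	}

	func (m *Mock[T]) Forget(item T) { delete(m.Failures, item) }

	func (m *Mock[T]) NumRequeues(item T) int { return m.Failures[item] }

	func (m *Mock[T]) ShutDown()          {}
	func (m *Mock[T]) ShutDownWithDrain() {}
	func (m *Mock[T]) ShuttingDown() bool { return false }

	// SyncOne queues exactly one item and invokes sync once against the mock.
	func (m *Mock[T]) SyncOne(item T, sync func(workqueue.TypedRateLimitingInterface[T])) {
		m.Add(item)
		sync(m)
	}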
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"testing"
+	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -49,7 +50,7 @@ func TestController(t *testing.T) {
 	claim := createClaim(claimName, claimNamespace, driverName)
 	otherClaim := createClaim(claimName, claimNamespace, otherDriverName)
 	podName := "pod"
-	podKey := "schedulingCtx:default/pod"
+	podSchedulingCtxKey := "schedulingCtx:default/pod"
 	pod := createPod(podName, claimNamespace, nil)
 	podClaimName := "my-pod-claim"
 	podSchedulingCtx := createPodSchedulingContexts(pod)
@@ -125,11 +126,15 @@ func TestController(t *testing.T) {
 		pod *corev1.Pod
 		schedulingCtx, expectedSchedulingCtx *resourceapi.PodSchedulingContext
 		claim, expectedClaim *resourceapi.ResourceClaim
 		expectedError string
+		expectedWorkQueueState Mock[string]
 	}{
 		"invalid-key": {
-			key: "claim:x/y/z",
-			expectedError: `unexpected key format: "x/y/z"`,
+			key: "claim:x/y/z",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					"claim:x/y/z": 1,
+				},
+			},
 		},
 		"not-found": {
 			key: "claim:default/claim",
@@ -154,7 +159,11 @@ func TestController(t *testing.T) {
 			claim: withDeallocate(withAllocate(claim)),
 			driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withDeallocate(withAllocate(claim)),
 			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},

 		// deletion time stamp set, our finalizer set, not allocated -> remove finalizer
@@ -170,7 +179,11 @@ func TestController(t *testing.T) {
 			claim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
 			driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
 			expectedError: "stop allocation: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, other finalizer set, not allocated -> do nothing
 		"deleted-finalizer-no-removal": {
@@ -191,7 +204,11 @@ func TestController(t *testing.T) {
 			claim: withAllocate(withDeletionTimestamp(claim)),
 			driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withAllocate(withDeletionTimestamp(claim)),
 			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, finalizer not set -> do nothing
 		"deleted-no-finalizer": {
@@ -208,16 +225,23 @@ func TestController(t *testing.T) {

 		// pod with no claims -> shouldn't occur, check again anyway
 		"pod-nop": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			pod: pod,
 			schedulingCtx: withSelectedNode(podSchedulingCtx),
 			expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},

 		// no potential nodes -> shouldn't occur
 		"no-nodes": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: claim,
 			pod: podWithClaim,
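Every case that now ends in errPeriodic expects its key to reappear in Later with a 30-second delay, which pins down the controller's recheck interval: sync maps errPeriodic to queue.AddAfter(key, recheckDelay). In other words, the expectations above and below imply this constant in the controller (value inferred from the tests' Duration; the declaration is not shown in this diff):

	// recheckDelay is what sync passes to queue.AddAfter on errPeriodic.
	const recheckDelay = 30 * time.Second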
@@ -227,7 +251,7 @@ func TestController(t *testing.T) {

 		// potential nodes -> provide unsuitable nodes
 		"info": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: claim,
 			pod: podWithClaim,
@@ -236,12 +260,19 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
 			expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},

 		// potential nodes, selected node -> allocate
 		"allocate": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: withReservedFor(withAllocate(claim), pod),
 			pod: podWithClaim,
@@ -251,11 +282,18 @@ func TestController(t *testing.T) {
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
 				expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
 			expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: "schedulingCtx:default/pod",
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// potential nodes, selected node, all unsuitable -> update unsuitable nodes
 		"is-potential-node": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: claim,
 			pod: podWithClaim,
@@ -264,11 +302,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: potentialNodes}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx)), potentialNodes),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable -> update unsuitable nodes with truncation at start
 		"is-potential-node-truncate-first": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: claim,
 			pod: podWithClaim,
@@ -277,11 +322,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append(maxNodes, nodeName)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append(maxNodes[1:], nodeName)),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable (but in reverse order) -> update unsuitable nodes with truncation at end
 		"pod-selected-is-potential-node-truncate-last": {
-			key: podKey,
+			key: podSchedulingCtxKey,
 			claim: claim,
 			expectedClaim: claim,
 			pod: podWithClaim,
@@ -290,7 +342,14 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append([]string{nodeName}, maxNodes...)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append([]string{nodeName}, maxNodes[:len(maxNodes)-1]...)),
 			expectedError: errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item: podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
@@ -340,16 +399,11 @@ func TestController(t *testing.T) {
 			) {
 				t.Fatal("could not sync caches")
 			}
-			_, err := ctrl.(*controller).syncKey(ctx, test.key)
-			if err != nil && test.expectedError == "" {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err == nil && test.expectedError != "" {
-				t.Fatalf("did not get expected error %q", test.expectedError)
-			}
-			if err != nil && err.Error() != test.expectedError {
-				t.Fatalf("expected error %q, got %q", test.expectedError, err.Error())
-			}
+			var workQueueState Mock[string]
+			c := ctrl.(*controller)
+			workQueueState.SyncOne(test.key, c.sync)
+			assert.Equal(t, test.expectedWorkQueueState, workQueueState)

 			claims, err := kubeClient.ResourceV1alpha3().ResourceClaims("").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err, "list claims")
 			var expectedClaims []resourceapi.ResourceClaim
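Taken together, the new test flow is: seed the mock with one key, run sync once, and compare the whole queue state. A minimal sketch in isolation, reusing the hypothetical Mock from above with a stand-in sync function (assumes the testify assert package, as in the test file):

	func TestSyncOneSketch(t *testing.T) {
		// Stand-in with the same shape as (*controller).sync: take one
		// key from the queue and rate-limit it, as sync does on failure.
		sync := func(queue workqueue.TypedRateLimitingInterface[string]) {
			key, quit := queue.Get()
			if quit {
				return
			}
			defer queue.Done(key)
			queue.AddRateLimited(key) // simulate "processing failed"
		}

		var state Mock[string]
		state.SyncOne("claim:default/claim", sync)

		// One failure recorded; nothing in flight or scheduled for later.
		assert.Equal(t, Mock[string]{
			Failures: map[string]int{"claim:default/claim": 1},
		}, state)
	}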