Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-07 19:23:40 +00:00)
Use the generic/typed workqueue throughout
This change switches the project to the generic (typed) workqueue throughout, improving type safety and readability of the code.
parent d387c0c903
commit 6d0ac8c561
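For reference, a minimal sketch of the pattern this commit applies across controllers; the "example" queue name and the process helper are illustrative only, not taken from the diff:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// process stands in for a controller sync handler keyed by namespace/name.
func process(key string) error {
	fmt.Println("syncing", key)
	return nil
}

func main() {
	// Old style: the untyped queue hands back interface{}, so every consumer
	// needs a key.(string) type assertion at runtime.
	oldQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
	oldQueue.Add("default/foo")
	if key, shutdown := oldQueue.Get(); !shutdown {
		_ = process(key.(string)) // runtime assertion
		oldQueue.Done(key)
	}

	// New style: the queue is parameterized with [string], so Get returns a
	// string directly and mismatched item types fail at compile time.
	newQueue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
	newQueue.Add("default/foo")
	if key, shutdown := newQueue.Get(); !shutdown {
		_ = process(key) // no assertion needed
		newQueue.Done(key)
	}
}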
@@ -82,7 +82,7 @@ type Signer struct {
// have one item (Named <ConfigMapName>) in this queue. We are using it
// serializes and collapses updates as they can come from both the ConfigMap
// and Secrets controllers.
syncQueue workqueue.RateLimitingInterface
syncQueue workqueue.TypedRateLimitingInterface[string]

secretLister corelisters.SecretLister
secretSynced cache.InformerSynced
@@ -103,7 +103,12 @@ func NewSigner(cl clientset.Interface, secrets informers.SecretInformer, configM
secretSynced: secrets.Informer().HasSynced,
configMapLister: configMaps.Lister(),
configMapSynced: configMaps.Informer().HasSynced,
syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"),
syncQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "bootstrap_signer_queue",
},
),
}

configMaps.Informer().AddEventHandlerWithResyncPeriod(
@@ -68,7 +68,7 @@ type TokenCleaner struct {
// secretSynced returns true if the secret shared informer has been synced at least once.
secretSynced cache.InformerSynced

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewTokenCleaner returns a new *NewTokenCleaner.
@@ -78,7 +78,12 @@ func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInforme
secretLister: secrets.Lister(),
secretSynced: secrets.Informer().HasSynced,
tokenSecretNamespace: options.TokenSecretNamespace,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "token_cleaner",
},
),
}

secrets.Informer().AddEventHandlerWithResyncPeriod(
@@ -144,7 +149,7 @@ func (tc *TokenCleaner) processNextWorkItem(ctx context.Context) bool {
}
defer tc.queue.Done(key)

if err := tc.syncFunc(ctx, key.(string)); err != nil {
if err := tc.syncFunc(ctx, key); err != nil {
tc.queue.AddRateLimited(key)
utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err))
return true
@@ -49,7 +49,7 @@ type CertificateController struct {

handler func(context.Context, *certificates.CertificateSigningRequest) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

func NewCertificateController(
@@ -63,11 +63,16 @@ func NewCertificateController(
cc := &CertificateController{
name: name,
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "certificate"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedMaxOfRateLimiter[string](
workqueue.NewTypedItemExponentialFailureRateLimiter[string](200*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "certificate",
},
),
handler: handler,
}

@@ -140,7 +145,7 @@ func (cc *CertificateController) processNextWorkItem(ctx context.Context) bool {
}
defer cc.queue.Done(cKey)

if err := cc.syncFunc(ctx, cKey.(string)); err != nil {
if err := cc.syncFunc(ctx, cKey); err != nil {
cc.queue.AddRateLimited(cKey)
if _, ignorable := err.(ignorableError); !ignorable {
utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err))
@@ -55,7 +55,12 @@ func NewPublisher(cmInformer coreinformers.ConfigMapInformer, nsInformer coreinf
e := &Publisher{
client: cl,
rootCA: rootCA,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "root_ca_cert_publisher"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "root_ca_cert_publisher",
},
),
}

cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -90,7 +95,7 @@ type Publisher struct {

nsListerSynced cache.InformerSynced

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// Run starts process
@@ -164,7 +169,7 @@ func (c *Publisher) processNextWorkItem(ctx context.Context) bool {
}
defer c.queue.Done(key)

if err := c.syncHandler(ctx, key.(string)); err != nil {
if err := c.syncHandler(ctx, key); err != nil {
utilruntime.HandleError(fmt.Errorf("syncing %q failed: %v", key, err))
c.queue.AddRateLimited(key)
return true
@@ -48,7 +48,7 @@ type ClusterRoleAggregationController struct {
clusterRolesSynced cache.InformerSynced

syncHandler func(ctx context.Context, key string) error
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewClusterRoleAggregation creates a new controller
@@ -58,7 +58,12 @@ func NewClusterRoleAggregation(clusterRoleInformer rbacinformers.ClusterRoleInfo
clusterRoleLister: clusterRoleInformer.Lister(),
clusterRolesSynced: clusterRoleInformer.Informer().HasSynced,

queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleAggregator"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "ClusterRoleAggregator",
},
),
}
c.syncHandler = c.syncClusterRole

@@ -212,7 +217,7 @@ func (c *ClusterRoleAggregationController) processNextWorkItem(ctx context.Conte
}
defer c.queue.Done(dsKey)

err := c.syncHandler(ctx, dsKey.(string))
err := c.syncHandler(ctx, dsKey)
if err == nil {
c.queue.Forget(dsKey)
return true
@@ -60,7 +60,7 @@ var (
// ControllerV2 is a controller for CronJobs.
// Refactored Cronjob controller that uses DelayingQueue and informers
type ControllerV2 struct {
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

kubeClient clientset.Interface
recorder record.EventRecorder
@@ -85,7 +85,12 @@ func NewControllerV2(ctx context.Context, jobInformer batchv1informers.JobInform
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))

jm := &ControllerV2{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cronjob"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "cronjob",
},
),
kubeClient: kubeClient,
broadcaster: eventBroadcaster,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "cronjob-controller"}),
@@ -162,10 +167,10 @@ func (jm *ControllerV2) processNextWorkItem(ctx context.Context) bool {
}
defer jm.queue.Done(key)

requeueAfter, err := jm.sync(ctx, key.(string))
requeueAfter, err := jm.sync(ctx, key)
switch {
case err != nil:
utilruntime.HandleError(fmt.Errorf("error syncing CronJobController %v, requeuing: %v", key.(string), err))
utilruntime.HandleError(fmt.Errorf("error syncing CronJobController %v, requeuing: %w", key, err))
jm.queue.AddRateLimited(key)
case requeueAfter != nil:
jm.queue.Forget(key)
@@ -1375,12 +1375,12 @@ func TestControllerV2SyncCronJob(t *testing.T) {
}

type fakeQueue struct {
workqueue.RateLimitingInterface
workqueue.TypedRateLimitingInterface[string]
delay time.Duration
key interface{}
}

func (f *fakeQueue) AddAfter(key interface{}, delay time.Duration) {
func (f *fakeQueue) AddAfter(key string, delay time.Duration) {
f.delay = delay
f.key = key
}
@@ -1593,7 +1593,12 @@ func TestControllerV2UpdateCronJob(t *testing.T) {
return
}
jm.now = justASecondBeforeTheHour
queue := &fakeQueue{RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test-update-cronjob")}
queue := &fakeQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "test-update-cronjob",
},
)}
jm.queue = queue
jm.jobControl = &fakeJobControl{}
jm.cronJobControl = &fakeCJControl{}
@@ -123,7 +123,7 @@ type DaemonSetsController struct {
nodeStoreSynced cache.InformerSynced

// DaemonSet keys that need to be synced.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

failedPodsBackoff *flowcontrol.Backoff
}
@@ -153,7 +153,12 @@ func NewDaemonSetsController(
},
burstReplicas: BurstReplicas,
expectations: controller.NewControllerExpectations(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "daemonset",
},
),
}

daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -315,7 +320,7 @@ func (dsc *DaemonSetsController) processNextWorkItem(ctx context.Context) bool {
}
defer dsc.queue.Done(dsKey)

err := dsc.syncHandler(ctx, dsKey.(string))
err := dsc.syncHandler(ctx, dsKey)
if err == nil {
dsc.queue.Forget(dsKey)
return true
@@ -474,7 +474,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
// DeletedFinalStateUnknown should queue the embedded DS if found.
manager.deleteDaemonset(logger, cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
enqueuedKey, _ := manager.queue.Get()
if enqueuedKey.(string) != "default/foo" {
if enqueuedKey != "default/foo" {
t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
}
}
@@ -2890,7 +2890,7 @@ func TestAddNode(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for node %v", node2.Name)
}
}
@@ -2920,11 +2920,11 @@ func TestAddPod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(ds1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -2934,11 +2934,11 @@ func TestAddPod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(ds2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -3011,11 +3011,11 @@ func TestUpdatePod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(ds1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -3027,11 +3027,11 @@ func TestUpdatePod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(ds2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -3189,11 +3189,11 @@ func TestDeletePod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(ds1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -3203,11 +3203,11 @@ func TestDeletePod(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = manager.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(ds2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -3255,7 +3255,7 @@ func bumpResourceVersion(obj metav1.Object) {

// getQueuedKeys returns a sorted list of keys in the queue.
// It can be used to quickly check that multiple keys are in there.
func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
func getQueuedKeys(queue workqueue.TypedRateLimitingInterface[string]) []string {
var keys []string
count := queue.Len()
for i := 0; i < count; i++ {
@@ -3263,7 +3263,7 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
if done {
return keys
}
keys = append(keys, key.(string))
keys = append(keys, key)
}
sort.Strings(keys)
return keys
@@ -94,7 +94,7 @@ type DeploymentController struct {
podListerSynced cache.InformerSynced

// Deployments that need to be synced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewDeploymentController creates a new DeploymentController.
@@ -105,7 +105,12 @@ func NewDeploymentController(ctx context.Context, dInformer appsinformers.Deploy
client: client,
eventBroadcaster: eventBroadcaster,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "deployment",
},
),
}
dc.rsControl = controller.RealRSControl{
KubeClient: client,
@@ -486,19 +491,19 @@ func (dc *DeploymentController) processNextWorkItem(ctx context.Context) bool {
}
defer dc.queue.Done(key)

err := dc.syncHandler(ctx, key.(string))
err := dc.syncHandler(ctx, key)
dc.handleErr(ctx, err, key)

return true
}

func (dc *DeploymentController) handleErr(ctx context.Context, err error, key interface{}) {
func (dc *DeploymentController) handleErr(ctx context.Context, err error, key string) {
logger := klog.FromContext(ctx)
if err == nil || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
dc.queue.Forget(key)
return
}
ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
ns, name, keyErr := cache.SplitMetaNamespaceKey(key)
if keyErr != nil {
logger.Error(err, "Failed to split meta namespace cache key", "cacheKey", key)
}
@@ -716,11 +716,11 @@ func TestAddReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -729,11 +729,11 @@ func TestAddReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -801,11 +801,11 @@ func TestUpdateReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -817,11 +817,11 @@ func TestUpdateReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -953,11 +953,11 @@ func TestDeleteReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -966,11 +966,11 @@ func TestDeleteReplicaSet(t *testing.T) {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -168,7 +168,12 @@ func TestRequeueStuckDeployment(t *testing.T) {
}

dc := &DeploymentController{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "doesnt_matter"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "doesnt_matter",
},
),
}
dc.enqueueDeployment = dc.enqueue
@@ -103,11 +103,11 @@ type DisruptionController struct {
ssListerSynced cache.InformerSynced

// PodDisruptionBudget keys that need to be synced.
queue workqueue.RateLimitingInterface
recheckQueue workqueue.DelayingInterface
queue workqueue.TypedRateLimitingInterface[string]
recheckQueue workqueue.TypedDelayingInterface[string]

// pod keys that need to be synced due to a stale DisruptionTarget condition.
stalePodDisruptionQueue workqueue.RateLimitingInterface
stalePodDisruptionQueue workqueue.TypedRateLimitingInterface[string]
stalePodDisruptionTimeout time.Duration

broadcaster record.EventBroadcaster
@@ -177,10 +177,29 @@ func NewDisruptionControllerInternal(ctx context.Context,
) *DisruptionController {
logger := klog.FromContext(ctx)
dc := &DisruptionController{
kubeClient: kubeClient,
queue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "disruption"), workqueue.DefaultControllerRateLimiter()),
recheckQueue: workqueue.NewDelayingQueueWithCustomClock(clock, "disruption_recheck"),
stalePodDisruptionQueue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "stale_pod_disruption"), workqueue.DefaultControllerRateLimiter()),
kubeClient: kubeClient,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
DelayingQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
Clock: clock,
Name: "disruption",
}),
},
),
recheckQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
Clock: clock,
Name: "disruption_recheck",
}),
stalePodDisruptionQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
DelayingQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
Clock: clock,
Name: "stale_pod_disruption",
}),
},
),
broadcaster: record.NewBroadcaster(record.WithContext(ctx)),
stalePodDisruptionTimeout: stalePodDisruptionTimeout,
}
@@ -617,13 +636,13 @@ func (dc *DisruptionController) processNextWorkItem(ctx context.Context) bool {
}
defer dc.queue.Done(dKey)

err := dc.sync(ctx, dKey.(string))
err := dc.sync(ctx, dKey)
if err == nil {
dc.queue.Forget(dKey)
return true
}

utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %v", dKey.(string), err))
utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %w", dKey, err)) //nolint:stylecheck
dc.queue.AddRateLimited(dKey)

return true
@@ -655,12 +674,12 @@ func (dc *DisruptionController) processNextStalePodDisruptionWorkItem(ctx contex
return false
}
defer dc.stalePodDisruptionQueue.Done(key)
err := dc.syncStalePodDisruption(ctx, key.(string))
err := dc.syncStalePodDisruption(ctx, key)
if err == nil {
dc.stalePodDisruptionQueue.Forget(key)
return true
}
utilruntime.HandleError(fmt.Errorf("error syncing Pod %v to clear DisruptionTarget condition, requeueing: %v", key.(string), err))
utilruntime.HandleError(fmt.Errorf("error syncing Pod %v to clear DisruptionTarget condition, requeueing: %w", key, err))
dc.stalePodDisruptionQueue.AddRateLimited(key)
return true
}
@@ -1029,7 +1029,7 @@ func TestPDBNotExist(t *testing.T) {
func TestUpdateDisruptedPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
dc, ps := newFakeDisruptionController(ctx)
dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
dc.recheckQueue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Name: "pdb_queue"})
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
currentTime := dc.clock.Now()
pdb.Status.DisruptedPods = map[string]metav1.Time{
@@ -76,8 +76,13 @@ func NewEndpointController(ctx context.Context, podInformer coreinformers.PodInf
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"})

e := &Controller{
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "endpoint"),
client: client,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "endpoint",
},
),
workerLoopPeriod: time.Second,
}

@@ -146,7 +151,7 @@ type Controller struct {
// more often than services with few pods; it also would cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes.
workerLoopPeriod time.Duration
@@ -324,19 +329,19 @@ func (e *Controller) processNextWorkItem(ctx context.Context) bool {
defer e.queue.Done(eKey)

logger := klog.FromContext(ctx)
err := e.syncService(ctx, eKey.(string))
err := e.syncService(ctx, eKey)
e.handleErr(logger, err, eKey)

return true
}

func (e *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
func (e *Controller) handleErr(logger klog.Logger, err error, key string) {
if err == nil {
e.queue.Forget(key)
return
}

ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
ns, name, keyErr := cache.SplitMetaNamespaceKey(key)
if keyErr != nil {
logger.Error(err, "Failed to split meta namespace cache key", "key", key)
}
@@ -99,12 +99,17 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
// such as an update to a Service or Deployment. A more significant
// rate limit back off here helps ensure that the Controller does not
// overwhelm the API Server.
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff),
// 10 qps, 100 bucket size. This is only for retry speed and its
// only the overall factor (not per item).
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "endpoint_slice"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedMaxOfRateLimiter(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultSyncBackOff, maxSyncBackOff),
// 10 qps, 100 bucket size. This is only for retry speed and its
// only the overall factor (not per item).
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "endpoint_slice",
},
),
workerLoopPeriod: time.Second,
}

@@ -231,7 +236,7 @@ type Controller struct {
// more often than services with few pods; it also would cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// maxEndpointsPerSlice references the maximum number of endpoints that
// should be added to an EndpointSlice
@@ -293,13 +298,13 @@ func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
}
defer c.queue.Done(cKey)

err := c.syncService(logger, cKey.(string))
err := c.syncService(logger, cKey)
c.handleErr(logger, err, cKey)

return true
}

func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
func (c *Controller) handleErr(logger klog.Logger, err error, key string) {
trackSync(err)

if err == nil {
@@ -88,12 +88,16 @@ func NewController(ctx context.Context, endpointsInformer coreinformers.Endpoint
// processes events that can require significant EndpointSlice changes.
// A more significant rate limit back off here helps ensure that the
// Controller does not overwhelm the API Server.
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedMaxOfRateLimiter(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultSyncBackOff, maxSyncBackOff),
// 10 qps, 100 bucket size. This is only for retry speed and its
// only the overall factor (not per item).
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "endpoint_slice_mirroring"),
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "endpoint_slice_mirroring",
},
),
workerLoopPeriod: time.Second,
}

@@ -192,7 +196,7 @@ type Controller struct {
// more often than Endpoints with few addresses; it also would cause an
// Endpoints resource that's inserted multiple times to be processed more
// than necessary.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// maxEndpointsPerSubset references the maximum number of endpoints that
// should be added to an EndpointSlice for an EndpointSubset.
@@ -251,13 +255,13 @@ func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
}
defer c.queue.Done(cKey)

err := c.syncEndpoints(logger, cKey.(string))
err := c.syncEndpoints(logger, cKey)
c.handleErr(logger, err, cKey)

return true
}

func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
func (c *Controller) handleErr(logger klog.Logger, err error, key string) {
if err == nil {
c.queue.Forget(key)
return
@@ -20,7 +20,6 @@ import (
"context"
goerrors "errors"
"fmt"
"k8s.io/controller-manager/pkg/informerfactory"
"reflect"
"sync"
"time"
@@ -42,6 +41,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/controller-manager/controller"
"k8s.io/controller-manager/pkg/informerfactory"
"k8s.io/klog/v2"
c "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metrics"
@@ -65,9 +65,9 @@ type GarbageCollector struct {
restMapper meta.ResettableRESTMapper
metadataClient metadata.Interface
// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
attemptToDelete workqueue.RateLimitingInterface
attemptToDelete workqueue.TypedRateLimitingInterface[*node]
// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
attemptToOrphan workqueue.RateLimitingInterface
attemptToOrphan workqueue.TypedRateLimitingInterface[*node]
dependencyGraphBuilder *GraphBuilder
// GC caches the owners that do not exist according to the API server.
absentOwnerCache *ReferenceCache
@@ -414,12 +414,12 @@ func TestProcessEvent(t *testing.T) {

dependencyGraphBuilder := &GraphBuilder{
informersStarted: alwaysStarted,
graphChanges: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
graphChanges: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*event]()),
uidToNode: &concurrentUIDToNode{
uidToNodeLock: sync.RWMutex{},
uidToNode: make(map[types.UID]*node),
},
attemptToDelete: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
attemptToDelete: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*node]()),
absentOwnerCache: NewReferenceCache(2),
}
for i := 0; i < len(scenario.events); i++ {
@@ -2318,9 +2318,9 @@ func TestConflictingData(t *testing.T) {
restMapper := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}

// set up our workqueues
attemptToDelete := newTrackingWorkqueue()
attemptToOrphan := newTrackingWorkqueue()
graphChanges := newTrackingWorkqueue()
attemptToDelete := newTrackingWorkqueue[*node]()
attemptToOrphan := newTrackingWorkqueue[*node]()
graphChanges := newTrackingWorkqueue[*event]()

gc := &GarbageCollector{
metadataClient: metadataClient,
@@ -2459,9 +2459,9 @@ type stepContext struct {
gc *GarbageCollector
eventRecorder *record.FakeRecorder
metadataClient *fakemetadata.FakeMetadataClient
attemptToDelete *trackingWorkqueue
attemptToOrphan *trackingWorkqueue
graphChanges *trackingWorkqueue
attemptToDelete *trackingWorkqueue[*node]
attemptToOrphan *trackingWorkqueue[*node]
graphChanges *trackingWorkqueue[*event]
}

type step struct {
@@ -2521,7 +2521,7 @@ func insertEvent(e *event) step {
check: func(ctx stepContext) {
ctx.t.Helper()
// drain queue into items
var items []interface{}
var items []*event
for ctx.gc.dependencyGraphBuilder.graphChanges.Len() > 0 {
item, _ := ctx.gc.dependencyGraphBuilder.graphChanges.Get()
ctx.gc.dependencyGraphBuilder.graphChanges.Done(item)
@@ -2711,7 +2711,7 @@ func assertState(s state) step {
break
}

a := ctx.graphChanges.pendingList[i].(*event)
a := ctx.graphChanges.pendingList[i]
if !reflect.DeepEqual(e, a) {
objectDiff := ""
if !reflect.DeepEqual(e.obj, a.obj) {
@@ -2739,18 +2739,18 @@ func assertState(s state) step {
ctx.t.Errorf("attemptToDelete: expected %d events, got %d", len(s.pendingAttemptToDelete), ctx.attemptToDelete.Len())
break
}
a := ctx.attemptToDelete.pendingList[i].(*node).identity
a_virtual := ctx.attemptToDelete.pendingList[i].(*node).virtual
a := ctx.attemptToDelete.pendingList[i].identity
aVirtual := ctx.attemptToDelete.pendingList[i].virtual
if !reflect.DeepEqual(e, a) {
ctx.t.Errorf("attemptToDelete[%d]: expected %v, got %v", i, e, a)
}
if e_virtual != a_virtual {
if e_virtual != aVirtual {
ctx.t.Errorf("attemptToDelete[%d]: expected virtual node %v, got non-virtual node %v", i, e, a)
}
}
if ctx.attemptToDelete.Len() > len(s.pendingAttemptToDelete) {
for i, a := range ctx.attemptToDelete.pendingList[len(s.pendingAttemptToDelete):] {
ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.(*node).identity)
ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.identity)
}
}
}
@@ -2762,14 +2762,14 @@ func assertState(s state) step {
ctx.t.Errorf("attemptToOrphan: expected %d events, got %d", len(s.pendingAttemptToOrphan), ctx.attemptToOrphan.Len())
break
}
a := ctx.attemptToOrphan.pendingList[i].(*node).identity
a := ctx.attemptToOrphan.pendingList[i].identity
if !reflect.DeepEqual(e, a) {
ctx.t.Errorf("attemptToOrphan[%d]: expected %v, got %v", i, e, a)
}
}
if ctx.attemptToOrphan.Len() > len(s.pendingAttemptToOrphan) {
for i, a := range ctx.attemptToOrphan.pendingList[len(s.pendingAttemptToOrphan):] {
ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.(*node).identity)
ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.identity)
}
}
}
@@ -2782,46 +2782,46 @@ func assertState(s state) step {
// allows introspection of the items in the queue,
// and treats AddAfter and AddRateLimited the same as Add
// so they are always synchronous.
type trackingWorkqueue struct {
limiter workqueue.RateLimitingInterface
pendingList []interface{}
pendingMap map[interface{}]struct{}
type trackingWorkqueue[T comparable] struct {
limiter workqueue.TypedRateLimitingInterface[T]
pendingList []T
pendingMap map[T]struct{}
}

var _ = workqueue.RateLimitingInterface(&trackingWorkqueue{})
var _ = workqueue.TypedRateLimitingInterface[string](&trackingWorkqueue[string]{})

func newTrackingWorkqueue() *trackingWorkqueue {
return &trackingWorkqueue{
limiter: workqueue.NewRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Inf, 100)}),
pendingMap: map[interface{}]struct{}{},
func newTrackingWorkqueue[T comparable]() *trackingWorkqueue[T] {
return &trackingWorkqueue[T]{
limiter: workqueue.NewTypedRateLimitingQueue[T](&workqueue.TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Inf, 100)}),
pendingMap: map[T]struct{}{},
}
}

func (t *trackingWorkqueue) Add(item interface{}) {
func (t *trackingWorkqueue[T]) Add(item T) {
t.queue(item)
t.limiter.Add(item)
}
func (t *trackingWorkqueue) AddAfter(item interface{}, duration time.Duration) {
func (t *trackingWorkqueue[T]) AddAfter(item T, duration time.Duration) {
t.Add(item)
}
func (t *trackingWorkqueue) AddRateLimited(item interface{}) {
func (t *trackingWorkqueue[T]) AddRateLimited(item T) {
t.Add(item)
}
func (t *trackingWorkqueue) Get() (interface{}, bool) {
func (t *trackingWorkqueue[T]) Get() (T, bool) {
item, shutdown := t.limiter.Get()
t.dequeue(item)
return item, shutdown
}
func (t *trackingWorkqueue) Done(item interface{}) {
func (t *trackingWorkqueue[T]) Done(item T) {
t.limiter.Done(item)
}
func (t *trackingWorkqueue) Forget(item interface{}) {
func (t *trackingWorkqueue[T]) Forget(item T) {
t.limiter.Forget(item)
}
func (t *trackingWorkqueue) NumRequeues(item interface{}) int {
func (t *trackingWorkqueue[T]) NumRequeues(item T) int {
return 0
}
func (t *trackingWorkqueue) Len() int {
func (t *trackingWorkqueue[T]) Len() int {
if e, a := len(t.pendingList), len(t.pendingMap); e != a {
panic(fmt.Errorf("pendingList != pendingMap: %d / %d", e, a))
}
@@ -2830,17 +2830,17 @@ func (t *trackingWorkqueue) Len() int {
}
return len(t.pendingList)
}
func (t *trackingWorkqueue) ShutDown() {
func (t *trackingWorkqueue[T]) ShutDown() {
t.limiter.ShutDown()
}
func (t *trackingWorkqueue) ShutDownWithDrain() {
func (t *trackingWorkqueue[T]) ShutDownWithDrain() {
t.limiter.ShutDownWithDrain()
}
func (t *trackingWorkqueue) ShuttingDown() bool {
func (t *trackingWorkqueue[T]) ShuttingDown() bool {
return t.limiter.ShuttingDown()
}

func (t *trackingWorkqueue) queue(item interface{}) {
func (t *trackingWorkqueue[T]) queue(item T) {
if _, queued := t.pendingMap[item]; queued {
// fmt.Printf("already queued: %#v\n", item)
return
@@ -2848,13 +2848,13 @@ func (t *trackingWorkqueue) queue(item interface{}) {
t.pendingMap[item] = struct{}{}
t.pendingList = append(t.pendingList, item)
}
func (t *trackingWorkqueue) dequeue(item interface{}) {
func (t *trackingWorkqueue[T]) dequeue(item T) {
if _, queued := t.pendingMap[item]; !queued {
// fmt.Printf("not queued: %#v\n", item)
return
}
delete(t.pendingMap, item)
newPendingList := []interface{}{}
newPendingList := []T{}
for _, p := range t.pendingList {
if p == item {
continue
@@ -103,13 +103,13 @@ type GraphBuilder struct {
metadataClient metadata.Interface
// monitors are the producer of the graphChanges queue, graphBuilder alters
// the in-memory graph according to the changes.
graphChanges workqueue.RateLimitingInterface
graphChanges workqueue.TypedRateLimitingInterface[*event]
// uidToNode doesn't require a lock to protect, because only the
// single-threaded GraphBuilder.processGraphChanges() reads/writes it.
uidToNode *concurrentUIDToNode
// GraphBuilder is the producer of attemptToDelete and attemptToOrphan, GC is the consumer.
attemptToDelete workqueue.RateLimitingInterface
attemptToOrphan workqueue.RateLimitingInterface
attemptToDelete workqueue.TypedRateLimitingInterface[*node]
attemptToOrphan workqueue.TypedRateLimitingInterface[*node]
// GraphBuilder and GC share the absentOwnerCache. Objects that are known to
// be non-existent are added to the cached.
absentOwnerCache *ReferenceCache
@@ -145,8 +145,18 @@ func NewDependencyGraphBuilder(
) *GraphBuilder {
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))

attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
attemptToDelete := workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[*node](),
workqueue.TypedRateLimitingQueueConfig[*node]{
Name: "garbage_collector_attempt_to_delete",
},
)
attemptToOrphan := workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[*node](),
workqueue.TypedRateLimitingQueueConfig[*node]{
Name: "garbage_collector_attempt_to_orphan",
},
)
absentOwnerCache := NewReferenceCache(500)
graphBuilder := &GraphBuilder{
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "garbage-collector-controller"}),
@@ -154,7 +164,12 @@ func NewDependencyGraphBuilder(
metadataClient: metadataClient,
informersStarted: informersStarted,
restMapper: mapper,
graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
graphChanges: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[*event](),
workqueue.TypedRateLimitingQueueConfig[*event]{
Name: "garbage_collector_graph_changes",
},
),
uidToNode: &concurrentUIDToNode{
uidToNode: make(map[types.UID]*node),
},
@@ -666,12 +681,8 @@ func (gb *GraphBuilder) processGraphChanges(logger klog.Logger) bool {
return false
}
defer gb.graphChanges.Done(item)
event, ok := item.(*event)
if !ok {
utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item))
return true
}
obj := event.obj
event := item
obj := item.obj
accessor, err := meta.Accessor(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
@@ -971,8 +982,8 @@ func getAlternateOwnerIdentity(deps []*node, verifiedAbsentIdentity objectRefere
}

func (gb *GraphBuilder) GetGraphResources() (
attemptToDelete workqueue.RateLimitingInterface,
attemptToOrphan workqueue.RateLimitingInterface,
attemptToDelete workqueue.TypedRateLimitingInterface[*node],
attemptToOrphan workqueue.TypedRateLimitingInterface[*node],
absentOwnerCache *ReferenceCache,
) {
return gb.attemptToDelete, gb.attemptToOrphan, gb.absentOwnerCache
@@ -109,10 +109,10 @@ type Controller struct {
podStore corelisters.PodLister

// Jobs that need to be updated
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// Orphan deleted pods that still have a Job tracking finalizer to be removed
orphanQueue workqueue.RateLimitingInterface
orphanQueue workqueue.TypedRateLimitingInterface[string]

broadcaster record.EventBroadcaster
recorder record.EventRecorder
@@ -159,8 +159,8 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn
},
expectations: controller.NewControllerExpectations(),
finalizerExpectations: newUIDTrackingExpectations(),
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.RateLimitingQueueConfig{Name: "job", Clock: clock}),
orphanQueue: workqueue.NewRateLimitingQueueWithConfig(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.RateLimitingQueueConfig{Name: "job_orphan_pod", Clock: clock}),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedItemExponentialFailureRateLimiter[string](DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.TypedRateLimitingQueueConfig[string]{Name: "job", Clock: clock}),
orphanQueue: workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedItemExponentialFailureRateLimiter[string](DefaultJobApiBackOff, MaxJobApiBackOff), workqueue.TypedRateLimitingQueueConfig[string]{Name: "job_orphan_pod", Clock: clock}),
broadcaster: eventBroadcaster,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
clock: clock,
@@ -590,7 +590,7 @@ func (jm *Controller) processNextWorkItem(ctx context.Context) bool {
}
defer jm.queue.Done(key)

err := jm.syncHandler(ctx, key.(string))
err := jm.syncHandler(ctx, key)
if err == nil {
jm.queue.Forget(key)
return true
@@ -613,7 +613,7 @@ func (jm *Controller) processNextOrphanPod(ctx context.Context) bool {
return false
}
defer jm.orphanQueue.Done(key)
err := jm.syncOrphanPod(ctx, key.(string))
err := jm.syncOrphanPod(ctx, key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Error syncing orphan pod: %v", err))
jm.orphanQueue.AddRateLimited(key)
@@ -5214,22 +5214,22 @@ func TestAddPod(t *testing.T) {
jm.addPod(logger, pod1)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done := jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(job1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

jm.addPod(logger, pod2)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done = jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(job2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -5290,11 +5290,11 @@ func TestUpdatePod(t *testing.T) {
jm.updatePod(logger, &prev, pod1)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done := jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(job1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

@@ -5303,11 +5303,11 @@ func TestUpdatePod(t *testing.T) {
jm.updatePod(logger, &prev, pod2)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done = jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(job2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -5420,22 +5420,22 @@ func TestDeletePod(t *testing.T) {
jm.deletePod(logger, pod1, true)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done := jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
}
expectedKey, _ := controller.KeyFunc(job1)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}

jm.deletePod(logger, pod2, true)
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, jm, 1)
key, done = jm.queue.Get()
if key == nil || done {
if key == "" || done {
t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
}
expectedKey, _ = controller.KeyFunc(job2)
if got, want := key.(string), expectedKey; got != want {
if got, want := key, expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
@@ -5725,23 +5725,23 @@ func TestJobApiBackoffReset(t *testing.T) {
verifyEmptyQueue(ctx, t, manager)
}

var _ workqueue.RateLimitingInterface = &fakeRateLimitingQueue{}
var _ workqueue.TypedRateLimitingInterface[string] = &fakeRateLimitingQueue{}

type fakeRateLimitingQueue struct {
workqueue.Interface
workqueue.TypedInterface[string]
requeues int
item interface{}
item string
duration time.Duration
}

func (f *fakeRateLimitingQueue) AddRateLimited(item interface{}) {}
func (f *fakeRateLimitingQueue) Forget(item interface{}) {
func (f *fakeRateLimitingQueue) AddRateLimited(item string) {}
func (f *fakeRateLimitingQueue) Forget(item string) {
f.requeues = 0
}
func (f *fakeRateLimitingQueue) NumRequeues(item interface{}) int {
func (f *fakeRateLimitingQueue) NumRequeues(item string) int {
return f.requeues
}
func (f *fakeRateLimitingQueue) AddAfter(item interface{}, duration time.Duration) {
func (f *fakeRateLimitingQueue) AddAfter(item string, duration time.Duration) {
f.item = item
f.duration = duration
}
@ -57,7 +57,7 @@ type NamespaceController struct {
|
||||
// returns true when the namespace cache is ready
|
||||
listerSynced cache.InformerSynced
|
||||
// namespaces that have been queued up for processing by workers
|
||||
queue workqueue.RateLimitingInterface
|
||||
queue workqueue.TypedRateLimitingInterface[string]
|
||||
// helper to delete all resources in the namespace when the namespace is deleted.
|
||||
namespacedResourcesDeleter deletion.NamespacedResourcesDeleterInterface
|
||||
}
|
||||
@ -74,7 +74,12 @@ func NewNamespaceController(
|
||||
|
||||
// create the controller so we can inject the enqueue function
|
||||
namespaceController := &NamespaceController{
|
||||
queue: workqueue.NewNamedRateLimitingQueue(nsControllerRateLimiter(), "namespace"),
|
||||
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
nsControllerRateLimiter(),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{
|
||||
Name: "namespace",
|
||||
},
|
||||
),
|
||||
namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(ctx, kubeClient.CoreV1().Namespaces(), metadataClient, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken),
|
||||
}
|
||||
|
||||
@ -101,12 +106,12 @@ func NewNamespaceController(
|
||||
// nsControllerRateLimiter is tuned for a faster than normal recycle time with default backoff speed and default overall
|
||||
// requeing speed. We do this so that namespace cleanup is reliably faster and we know that the number of namespaces being
|
||||
// deleted is smaller than total number of other namespace scoped resources in a cluster.
|
||||
func nsControllerRateLimiter() workqueue.RateLimiter {
|
||||
return workqueue.NewMaxOfRateLimiter(
|
||||
func nsControllerRateLimiter() workqueue.TypedRateLimiter[string] {
|
||||
return workqueue.NewTypedMaxOfRateLimiter(
|
||||
// this ensures that we retry namespace deletion at least every minute, never longer.
|
||||
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 60*time.Second),
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 60*time.Second),
|
||||
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
|
||||
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
|
||||
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
|
||||
)
|
||||
}
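The typed limiter composition above preserves the old behaviour; as a rough, hedged illustration of how such a limiter can be built and attached to a queue outside this diff (the function names below are made up):

package example

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// newExampleRateLimiter retries each item with exponential backoff (5ms..60s)
// while also capping overall throughput at 10 qps with a burst of 100.
func newExampleRateLimiter() workqueue.TypedRateLimiter[string] {
	return workqueue.NewTypedMaxOfRateLimiter(
		workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 60*time.Second),
		&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)
}

// newExampleQueue wires the limiter into a named typed queue.
func newExampleQueue() workqueue.TypedRateLimitingInterface[string] {
	return workqueue.NewTypedRateLimitingQueueWithConfig(
		newExampleRateLimiter(),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example"},
	)
}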

@ -142,7 +147,7 @@ func (nm *NamespaceController) worker(ctx context.Context) {
}
defer nm.queue.Done(key)

err := nm.syncNamespaceFromKey(ctx, key.(string))
err := nm.syncNamespaceFromKey(ctx, key)
if err == nil {
// no error, forget this entry and return
nm.queue.Forget(key)

@ -297,8 +297,8 @@ type Controller struct {
largeClusterThreshold int32
unhealthyZoneThreshold float32

nodeUpdateQueue workqueue.Interface
podUpdateQueue workqueue.RateLimitingInterface
nodeUpdateQueue workqueue.TypedInterface[string]
podUpdateQueue workqueue.TypedRateLimitingInterface[podUpdateItem]
}

// NewNodeLifecycleController returns a new taint controller.
@ -344,8 +344,13 @@ func NewNodeLifecycleController(
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
largeClusterThreshold: largeClusterThreshold,
unhealthyZoneThreshold: unhealthyZoneThreshold,
nodeUpdateQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "node_lifecycle_controller"}),
podUpdateQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_lifecycle_controller_pods"),
nodeUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "node_lifecycle_controller"}),
podUpdateQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[podUpdateItem](),
workqueue.TypedRateLimitingQueueConfig[podUpdateItem]{
Name: "node_lifecycle_controller_pods",
},
),
}

nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc
@ -515,7 +520,7 @@ func (nc *Controller) doNodeProcessingPassWorker(ctx context.Context) {
if shutdown {
return
}
nodeName := obj.(string)
nodeName := obj
if err := nc.doNoScheduleTaintingPass(ctx, nodeName); err != nil {
logger.Error(err, "Failed to taint NoSchedule on node, requeue it", "node", klog.KRef("", nodeName))
// TODO(k82cn): Add nodeName back to the queue
@ -1096,7 +1101,7 @@ func (nc *Controller) doPodProcessingWorker(ctx context.Context) {
return
}

podItem := obj.(podUpdateItem)
podItem := obj
nc.processPod(ctx, podItem)
}
}
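Queues keyed by a struct (podUpdateItem above) need no assertions on the consumer side either; a small sketch under the assumption of a two-field item type (the type shown here is invented for illustration):

package example

import "k8s.io/client-go/util/workqueue"

// updateItem is an assumed, illustrative key type; any comparable type works.
type updateItem struct {
	podName  string
	nodeName string
}

func newUpdateQueue() workqueue.TypedRateLimitingInterface[updateItem] {
	return workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[updateItem](),
		workqueue.TypedRateLimitingQueueConfig[updateItem]{Name: "example_pods"},
	)
}

func drainOne(q workqueue.TypedRateLimitingInterface[updateItem]) bool {
	item, shutdown := q.Get() // item is already an updateItem, no type assertion
	if shutdown {
		return false
	}
	defer q.Done(item)
	_ = item.podName // process the update here
	q.Forget(item)
	return true
}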

@ -104,7 +104,7 @@ type HorizontalController struct {
podListerSynced cache.InformerSynced

// Controllers that need to be synced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// Latest unstabilized recommendations for each autoscaler.
recommendations map[string][]timestampedRecommendation
@ -148,15 +148,20 @@ func NewHorizontalController(
hpaNamespacer: hpaNamespacer,
downscaleStabilisationWindow: downscaleStabilisationWindow,
monitor: monitor.New(),
queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
mapper: mapper,
recommendations: map[string][]timestampedRecommendation{},
recommendationsLock: sync.Mutex{},
scaleUpEvents: map[string][]timestampedScaleEvent{},
scaleUpEventsLock: sync.RWMutex{},
scaleDownEvents: map[string][]timestampedScaleEvent{},
scaleDownEventsLock: sync.RWMutex{},
hpaSelectors: selectors.NewBiMultimap(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
NewDefaultHPARateLimiter(resyncPeriod),
workqueue.TypedRateLimitingQueueConfig[string]{
Name: "horizontalpodautoscaler",
},
),
mapper: mapper,
recommendations: map[string][]timestampedRecommendation{},
recommendationsLock: sync.Mutex{},
scaleUpEvents: map[string][]timestampedScaleEvent{},
scaleUpEventsLock: sync.RWMutex{},
scaleDownEvents: map[string][]timestampedScaleEvent{},
scaleDownEventsLock: sync.RWMutex{},
hpaSelectors: selectors.NewBiMultimap(),
}

hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@ -265,7 +270,7 @@ func (a *HorizontalController) processNextWorkItem(ctx context.Context) bool {
}
defer a.queue.Done(key)

deleted, err := a.reconcileKey(ctx, key.(string))
deleted, err := a.reconcileKey(ctx, key)
if err != nil {
utilruntime.HandleError(err)
}

@ -27,31 +27,31 @@ type FixedItemIntervalRateLimiter struct {
interval time.Duration
}

var _ workqueue.RateLimiter = &FixedItemIntervalRateLimiter{}
var _ workqueue.TypedRateLimiter[string] = &FixedItemIntervalRateLimiter{}

// NewFixedItemIntervalRateLimiter creates a new instance of a RateLimiter using a fixed interval
func NewFixedItemIntervalRateLimiter(interval time.Duration) workqueue.RateLimiter {
func NewFixedItemIntervalRateLimiter(interval time.Duration) workqueue.TypedRateLimiter[string] {
return &FixedItemIntervalRateLimiter{
interval: interval,
}
}

// When returns the interval of the rate limiter
func (r *FixedItemIntervalRateLimiter) When(item interface{}) time.Duration {
func (r *FixedItemIntervalRateLimiter) When(item string) time.Duration {
return r.interval
}

// NumRequeues returns back how many failures the item has had
func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int {
func (r *FixedItemIntervalRateLimiter) NumRequeues(item string) int {
return 1
}

// Forget indicates that an item is finished being retried.
func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) {
func (r *FixedItemIntervalRateLimiter) Forget(item string) {
}

// NewDefaultHPARateLimiter creates a rate limiter which limits overall (as per the
// default controller rate limiter), as well as per the resync interval
func NewDefaultHPARateLimiter(interval time.Duration) workqueue.RateLimiter {
func NewDefaultHPARateLimiter(interval time.Duration) workqueue.TypedRateLimiter[string] {
return NewFixedItemIntervalRateLimiter(interval)
}
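A custom limiter only has to implement the three TypedRateLimiter methods for its item type; a hedged, standalone sketch of a fixed-interval limiter similar in spirit to the one above (not the HPA's actual type):

package example

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// fixedDelayLimiter always waits the same interval and never tracks failures.
type fixedDelayLimiter struct{ interval time.Duration }

var _ workqueue.TypedRateLimiter[string] = fixedDelayLimiter{}

func (l fixedDelayLimiter) When(item string) time.Duration { return l.interval }
func (l fixedDelayLimiter) NumRequeues(item string) int    { return 1 }
func (l fixedDelayLimiter) Forget(item string)             {}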

@ -61,7 +61,7 @@ type PodGCController struct {
nodeLister corelisters.NodeLister
nodeListerSynced cache.InformerSynced

nodeQueue workqueue.DelayingInterface
nodeQueue workqueue.TypedDelayingInterface[string]

terminatedPodThreshold int
gcCheckPeriod time.Duration
@ -83,7 +83,7 @@ func NewPodGCInternal(ctx context.Context, kubeClient clientset.Interface, podIn
podListerSynced: podInformer.Informer().HasSynced,
nodeLister: nodeInformer.Lister(),
nodeListerSynced: nodeInformer.Informer().HasSynced,
nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
nodeQueue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Name: "orphaned_pods_nodes"}),
gcCheckPeriod: gcCheckPeriod,
quarantineTime: quarantineTime,
}
@ -270,7 +270,7 @@ func (gcc *PodGCController) discoverDeletedNodes(ctx context.Context, existingNo
if quit {
return nil, true
}
nodeName := item.(string)
nodeName := item
if !existingNodeNames.Has(nodeName) {
exists, err := gcc.checkIfNodeExists(ctx, nodeName)
switch {

@ -198,7 +198,7 @@ func makePod(name string, nodeName string, phase v1.PodPhase) *v1.Pod {
}
}

func waitForAdded(q workqueue.DelayingInterface, depth int) error {
func waitForAdded(q workqueue.TypedDelayingInterface[string], depth int) error {
return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
if q.Len() == depth {
return true, nil
@ -380,7 +380,7 @@ func TestGCOrphaned(t *testing.T) {
// Overwrite queue
fakeClock := testingclock.NewFakeClock(time.Now())
gcc.nodeQueue.ShutDown()
gcc.nodeQueue = workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue")
gcc.nodeQueue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{Clock: fakeClock, Name: "podgc_test_queue"})

// First GC of orphaned pods
gcc.gc(ctx)
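The delaying-queue replacement above carries the clock through the config struct instead of a dedicated constructor; a minimal sketch of that pattern in a test (queue name and durations here are arbitrary):

package example

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func newTestDelayingQueue() (workqueue.TypedDelayingInterface[string], *testingclock.FakeClock) {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
		Name:  "example_test_queue",
		Clock: fakeClock,
	})
	// Items added with AddAfter only become visible once the fake clock advances past the delay.
	q.AddAfter("node-1", 30*time.Second)
	fakeClock.Step(time.Minute)
	return q, fakeClock
}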

@ -111,7 +111,7 @@ type ReplicaSetController struct {
podListerSynced cache.InformerSynced

// Controllers that need to be synced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewReplicaSetController configures a replica set controller with the specified event recorder
@ -145,7 +145,10 @@ func NewBaseController(logger klog.Logger, rsInformer appsinformers.ReplicaSetIn
eventBroadcaster: eventBroadcaster,
burstReplicas: burstReplicas,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: queueName},
),
}

rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -548,7 +551,7 @@ func (rsc *ReplicaSetController) processNextWorkItem(ctx context.Context) bool {
}
defer rsc.queue.Done(key)

err := rsc.syncHandler(ctx, key.(string))
err := rsc.syncHandler(ctx, key)
if err == nil {
rsc.queue.Forget(key)
return true
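The consumer loops in this commit all collapse to the same shape once Get returns a concrete type; a generic, hedged sketch of that shape (syncFunc is a stand-in for whatever handler a controller uses):

package example

import (
	"context"

	"k8s.io/client-go/util/workqueue"
)

// processNext pops one key, runs syncFunc, and requeues with backoff on error.
func processNext(ctx context.Context, q workqueue.TypedRateLimitingInterface[string],
	syncFunc func(ctx context.Context, key string) error) bool {
	key, shutdown := q.Get()
	if shutdown {
		return false
	}
	defer q.Done(key)

	if err := syncFunc(ctx, key); err != nil {
		q.AddRateLimited(key) // retry later with per-item backoff
		return true
	}
	q.Forget(key) // success: reset the per-item failure counter
	return true
}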

@ -875,7 +875,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
manager.podControl = &fakePodControl

// Enqueue once. Then process it. Disable rate-limiting for this.
manager.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())
manager.queue = workqueue.NewTypedRateLimitingQueue(workqueue.NewTypedMaxOfRateLimiter[string]())
manager.enqueueRS(rs)
manager.processNextWorkItem(ctx)
// It should have been requeued.

@ -109,7 +109,7 @@ type Controller struct {
// recorder is used to record events in the API server
recorder record.EventRecorder

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// The deletedObjects cache keeps track of Pods for which we know that
// they have existed and have been removed. For those we can be sure
@ -142,8 +142,11 @@ func NewController(
claimsSynced: claimInformer.Informer().HasSynced,
templateLister: templateInformer.Lister(),
templatesSynced: templateInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_claim"),
deletedObjects: newUIDCache(maxUIDCacheEntries),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "resource_claim"},
),
deletedObjects: newUIDCache(maxUIDCacheEntries),
}

metrics.RegisterMetrics()
@ -424,7 +427,7 @@ func (ec *Controller) processNextWorkItem(ctx context.Context) bool {
}
defer ec.queue.Done(key)

err := ec.syncHandler(ctx, key.(string))
err := ec.syncHandler(ctx, key)
if err == nil {
ec.queue.Forget(key)
return true

@ -85,9 +85,9 @@ type Controller struct {
// A list of functions that return true when their caches have synced
informerSyncedFuncs []cache.InformerSynced
// ResourceQuota objects that need to be synchronized
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// missingUsageQueue holds objects that are missing the initial usage information
missingUsageQueue workqueue.RateLimitingInterface
missingUsageQueue workqueue.TypedRateLimitingInterface[string]
// To allow injection of syncUsage for testing.
syncHandler func(ctx context.Context, key string) error
// function that controls full recalculation of quota usage
@ -109,10 +109,16 @@ func NewController(ctx context.Context, options *ControllerOptions) (*Controller
rqClient: options.QuotaClient,
rqLister: options.ResourceQuotaInformer.Lister(),
informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"),
missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
resyncPeriod: options.ResyncPeriod,
registry: options.Registry,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "resourcequota_primary"},
),
missingUsageQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "resourcequota_priority"},
),
resyncPeriod: options.ResyncPeriod,
registry: options.Registry,
}
// set the synchronization handler
rq.syncHandler = rq.syncResourceQuotaFromKey
@ -246,7 +252,7 @@ func (rq *Controller) addQuota(logger klog.Logger, obj interface{}) {
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func(context.Context) {
func (rq *Controller) worker(queue workqueue.TypedRateLimitingInterface[string]) func(context.Context) {
workFunc := func(ctx context.Context) bool {
key, quit := queue.Get()
if quit {
@ -261,7 +267,7 @@ func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func(context
logger = klog.LoggerWithValues(logger, "queueKey", key)
ctx = klog.NewContext(ctx, logger)

err := rq.syncHandler(ctx, key.(string))
err := rq.syncHandler(ctx, key)
if err == nil {
queue.Forget(key)
return false

@ -83,7 +83,7 @@ type QuotaMonitor struct {
running bool

// monitors are the producer of the resourceChanges queue
resourceChanges workqueue.RateLimitingInterface
resourceChanges workqueue.TypedRateLimitingInterface[*event]

// interfaces with informers
informerFactory informerfactory.InformerFactory
@ -106,10 +106,13 @@ type QuotaMonitor struct {
// NewMonitor creates a new instance of a QuotaMonitor
func NewMonitor(informersStarted <-chan struct{}, informerFactory informerfactory.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry, updateFilter UpdateFilter) *QuotaMonitor {
return &QuotaMonitor{
informersStarted: informersStarted,
informerFactory: informerFactory,
ignoredResources: ignoredResources,
resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"),
informersStarted: informersStarted,
informerFactory: informerFactory,
ignoredResources: ignoredResources,
resourceChanges: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[*event](),
workqueue.TypedRateLimitingQueueConfig[*event]{Name: "resource_quota_controller_resource_changes"},
),
resyncPeriod: resyncPeriod,
replenishmentFunc: replenishmentFunc,
registry: registry,
@ -351,11 +354,7 @@ func (qm *QuotaMonitor) processResourceChanges(ctx context.Context) bool {
return false
}
defer qm.resourceChanges.Done(item)
event, ok := item.(*event)
if !ok {
utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item))
return true
}
event := item
obj := event.obj
accessor, err := meta.Accessor(obj)
if err != nil {

@ -65,7 +65,10 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme
e := &ServiceAccountsController{
client: cl,
serviceAccountsToEnsure: options.ServiceAccounts,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "serviceaccount"},
),
}

saHandler, _ := saInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
@ -100,7 +103,7 @@ type ServiceAccountsController struct {
nsLister corelisters.NamespaceLister
nsListerSynced cache.InformerSynced

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// Run runs the ServiceAccountsController blocks until receiving signal from stopCh.
@ -165,7 +168,7 @@ func (c *ServiceAccountsController) processNextWorkItem(ctx context.Context) boo
}
defer c.queue.Done(key)

err := c.syncHandler(ctx, key.(string))
err := c.syncHandler(ctx, key)
if err == nil {
c.queue.Forget(key)
return true

@ -80,8 +80,14 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre
token: options.TokenGenerator,
rootCA: options.RootCA,

syncServiceAccountQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_service"),
syncSecretQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_secret"),
syncServiceAccountQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[serviceAccountQueueKey](),
workqueue.TypedRateLimitingQueueConfig[serviceAccountQueueKey]{Name: "serviceaccount_tokens_service"},
),
syncSecretQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[secretQueueKey](),
workqueue.TypedRateLimitingQueueConfig[secretQueueKey]{Name: "serviceaccount_tokens_secret"},
),

maxRetries: maxRetries,
}
@ -143,14 +149,14 @@ type TokensController struct {
// syncServiceAccountQueue handles service account events:
// * ensures tokens are removed for service accounts which no longer exist
// key is "<namespace>/<name>/<uid>"
syncServiceAccountQueue workqueue.RateLimitingInterface
syncServiceAccountQueue workqueue.TypedRateLimitingInterface[serviceAccountQueueKey]

// syncSecretQueue handles secret events:
// * deletes tokens whose service account no longer exists
// * updates tokens with missing token or namespace data, or mismatched ca data
// * ensures service account secret references are removed for tokens which are deleted
// key is a secretQueueKey{}
syncSecretQueue workqueue.RateLimitingInterface
syncSecretQueue workqueue.TypedRateLimitingInterface[secretQueueKey]

maxRetries int
}
@ -189,14 +195,14 @@ func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, new
}

// complete optionally requeues key, then calls queue.Done(key)
func (e *TokensController) retryOrForget(logger klog.Logger, queue workqueue.RateLimitingInterface, key interface{}, requeue bool) {
func retryOrForget[T comparable](logger klog.Logger, queue workqueue.TypedRateLimitingInterface[T], key T, requeue bool, maxRetries int) {
if !requeue {
queue.Forget(key)
return
}

requeueCount := queue.NumRequeues(key)
if requeueCount < e.maxRetries {
if requeueCount < maxRetries {
queue.AddRateLimited(key)
return
}
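Making the helper generic over the key type lets a single function serve queues with different item types; a hedged usage sketch of the same idea outside the commit (names and log level are assumptions):

package example

import (
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
)

// requeueOrDrop retries key up to maxRetries times, then gives up on it.
func requeueOrDrop[T comparable](logger klog.Logger, q workqueue.TypedRateLimitingInterface[T], key T, retry bool, maxRetries int) {
	if !retry {
		q.Forget(key)
		return
	}
	if q.NumRequeues(key) < maxRetries {
		q.AddRateLimited(key)
		return
	}
	logger.V(4).Info("retried too many times, dropping item", "key", key)
	q.Forget(key)
}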

@ -227,7 +233,7 @@ func (e *TokensController) syncServiceAccount(ctx context.Context) {

retry := false
defer func() {
e.retryOrForget(logger, e.syncServiceAccountQueue, key, retry)
retryOrForget(logger, e.syncServiceAccountQueue, key, retry, e.maxRetries)
}()

saInfo, err := parseServiceAccountKey(key)
@ -263,7 +269,7 @@ func (e *TokensController) syncSecret(ctx context.Context) {
// Track whether or not we should retry this sync
retry := false
defer func() {
e.retryOrForget(logger, e.syncSecretQueue, key, retry)
retryOrForget(logger, e.syncSecretQueue, key, retry, e.maxRetries)
}()

secretInfo, err := parseSecretQueueKey(key)
@ -571,7 +577,7 @@ type serviceAccountQueueKey struct {
uid types.UID
}

func makeServiceAccountKey(sa *v1.ServiceAccount) interface{} {
func makeServiceAccountKey(sa *v1.ServiceAccount) serviceAccountQueueKey {
return serviceAccountQueueKey{
namespace: sa.Namespace,
name: sa.Name,
@ -599,7 +605,7 @@ type secretQueueKey struct {
saUID types.UID
}

func makeSecretQueueKey(secret *v1.Secret) interface{} {
func makeSecretQueueKey(secret *v1.Secret) secretQueueKey {
return secretQueueKey{
namespace: secret.Namespace,
name: secret.Name,

@ -75,8 +75,11 @@ func NewController(
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
c := &Controller{
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
client: client,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "ipaddresses"},
),
tree: iptree.New[sets.Set[string]](),
workerLoopPeriod: time.Second,
}
@ -115,7 +118,7 @@ type Controller struct {
ipAddressLister networkinglisters.IPAddressLister
ipAddressSynced cache.InformerSynced

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// workerLoopPeriod is the time between worker runs. The workers process the queue of service and ipRange changes.
workerLoopPeriod time.Duration
@ -264,13 +267,12 @@ func (c *Controller) worker(ctx context.Context) {
}

func (c *Controller) processNext(ctx context.Context) bool {
eKey, quit := c.queue.Get()
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(eKey)
defer c.queue.Done(key)

key := eKey.(string)
err := c.sync(ctx, key)
if err == nil {
c.queue.Forget(key)

@ -71,7 +71,7 @@ type StatefulSetController struct {
// revListerSynced returns true if the rev shared informer has synced at least once
revListerSynced cache.InformerSynced
// StatefulSets that need to be synced.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// eventBroadcaster is the core of event processing pipeline.
eventBroadcaster record.EventBroadcaster
}
@ -101,8 +101,11 @@ func NewStatefulSetController(
),
pvcListerSynced: pvcInformer.Informer().HasSynced,
revListerSynced: revInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"),
podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder},
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "statefulset"},
),
podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder},

eventBroadcaster: eventBroadcaster,
}
@ -428,8 +431,8 @@ func (ssc *StatefulSetController) processNextWorkItem(ctx context.Context) bool
return false
}
defer ssc.queue.Done(key)
if err := ssc.sync(ctx, key.(string)); err != nil {
utilruntime.HandleError(fmt.Errorf("error syncing StatefulSet %v, requeuing: %v", key.(string), err))
if err := ssc.sync(ctx, key); err != nil {
utilruntime.HandleError(fmt.Errorf("error syncing StatefulSet %v, requeuing: %w", key, err))
ssc.queue.AddRateLimited(key)
} else {
ssc.queue.Forget(key)

@ -297,10 +297,8 @@ func TestStatefulSetControllerAddPod(t *testing.T) {

ssc.addPod(logger, pod1)
key, done := ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -308,10 +306,8 @@ func TestStatefulSetControllerAddPod(t *testing.T) {

ssc.addPod(logger, pod2)
key, done = ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -348,7 +344,7 @@ func TestStatefulSetControllerAddPodNoSet(t *testing.T) {
ssc.addPod(logger, pod)
ssc.queue.ShutDown()
key, _ := ssc.queue.Get()
if key != nil {
if key != "" {
t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
}
}
@ -368,10 +364,8 @@ func TestStatefulSetControllerUpdatePod(t *testing.T) {
fakeResourceVersion(pod1)
ssc.updatePod(logger, &prev, pod1)
key, done := ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -380,10 +374,8 @@ func TestStatefulSetControllerUpdatePod(t *testing.T) {
fakeResourceVersion(pod2)
ssc.updatePod(logger, &prev, pod2)
key, done = ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -399,7 +391,7 @@ func TestStatefulSetControllerUpdatePodWithNoSet(t *testing.T) {
ssc.updatePod(logger, &prev, pod)
ssc.queue.ShutDown()
key, _ := ssc.queue.Get()
if key != nil {
if key != "" {
t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
}
}
@ -413,7 +405,7 @@ func TestStatefulSetControllerUpdatePodWithSameVersion(t *testing.T) {
ssc.updatePod(logger, pod, pod)
ssc.queue.ShutDown()
key, _ := ssc.queue.Get()
if key != nil {
if key != "" {
t.Errorf("StatefulSet enqueued key for Pod with no Set %s", key)
}
}
@ -487,20 +479,16 @@ func TestStatefulSetControllerDeletePod(t *testing.T) {

ssc.deletePod(logger, pod1)
key, done := ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set1); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}

ssc.deletePod(logger, pod2)
key, done = ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set2); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -533,10 +521,8 @@ func TestStatefulSetControllerDeletePodTombstone(t *testing.T) {
tombstone := cache.DeletedFinalStateUnknown{Key: tombstoneKey, Obj: pod}
ssc.deletePod(logger, tombstone)
key, done := ssc.queue.Get()
if key == nil || done {
if key == "" || done {
t.Error("failed to enqueue StatefulSet")
} else if key, ok := key.(string); !ok {
t.Error("key is not a string")
} else if expectedKey, _ := controller.KeyFunc(set); expectedKey != key {
t.Errorf("expected StatefulSet key %s found %s", expectedKey, key)
}
@ -952,7 +938,7 @@ func newFakeStatefulSetController(ctx context.Context, initialObjects ...runtime

func fakeWorker(ssc *StatefulSetController) {
if obj, done := ssc.queue.Get(); !done {
ssc.sync(context.TODO(), obj.(string))
_ = ssc.sync(context.TODO(), obj)
ssc.queue.Done(obj)
}
}
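The test updates above rely on the typed queue's shutdown behaviour: once the queue is shut down and drained, Get returns the zero value of the item type ("" for string keys) together with done == true, so the old nil checks and string assertions no longer apply. A small hedged sketch of that behaviour (queue name is a placeholder):

package example

import "k8s.io/client-go/util/workqueue"

func zeroValueAfterShutdown() (string, bool) {
	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "example"})
	q.ShutDown()
	key, done := q.Get() // key == "", done == true once the queue is empty and shut down
	return key, done
}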

@ -50,8 +50,8 @@ type Controller struct {

storageVersionSynced cache.InformerSynced

leaseQueue workqueue.RateLimitingInterface
storageVersionQueue workqueue.RateLimitingInterface
leaseQueue workqueue.TypedRateLimitingInterface[string]
storageVersionQueue workqueue.TypedRateLimitingInterface[string]
}

// NewStorageVersionGC creates a new Controller.
@ -61,8 +61,14 @@ func NewStorageVersionGC(ctx context.Context, clientset kubernetes.Interface, le
leaseLister: leaseInformer.Lister(),
leasesSynced: leaseInformer.Informer().HasSynced,
storageVersionSynced: storageVersionInformer.Informer().HasSynced,
leaseQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "storage_version_garbage_collector_leases"),
storageVersionQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "storage_version_garbage_collector_storageversions"),
leaseQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "storage_version_garbage_collector_leases"},
),
storageVersionQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "storage_version_garbage_collector_storageversions"},
),
}
logger := klog.FromContext(ctx)
leaseInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -121,7 +127,7 @@ func (c *Controller) processNextLease(ctx context.Context) bool {
}
defer c.leaseQueue.Done(key)

err := c.processDeletedLease(ctx, key.(string))
err := c.processDeletedLease(ctx, key)
if err == nil {
c.leaseQueue.Forget(key)
return true
@ -144,7 +150,7 @@ func (c *Controller) processNextStorageVersion(ctx context.Context) bool {
}
defer c.storageVersionQueue.Done(key)

err := c.syncStorageVersion(ctx, key.(string))
err := c.syncStorageVersion(ctx, key)
if err == nil {
c.storageVersionQueue.Forget(key)
return true

@ -54,7 +54,7 @@ type ResourceVersionController struct {
metadataClient metadata.Interface
svmListers svmlisters.StorageVersionMigrationLister
svmSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
kubeClient clientset.Interface
mapper meta.ResettableRESTMapper
}
@ -76,7 +76,10 @@ func NewResourceVersionController(
svmListers: svmInformer.Lister(),
svmSynced: svmInformer.Informer().HasSynced,
mapper: mapper,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ResourceVersionControllerName),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: ResourceVersionControllerName},
),
}

_, _ = svmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -137,13 +140,12 @@ func (rv *ResourceVersionController) worker(ctx context.Context) {
}

func (rv *ResourceVersionController) processNext(ctx context.Context) bool {
eKey, quit := rv.queue.Get()
key, quit := rv.queue.Get()
if quit {
return false
}
defer rv.queue.Done(eKey)
defer rv.queue.Done(key)

key := eKey.(string)
err := rv.sync(ctx, key)
if err == nil {
rv.queue.Forget(key)

@ -55,7 +55,7 @@ type SVMController struct {
dynamicClient *dynamic.DynamicClient
svmListers svmlisters.StorageVersionMigrationLister
svmSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
restMapper meta.RESTMapper
dependencyGraphBuilder *garbagecollector.GraphBuilder
}
@ -79,7 +79,10 @@ func NewSVMController(
svmSynced: svmInformer.Informer().HasSynced,
restMapper: mapper,
dependencyGraphBuilder: dependencyGraphBuilder,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: controllerName},
),
}

_, _ = svmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -146,13 +149,12 @@ func (svmc *SVMController) worker(ctx context.Context) {
}

func (svmc *SVMController) processNext(ctx context.Context) bool {
svmKey, quit := svmc.queue.Get()
key, quit := svmc.queue.Get()
if quit {
return false
}
defer svmc.queue.Done(svmKey)
defer svmc.queue.Done(key)

key := svmKey.(string)
err := svmc.sync(ctx, key)
if err == nil {
svmc.queue.Forget(key)

@ -102,8 +102,8 @@ type Controller struct {
nodeUpdateChannels []chan nodeUpdateItem
podUpdateChannels []chan podUpdateItem

nodeUpdateQueue workqueue.Interface
podUpdateQueue workqueue.Interface
nodeUpdateQueue workqueue.TypedInterface[nodeUpdateItem]
podUpdateQueue workqueue.TypedInterface[podUpdateItem]
}

func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName), controllerName string) func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
@ -220,8 +220,8 @@ func New(ctx context.Context, c clientset.Interface, podInformer corev1informers
},
taintedNodes: make(map[string][]v1.Taint),

nodeUpdateQueue: workqueue.NewWithConfig(workqueue.QueueConfig{Name: "noexec_taint_node"}),
podUpdateQueue: workqueue.NewWithConfig(workqueue.QueueConfig{Name: "noexec_taint_pod"}),
nodeUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[nodeUpdateItem]{Name: "noexec_taint_node"}),
podUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[podUpdateItem]{Name: "noexec_taint_pod"}),
}
tm.taintEvictionQueue = CreateWorkerQueue(deletePodHandler(c, tm.emitPodDeletionEvent, tm.name))

@ -312,15 +312,14 @@ func (tc *Controller) Run(ctx context.Context) {
// into channels.
go func(stopCh <-chan struct{}) {
for {
item, shutdown := tc.nodeUpdateQueue.Get()
nodeUpdate, shutdown := tc.nodeUpdateQueue.Get()
if shutdown {
break
}
nodeUpdate := item.(nodeUpdateItem)
hash := hash(nodeUpdate.nodeName, UpdateWorkerSize)
select {
case <-stopCh:
tc.nodeUpdateQueue.Done(item)
tc.nodeUpdateQueue.Done(nodeUpdate)
return
case tc.nodeUpdateChannels[hash] <- nodeUpdate:
// tc.nodeUpdateQueue.Done is called by the nodeUpdateChannels worker
@ -330,7 +329,7 @@ func (tc *Controller) Run(ctx context.Context) {

go func(stopCh <-chan struct{}) {
for {
item, shutdown := tc.podUpdateQueue.Get()
podUpdate, shutdown := tc.podUpdateQueue.Get()
if shutdown {
break
}
@ -338,11 +337,10 @@ func (tc *Controller) Run(ctx context.Context) {
// between node worker setting tc.taintedNodes and pod worker reading this to decide
// whether to delete pod.
// It's possible that even without this assumption this code is still correct.
podUpdate := item.(podUpdateItem)
hash := hash(podUpdate.nodeName, UpdateWorkerSize)
select {
case <-stopCh:
tc.podUpdateQueue.Done(item)
tc.podUpdateQueue.Done(podUpdate)
return
case tc.podUpdateChannels[hash] <- podUpdate:
// tc.podUpdateQueue.Done is called by the podUpdateChannels worker

@ -60,7 +60,7 @@ type Controller struct {
nodeStore listers.NodeLister

// Nodes that need to be synced.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// Returns true if all underlying informers are synced.
hasSynced func() bool
@ -81,7 +81,10 @@ type Controller struct {
func NewTTLController(ctx context.Context, nodeInformer informers.NodeInformer, kubeClient clientset.Interface) *Controller {
ttlc := &Controller{
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ttlcontroller"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "ttlcontroller"},
),
}
logger := klog.FromContext(ctx)
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -216,7 +219,7 @@ func (ttlc *Controller) processItem(ctx context.Context) bool {
}
defer ttlc.queue.Done(key)

err := ttlc.updateNodeIfNeeded(ctx, key.(string))
err := ttlc.updateNodeIfNeeded(ctx, key)
if err == nil {
ttlc.queue.Forget(key)
return true

@ -230,7 +230,7 @@ func TestDesiredTTL(t *testing.T) {

for i, testCase := range testCases {
ttlController := &Controller{
queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
queue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()),
nodeCount: testCase.nodeCount,
desiredTTLSeconds: testCase.desiredTTL,
boundaryStep: testCase.boundaryStep,

@ -62,7 +62,7 @@ type Controller struct {
jListerSynced cache.InformerSynced

// Jobs that the controller will check its TTL and attempt to delete when the TTL expires.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// The clock for tracking time
clock clock.Clock
@ -79,7 +79,10 @@ func New(ctx context.Context, jobInformer batchinformers.JobInformer, client cli
tc := &Controller{
client: client,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ttl-after-finished-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ttl_jobs_to_delete"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "ttl_jobs_to_delete"},
),
}

logger := klog.FromContext(ctx)
@ -172,13 +175,13 @@ func (tc *Controller) processNextWorkItem(ctx context.Context) bool {
}
defer tc.queue.Done(key)

err := tc.processJob(ctx, key.(string))
err := tc.processJob(ctx, key)
tc.handleErr(err, key)

return true
}

func (tc *Controller) handleErr(err error, key interface{}) {
func (tc *Controller) handleErr(err error, key string) {
if err == nil {
tc.queue.Forget(key)
return

@ -41,7 +41,7 @@ const ControllerName = "validatingadmissionpolicy-status"
// This controller runs type checks against referred types for each policy definition.
type Controller struct {
policyInformer informerv1.ValidatingAdmissionPolicyInformer
policyQueue workqueue.RateLimitingInterface
policyQueue workqueue.TypedRateLimitingInterface[string]
policySynced cache.InformerSynced
policyClient admissionregistrationv1.ValidatingAdmissionPolicyInterface

@ -69,9 +69,12 @@ func (c *Controller) Run(ctx context.Context, workers int) {
func NewController(policyInformer informerv1.ValidatingAdmissionPolicyInformer, policyClient admissionregistrationv1.ValidatingAdmissionPolicyInterface, typeChecker *validatingadmissionpolicy.TypeChecker) (*Controller, error) {
c := &Controller{
policyInformer: policyInformer,
policyQueue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: ControllerName}),
policyClient: policyClient,
typeChecker: typeChecker,
policyQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: ControllerName},
),
policyClient: policyClient,
typeChecker: typeChecker,
}
reg, err := policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
@ -112,10 +115,6 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
defer c.policyQueue.Done(key)

err := func() error {
key, ok := key.(string)
if !ok {
return fmt.Errorf("expect a string but got %v", key)
}
policy, err := c.policyInformer.Lister().Get(key)
if err != nil {
if kerrors.IsNotFound(err) {

@ -133,7 +133,10 @@ func NewAttachDetachController(
podIndexer: podInformer.Informer().GetIndexer(),
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"),
pvcQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvcs"},
),
}

adc.csiNodeLister = csiNodeInformer.Lister()
@ -313,7 +316,7 @@ type attachDetachController struct {
broadcaster record.EventBroadcaster

// pvcQueue is used to queue pvc objects
pvcQueue workqueue.RateLimitingInterface
pvcQueue workqueue.TypedRateLimitingInterface[string]

// csiMigratedPluginManager detects in-tree plugins that have been migrated to CSI
csiMigratedPluginManager csimigration.PluginManager
@ -600,11 +603,11 @@ func (adc *attachDetachController) processNextItem(logger klog.Logger) bool {
}
defer adc.pvcQueue.Done(keyObj)

if err := adc.syncPVCByKey(logger, keyObj.(string)); err != nil {
if err := adc.syncPVCByKey(logger, keyObj); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
adc.pvcQueue.AddRateLimited(keyObj)
runtime.HandleError(fmt.Errorf("Failed to sync pvc %q, will retry again: %v", keyObj.(string), err))
runtime.HandleError(fmt.Errorf("failed to sync pvc %q, will retry again: %w", keyObj, err))
return true
}

@ -71,7 +71,7 @@ type ephemeralController struct {
// recorder is used to record events in the API server
recorder record.EventRecorder

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewController creates an ephemeral volume controller.
@ -88,7 +88,10 @@ func NewController(
podSynced: podInformer.Informer().HasSynced,
pvcLister: pvcInformer.Lister(),
pvcsSynced: pvcInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ephemeral_volume"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "ephemeral_volume"},
),
}

ephemeralvolumemetrics.RegisterMetrics()
@ -193,7 +196,7 @@ func (ec *ephemeralController) processNextWorkItem(ctx context.Context) bool {
}
defer ec.queue.Done(key)

err := ec.syncHandler(ctx, key.(string))
err := ec.syncHandler(ctx, key)
if err == nil {
ec.queue.Forget(key)
return true

@ -87,7 +87,7 @@ type expandController struct {

operationGenerator operationexecutor.OperationGenerator

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

translator CSINameTranslator

@ -104,10 +104,13 @@ func NewExpandController(
csiMigratedPluginManager csimigration.PluginManager) (ExpandController, error) {

expc := &expandController{
kubeClient: kubeClient,
pvcLister: pvcInformer.Lister(),
pvcsSynced: pvcInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "volume_expand"),
kubeClient: kubeClient,
pvcLister: pvcInformer.Lister(),
pvcsSynced: pvcInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "volume_expand"},
),
translator: translator,
csiMigratedPluginManager: csiMigratedPluginManager,
}
@ -180,7 +183,7 @@ func (expc *expandController) processNextWorkItem(ctx context.Context) bool {
}
defer expc.queue.Done(key)

err := expc.syncHandler(ctx, key.(string))
err := expc.syncHandler(ctx, key)
if err == nil {
expc.queue.Forget(key)
return true

@ -188,8 +188,8 @@ type PersistentVolumeController struct {
// version errors in API server and other checks in this controller),
// however overall speed of multi-worker controller would be lower than if
// it runs single thread only.
claimQueue *workqueue.Type
volumeQueue *workqueue.Type
claimQueue *workqueue.Typed[string]
volumeQueue *workqueue.Typed[string]

// Map of scheduled/running operations.
runningOperations goroutinemap.GoRoutineMap

@ -89,8 +89,8 @@ func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolu
clusterName: p.ClusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
createProvisionedPVInterval: createProvisionedPVInterval,
claimQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "claims"}),
volumeQueue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "volumes"}),
claimQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "claims"}),
volumeQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "volumes"}),
resyncPeriod: p.SyncPeriod,
operationTimestamps: metrics.NewOperationStartTimeCache(),
}
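The last two hunks above tighten an earlier explicit [any] instantiation: with a TypedQueueConfig[string] argument the element type is simply inferred. A hedged one-liner comparison (queue names are placeholders):

package example

import "k8s.io/client-go/util/workqueue"

// Explicit [any] compiles but gives up the type safety the generic queue offers:
var loose = workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "claims"})

// Letting the compiler infer the parameter from the config keeps items as strings:
var typed = workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "claims"})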
@ -171,7 +171,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, vol
}

// enqueueWork adds volume or claim to given work queue.
func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) {
func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.TypedInterface[string], obj interface{}) {
// Beware of "xxx deleted" events
logger := klog.FromContext(ctx)
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
@ -489,12 +489,11 @@ func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManage
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func(ctx context.Context) bool {
keyObj, quit := ctrl.volumeQueue.Get()
key, quit := ctrl.volumeQueue.Get()
if quit {
return true
}
defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string)
defer ctrl.volumeQueue.Done(key)
logger.V(5).Info("volumeWorker", "volumeKey", key)

_, name, err := cache.SplitMetaNamespaceKey(key)
@ -548,12 +547,11 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func() bool {
keyObj, quit := ctrl.claimQueue.Get()
key, quit := ctrl.claimQueue.Get()
if quit {
return true
}
defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string)
defer ctrl.claimQueue.Done(key)
logger.V(5).Info("claimWorker", "claimKey", key)

namespace, name, err := cache.SplitMetaNamespaceKey(key)

@ -51,14 +51,17 @@ type Controller struct {
podListerSynced cache.InformerSynced
podIndexer cache.Indexer

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewPVCProtectionController returns a new instance of PVCProtectionController.
func NewPVCProtectionController(logger klog.Logger, pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
e := &Controller{
client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvcprotection"},
),
}

e.pvcLister = pvcInformer.Lister()
@ -126,7 +129,7 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
}
defer c.queue.Done(pvcKey)

pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey.(string))
pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey)
if err != nil {
utilruntime.HandleError(fmt.Errorf("error parsing PVC key %q: %v", pvcKey, err))
return true

@ -45,14 +45,17 @@ type Controller struct {
pvLister corelisters.PersistentVolumeLister
pvListerSynced cache.InformerSynced

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
e := &Controller{
client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvprotection"},
),
}

e.pvLister = pvInformer.Lister()
@ -102,7 +105,7 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
}
defer c.queue.Done(pvKey)

pvName := pvKey.(string)
pvName := pvKey

err := c.processPV(ctx, pvName)
if err == nil {

@ -61,7 +61,7 @@ type Controller struct {

// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors.
// we only ever place one entry in here, but it is keyed as usual: namespace/name
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// kubeSystemConfigMapInformer is tracked so that we can start these on Run
kubeSystemConfigMapInformer cache.SharedIndexInformer
@ -94,11 +94,14 @@ func NewClusterAuthenticationTrustController(requiredAuthenticationData ClusterA
kubeSystemConfigMapInformer := corev1informers.NewConfigMapInformer(kubeClient, configMapNamespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

c := &Controller{
requiredAuthenticationData: requiredAuthenticationData,
configMapLister: corev1listers.NewConfigMapLister(kubeSystemConfigMapInformer.GetIndexer()),
configMapClient: kubeClient.CoreV1(),
namespaceClient: kubeClient.CoreV1(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster_authentication_trust_controller"),
requiredAuthenticationData: requiredAuthenticationData,
configMapLister: corev1listers.NewConfigMapLister(kubeSystemConfigMapInformer.GetIndexer()),
configMapClient: kubeClient.CoreV1(),
namespaceClient: kubeClient.CoreV1(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "cluster_authentication_trust_controller"},
),
preRunCaches: []cache.InformerSynced{kubeSystemConfigMapInformer.HasSynced},
kubeSystemConfigMapInformer: kubeSystemConfigMapInformer,
}

@ -56,7 +56,7 @@ type crdRegistrationController struct {

// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors
// this is actually keyed by a groupVersion
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[schema.GroupVersion]
}

// NewCRDRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration
@ -67,7 +67,10 @@ func NewCRDRegistrationController(crdinformer crdinformers.CustomResourceDefinit
crdSynced: crdinformer.Informer().HasSynced,
apiServiceRegistration: apiServiceRegistration,
syncedInitialSet: make(chan struct{}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_autoregistration_controller"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[schema.GroupVersion](),
workqueue.TypedRateLimitingQueueConfig[schema.GroupVersion]{Name: "crd_autoregistration_controller"},
),
}
c.syncHandler = c.handleVersionUpdate

@ -164,7 +167,7 @@ func (c *crdRegistrationController) processNextWorkItem() bool {
defer c.queue.Done(key)

// do your work on the key. This method will contains your "do stuff" logic
err := c.syncHandler(key.(schema.GroupVersion))
err := c.syncHandler(key)
if err == nil {
// if you had no error, tell the queue to stop tracking history for your key. This will
// reset things like failure counts for per-item rate limiting
|
||||
|
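The hunks above all apply the same mechanical migration: a name-only NewNamedRateLimitingQueue constructor is swapped for NewTypedRateLimitingQueueWithConfig with an explicit key type. A minimal, self-contained sketch of the before/after construction, using an illustrative queue name rather than anything from this change:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Before: the queue is untyped; items come back as interface{} and have
	// to be asserted to string at every use site.
	legacy := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "example_controller")
	legacy.Add("default/some-object")

	// After: the key type is part of the queue's type, so Add/Get/Done/Forget
	// all operate on string directly.
	typed := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example_controller"},
	)
	typed.Add("default/some-object")

	key, shutdown := typed.Get() // key is a string, no assertion needed
	if !shutdown {
		defer typed.Done(key)
		fmt.Println("processing", key)
	}
}

Because the key type is now carried by the queue's type, enqueuing the wrong kind of item becomes a compile-time error instead of a runtime type-assertion failure.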
@@ -58,7 +58,7 @@ type Controller struct {
configMapInformer cache.SharedIndexInformer
configMapCache cache.Indexer
configMapSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// rate limiter controls the rate limit of the creation of the configmap.
// this is useful in multi-apiserver cluster to prevent config existing in a

@@ -80,8 +80,11 @@ func newController(cs kubernetes.Interface, cl clock.Clock, limiter *rate.Limite
})

c := &Controller{
configMapClient: cs.CoreV1(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "legacy_token_tracking_controller"),
configMapClient: cs.CoreV1(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "legacy_token_tracking_controller"},
),
configMapInformer: informer,
configMapCache: informer.GetIndexer(),
configMapSynced: informer.HasSynced,

@@ -56,7 +56,7 @@ type nodeResourcesController struct {
kubeClient kubernetes.Interface
getNode func() (*v1.Node, error)
wg sync.WaitGroup
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
sliceStore cache.Store

mutex sync.RWMutex

@@ -96,10 +96,13 @@ func startNodeResourcesController(ctx context.Context, kubeClient kubernetes.Int
ctx = klog.NewContext(ctx, logger)

c := &nodeResourcesController{
ctx: ctx,
kubeClient: kubeClient,
getNode: getNode,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_resource_slices"),
ctx: ctx,
kubeClient: kubeClient,
getNode: getNode,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "node_resource_slices"},
),
activePlugins: make(map[string]*activePlugin),
}

@@ -347,7 +350,7 @@ func (c *nodeResourcesController) processNextWorkItem(ctx context.Context) bool
}
defer c.queue.Done(key)

driverName := key.(string)
driverName := key

// Panics are caught and treated like errors.
var err error

@@ -146,7 +146,7 @@ type containerLogManager struct {
policy LogRotatePolicy
clock clock.Clock
mutex sync.Mutex
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
maxWorkers int
monitoringPeriod metav1.Duration
}

@@ -172,10 +172,13 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa
MaxSize: parsedMaxSize,
MaxFiles: maxFiles,
},
clock: clock.RealClock{},
mutex: sync.Mutex{},
maxWorkers: maxWorkers,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"),
clock: clock.RealClock{},
mutex: sync.Mutex{},
maxWorkers: maxWorkers,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"},
),
monitoringPeriod: monitorInterval,
}, nil
}

@@ -264,7 +267,7 @@ func (c *containerLogManager) processContainer(ctx context.Context, worker int)
}()
// Always default the return to true to keep the processing of Queue ongoing
ok = true
id := key.(string)
id := key

resp, err := c.runtimeService.ContainerStatus(ctx, id, false)
if err != nil {
@@ -96,10 +96,13 @@ func TestRotateLogs(t *testing.T) {
MaxSize: testMaxSize,
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"),
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"},
),
maxWorkers: 10,
monitoringPeriod: v1.Duration{Duration: 10 * time.Second},
}

@@ -204,10 +207,13 @@ func TestClean(t *testing.T) {
MaxSize: testMaxSize,
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"),
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"},
),
maxWorkers: 10,
monitoringPeriod: v1.Duration{Duration: 10 * time.Second},
}

@@ -411,12 +417,15 @@ func TestRotateLatestLog(t *testing.T) {
now := time.Now()
f := critest.NewFakeRuntimeService()
c := &containerLogManager{
runtimeService: f,
policy: LogRotatePolicy{MaxFiles: test.maxFiles},
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubelet_log_rotate_manager"),
runtimeService: f,
policy: LogRotatePolicy{MaxFiles: test.maxFiles},
osInterface: container.RealOS{},
clock: testingclock.NewFakeClock(now),
mutex: sync.Mutex{},
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"},
),
maxWorkers: 10,
monitoringPeriod: v1.Duration{Duration: 10 * time.Second},
}
@@ -57,7 +57,7 @@ type MetaAllocator struct {
ipAddressLister networkingv1alpha1listers.IPAddressLister
ipAddressSynced cache.InformerSynced
ipAddressInformer networkingv1alpha1informers.IPAddressInformer
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

internalStopCh chan struct{}

@@ -92,10 +92,13 @@ func NewMetaAllocator(
ipAddressLister: ipAddressInformer.Lister(),
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
ipAddressInformer: ipAddressInformer,
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: ControllerName}),
internalStopCh: make(chan struct{}),
tree: iptree.New[*Allocator](),
ipFamily: family,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: ControllerName},
),
internalStopCh: make(chan struct{}),
tree: iptree.New[*Allocator](),
ipFamily: family,
}

_, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -100,9 +100,9 @@ type RepairIPAddress struct {
ipAddressLister networkinglisters.IPAddressLister
ipAddressSynced cache.InformerSynced

cidrQueue workqueue.RateLimitingInterface
svcQueue workqueue.RateLimitingInterface
ipQueue workqueue.RateLimitingInterface
cidrQueue workqueue.TypedRateLimitingInterface[string]
svcQueue workqueue.TypedRateLimitingInterface[string]
ipQueue workqueue.TypedRateLimitingInterface[string]
workerLoopPeriod time.Duration

muTree sync.Mutex

@@ -132,14 +132,23 @@ func NewRepairIPAddress(interval time.Duration,
serviceCIDRSynced: serviceCIDRInformer.Informer().HasSynced,
ipAddressLister: ipAddressInformer.Lister(),
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
cidrQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "servicecidrs"),
svcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "services"),
ipQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
tree: iptree.New[string](),
workerLoopPeriod: time.Second,
broadcaster: eventBroadcaster,
recorder: recorder,
clock: clock.RealClock{},
cidrQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "servicecidrs"},
),
svcQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "services"},
),
ipQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "ipaddresses"},
),
tree: iptree.New[string](),
workerLoopPeriod: time.Second,
broadcaster: eventBroadcaster,
recorder: recorder,
clock: clock.RealClock{},
}

_, _ = serviceInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{

@@ -310,13 +319,13 @@ func (r *RepairIPAddress) processNextWorkSvc() bool {
}
defer r.svcQueue.Done(eKey)

err := r.syncService(eKey.(string))
err := r.syncService(eKey)
r.handleSvcErr(err, eKey)

return true
}

func (r *RepairIPAddress) handleSvcErr(err error, key interface{}) {
func (r *RepairIPAddress) handleSvcErr(err error, key string) {
if err == nil {
r.svcQueue.Forget(key)
return

@@ -458,13 +467,13 @@ func (r *RepairIPAddress) processNextWorkIp() bool {
}
defer r.ipQueue.Done(eKey)

err := r.syncIPAddress(eKey.(string))
r.handleIpErr(err, eKey)
err := r.syncIPAddress(eKey)
r.handleIPErr(err, eKey)

return true
}

func (r *RepairIPAddress) handleIpErr(err error, key interface{}) {
func (r *RepairIPAddress) handleIPErr(err error, key string) {
if err == nil {
r.ipQueue.Forget(key)
return

@@ -566,7 +575,7 @@ func (r *RepairIPAddress) processNextWorkCIDR() bool {
return true
}

func (r *RepairIPAddress) handleCIDRErr(err error, key interface{}) {
func (r *RepairIPAddress) handleCIDRErr(err error, key string) {
if err == nil {
r.cidrQueue.Forget(key)
return
@@ -46,7 +46,7 @@ type KubernetesAPIApprovalPolicyConformantConditionController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// last protectedAnnotation value this controller updated the condition per CRD name (to avoid two
// different version of the apiextensions-apiservers in HA to fight for the right message)

@@ -60,10 +60,13 @@ func NewKubernetesAPIApprovalPolicyConformantConditionController(
crdClient client.CustomResourceDefinitionsGetter,
) *KubernetesAPIApprovalPolicyConformantConditionController {
c := &KubernetesAPIApprovalPolicyConformantConditionController{
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kubernetes_api_approval_conformant_condition_controller"),
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubernetes_api_approval_conformant_condition_controller"},
),
lastSeenProtectedAnnotation: map[string]string{},
}

@@ -210,7 +213,7 @@ func (c *KubernetesAPIApprovalPolicyConformantConditionController) processNextWo
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true

@@ -45,7 +45,7 @@ type EstablishingController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewEstablishingController creates new EstablishingController.

@@ -55,7 +55,10 @@ func NewEstablishingController(crdInformer informers.CustomResourceDefinitionInf
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crdEstablishing"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "crdEstablishing"},
),
}

ec.syncFn = ec.sync

@@ -100,7 +103,7 @@ func (ec *EstablishingController) processNextWorkItem() bool {
}
defer ec.queue.Done(key)

err := ec.syncFn(key.(string))
err := ec.syncFn(key)
if err == nil {
ec.queue.Forget(key)
return true

@@ -66,7 +66,7 @@ type CRDFinalizer struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// ListerCollectionDeleter combines rest.Lister and rest.CollectionDeleter.

@@ -93,7 +93,10 @@ func NewCRDFinalizer(
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
crClientGetter: crClientGetter,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_finalizer"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_finalizer"},
),
}

crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -290,7 +293,7 @@ func (c *CRDFinalizer) processNextWorkItem() bool {
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true

@@ -50,7 +50,7 @@ type ConditionController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// last generation this controller updated the condition per CRD name (to avoid two
// different version of the apiextensions-apiservers in HA to fight for the right message)

@@ -64,10 +64,13 @@ func NewConditionController(
crdClient client.CustomResourceDefinitionsGetter,
) *ConditionController {
c := &ConditionController{
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "non_structural_schema_condition_controller"),
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "non_structural_schema_condition_controller"},
),
lastSeenGeneration: map[string]int64{},
}

@@ -216,7 +219,7 @@ func (c *ConditionController) processNextWorkItem() bool {
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
@@ -51,7 +51,7 @@ type Controller struct {
// To allow injection for testing.
syncFn func(string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

staticSpec *spec.Swagger

@@ -114,9 +114,12 @@ func createSpecCache(crd *apiextensionsv1.CustomResourceDefinition) *specCache {
// NewController creates a new Controller with input CustomResourceDefinition informer
func NewController(crdInformer informers.CustomResourceDefinitionInformer) *Controller {
c := &Controller{
crdLister: crdInformer.Lister(),
crdsSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_openapi_controller"),
crdLister: crdInformer.Lister(),
crdsSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_openapi_controller"},
),
specsByName: map[string]*specCache{},
}

@@ -183,11 +186,11 @@ func (c *Controller) processNextWorkItem() bool {
defer func() {
elapsed := time.Since(start)
if elapsed > time.Second {
klog.Warningf("slow openapi aggregation of %q: %s", key.(string), elapsed)
klog.Warningf("slow openapi aggregation of %q: %s", key, elapsed)
}
}()

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true

@@ -50,7 +50,7 @@ type Controller struct {
// To allow injection for testing.
syncFn func(string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

openAPIV3Service *handler3.OpenAPIService

@@ -62,9 +62,12 @@ type Controller struct {
// NewController creates a new Controller with input CustomResourceDefinition informer
func NewController(crdInformer informers.CustomResourceDefinitionInformer) *Controller {
c := &Controller{
crdLister: crdInformer.Lister(),
crdsSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_openapi_v3_controller"),
crdLister: crdInformer.Lister(),
crdsSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_openapi_v3_controller"},
),
specsByGVandName: map[schema.GroupVersion]map[string]*spec3.OpenAPI{},
}

@@ -133,11 +136,11 @@ func (c *Controller) processNextWorkItem() bool {
defer func() {
elapsed := time.Since(start)
if elapsed > time.Second {
klog.Warningf("slow openapi aggregation of %q: %s", key.(string), elapsed)
klog.Warningf("slow openapi aggregation of %q: %s", key, elapsed)
}
}()

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true

@@ -58,7 +58,7 @@ type NamingConditionController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

func NewNamingConditionController(

@@ -69,7 +69,10 @@ func NewNamingConditionController(
crdClient: crdClient,
crdLister: crdInformer.Lister(),
crdSynced: crdInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_naming_condition_controller"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "crd_naming_condition_controller"},
),
}

informerIndexer := crdInformer.Informer().GetIndexer()

@@ -314,7 +317,7 @@ func (c *NamingConditionController) processNextWorkItem() bool {
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
@@ -40,7 +40,7 @@ var _ Controller[runtime.Object] = &controller[runtime.Object]{}

type controller[T runtime.Object] struct {
informer Informer[T]
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// Returns an error if there was a transient error during reconciliation
// and the object should be tried again later.

@@ -99,7 +99,10 @@ func (c *controller[T]) Run(ctx context.Context) error {
klog.Infof("starting %s", c.options.Name)
defer klog.Infof("stopping %s", c.options.Name)

c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.options.Name)
c.queue = workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: c.options.Name},
)

// Forcefully shutdown workqueue. Drop any enqueued items.
// Important to do this in a `defer` at the start of `Run`.

@@ -219,7 +222,7 @@ func (c *controller[T]) runWorker() {
}

// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
err := func(obj string) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do

@@ -227,19 +230,6 @@ func (c *controller[T]) runWorker() {
// put back on the workqueue and attempted again after a back-off
// period.
defer c.queue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// How did an incorrectly formatted key get in the workqueue?
// Done is sufficient. (Forget resets rate limiter for the key,
// but the key is invalid so there is no point in doing that)
return fmt.Errorf("expected string in workqueue but got %#v", obj)
}
defer c.hasProcessed.Finished(key)

if err := c.reconcile(key); err != nil {
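The block deleted above is the payoff of the typed queue: workers no longer need the obj.(string) assertion or the "expected string in workqueue" error path, because the compiler enforces the key type. A small sketch of the resulting worker shape under that assumption (reconcile here is a stand-in for a controller's sync logic, not code from this change):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// reconcile is a placeholder for a controller's per-key sync logic.
func reconcile(key string) error {
	fmt.Println("reconciling", key)
	return nil
}

func processNextWorkItem(queue workqueue.TypedRateLimitingInterface[string]) bool {
	key, shutdown := queue.Get()
	if shutdown {
		return false
	}
	// Done must always be called so the queue knows processing finished.
	defer queue.Done(key)

	if err := reconcile(key); err != nil {
		// Requeue with backoff on transient errors.
		queue.AddRateLimited(key)
		return true
	}
	// Success: drop the per-key failure history.
	queue.Forget(key)
	return true
}

func main() {
	q := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())
	q.Add("kube-system/extension-apiserver-authentication")
	processNextWorkItem(q)
	q.ShutDown()
}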
@@ -61,7 +61,7 @@ type quotaEvaluator struct {

// The technique is valuable for rollup activities to avoid fanout and reduce resource contention.
// We could move this into a library if another component needed it.
// queue is indexed by namespace, so that we bundle up on a per-namespace basis
queue *workqueue.Type
queue *workqueue.Typed[string]
workLock sync.Mutex
work map[string][]*admissionWaiter
dirtyWork map[string][]*admissionWaiter

@@ -122,7 +122,7 @@ func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.
ignoredResources: ignoredResources,
registry: quotaRegistry,

queue: workqueue.NewTypedWithConfig[any](workqueue.TypedQueueConfig[any]{Name: "admission_quota_controller"}),
queue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "admission_quota_controller"}),
work: map[string][]*admissionWaiter{},
dirtyWork: map[string][]*admissionWaiter{},
inProgress: sets.String{},

@@ -666,11 +666,10 @@ func (e *quotaEvaluator) completeWork(ns string) {
// returned namespace (regardless of whether the work item list is
// empty).
func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) {
uncastNS, shutdown := e.queue.Get()
ns, shutdown := e.queue.Get()
if shutdown {
return "", []*admissionWaiter{}, shutdown
}
ns := uncastNS.(string)

e.workLock.Lock()
defer e.workLock.Unlock()
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
@ -35,7 +36,6 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -74,7 +74,7 @@ type RequestHeaderAuthRequestController struct {
|
||||
configmapInformer cache.SharedIndexInformer
|
||||
configmapInformerSynced cache.InformerSynced
|
||||
|
||||
queue workqueue.RateLimitingInterface
|
||||
queue workqueue.TypedRateLimitingInterface[string]
|
||||
|
||||
// exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap
|
||||
exportedRequestHeaderBundle atomic.Value
|
||||
@ -104,7 +104,10 @@ func NewRequestHeaderAuthRequestController(
|
||||
extraHeaderPrefixesKey: extraHeaderPrefixesKey,
|
||||
allowedClientNamesKey: allowedClientNamesKey,
|
||||
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RequestHeaderAuthRequestController"),
|
||||
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.DefaultTypedControllerRateLimiter[string](),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "RequestHeaderAuthRequestController"},
|
||||
),
|
||||
}
|
||||
|
||||
// we construct our own informer because we need such a small subset of the information available. Just one namespace.
|
||||
|
@@ -54,7 +54,7 @@ type ConfigMapCAController struct {

listeners []Listener

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// preRunCaches are the caches to sync before starting the work of this control loop
preRunCaches []cache.InformerSynced
}

@@ -94,7 +94,10 @@ func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, k
configmapLister: configmapLister,
configMapInformer: uncastConfigmapInformer,

queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)},
),
preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced},
}

@@ -60,7 +60,7 @@ type DynamicFileCAContent struct {
listeners []Listener

// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

var _ Notifier = &DynamicFileCAContent{}

@@ -82,7 +82,10 @@ func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAConten
ret := &DynamicFileCAContent{
name: name,
filename: filename,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)},
),
}
if err := ret.loadCABundle(); err != nil {
return nil, err

@@ -47,7 +47,7 @@ type DynamicCertKeyPairContent struct {
listeners []Listener

// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

var _ CertKeyContentProvider = &DynamicCertKeyPairContent{}

@@ -64,7 +64,10 @@ func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*Dyna
name: name,
certFile: certFile,
keyFile: keyFile,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)},
),
}
if err := ret.loadCertKeyPair(); err != nil {
return nil, err

@@ -56,7 +56,7 @@ type DynamicServingCertificateController struct {
currentServingTLSConfig atomic.Value

// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
eventRecorder events.EventRecorder
}

@@ -76,7 +76,10 @@ func NewDynamicServingCertificateController(
servingCert: servingCert,
sniCerts: sniCerts,

queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicServingCertificateController"},
),
eventRecorder: eventRecorder,
}
@@ -49,7 +49,7 @@ type DynamicEncryptionConfigContent struct {
lastLoadedEncryptionConfigHash string

// queue for processing changes in encryption config file.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// dynamicTransformers updates the transformers when encryption config file changes.
dynamicTransformers *encryptionconfig.DynamicTransformers

@@ -78,8 +78,11 @@ func NewDynamicEncryptionConfiguration(
filePath: filePath,
lastLoadedEncryptionConfigHash: configContentHash,
dynamicTransformers: dynamicTransformers,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
apiServerID: apiServerID,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: name},
),
apiServerID: apiServerID,
getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) {
return encryptionconfig.GetEncryptionConfigHash(filepath)
},

@@ -150,7 +153,7 @@ func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.C
return true
}

func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) {
func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey string) {
var (
updatedEffectiveConfig bool
err error

@@ -349,7 +349,7 @@ apiserver_encryption_config_controller_automatic_reloads_total{apiserver_id_hash
}

type mockWorkQueue struct {
workqueue.RateLimitingInterface // will panic if any unexpected method is called
workqueue.TypedRateLimitingInterface[string] // will panic if any unexpected method is called

closeOnce sync.Once
addCalled chan struct{}

@@ -362,33 +362,33 @@ type mockWorkQueue struct {
addRateLimitedCount atomic.Uint64
}

func (m *mockWorkQueue) Done(item interface{}) {
func (m *mockWorkQueue) Done(item string) {
m.count.Add(1)
m.wasCanceled = m.ctx.Err() != nil
m.cancel()
}

func (m *mockWorkQueue) Get() (item interface{}, shutdown bool) {
func (m *mockWorkQueue) Get() (item string, shutdown bool) {
<-m.addCalled

switch m.count.Load() {
case 0:
return nil, false
return "", false
case 1:
return nil, true
return "", true
default:
panic("too many calls to Get")
}
}

func (m *mockWorkQueue) Add(item interface{}) {
func (m *mockWorkQueue) Add(item string) {
m.closeOnce.Do(func() {
close(m.addCalled)
})
}

func (m *mockWorkQueue) ShutDown() {}
func (m *mockWorkQueue) AddRateLimited(item interface{}) { m.addRateLimitedCount.Add(1) }
func (m *mockWorkQueue) ShutDown() {}
func (m *mockWorkQueue) AddRateLimited(item string) { m.addRateLimitedCount.Add(1) }

type mockHealthChecker struct {
pluginName string
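The mockWorkQueue changes above show the test-double side of the migration: embed the typed interface so the fake still satisfies it, and override only the methods the test actually exercises. A hedged sketch of the same idea for a string-keyed queue (the type and field names here are illustrative, not from this change):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// recordingQueue embeds the typed interface; methods that are not overridden
// would panic if called (the embedded interface is nil), which is acceptable
// in a test that only exercises Add and AddRateLimited.
type recordingQueue struct {
	workqueue.TypedRateLimitingInterface[string]
	added       []string
	rateLimited []string
}

func (q *recordingQueue) Add(item string)            { q.added = append(q.added, item) }
func (q *recordingQueue) AddRateLimited(item string) { q.rateLimited = append(q.rateLimited, item) }

func main() {
	q := &recordingQueue{}
	q.Add("default/example")
	q.AddRateLimited("default/example")
	fmt.Println(q.added, q.rateLimited)
}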
@@ -135,7 +135,7 @@ type configController struct {

// configQueue holds `(interface{})(0)` when the configuration
// objects need to be reprocessed.
configQueue workqueue.RateLimitingInterface
configQueue workqueue.TypedRateLimitingInterface[int]

plLister flowcontrollister.PriorityLevelConfigurationLister
plInformerSynced cache.InformerSynced

@@ -292,7 +292,10 @@ func newTestableController(config TestableConfig) *configController {
klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
// Start with longish delay because conflicts will be between
// different processes, so take some time to go away.
cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
cfgCtlr.configQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour),
workqueue.TypedRateLimitingQueueConfig[int]{Name: "priority_and_fairness_config_queue"},
)
// ensure the data structure reflects the mandatory config
cfgCtlr.lockAndDigestConfigObjects(nil, nil)
fci := config.InformerFactory.Flowcontrol().V1()

@@ -474,7 +477,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool {
return false
}

func(obj interface{}) {
func(obj int) {
defer cfgCtlr.configQueue.Done(obj)
specificDelay, err := cfgCtlr.syncOne()
switch {
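The configController hunk above also shows that the key does not have to be a string: its queue only ever carries a sentinel value, so it becomes a TypedRateLimitingInterface[int] with a typed exponential-backoff rate limiter. A small illustrative sketch of such a non-string-keyed queue (the names are made up; only the constructor calls mirror the change):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A queue keyed by int: useful when the queue only signals "reprocess
	// everything" and the item value itself carries no information.
	q := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour),
		workqueue.TypedRateLimitingQueueConfig[int]{Name: "example_config_queue"},
	)
	q.Add(0) // collapse all pending work into a single sentinel item

	item, shutdown := q.Get()
	if !shutdown {
		fmt.Println("got sentinel", item)
		q.Done(item)
		q.Forget(item)
	}
	q.ShutDown()
}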
@@ -37,12 +37,12 @@ import (

// Controller demonstrates how to implement a controller with client-go.
type Controller struct {
indexer cache.Indexer
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
informer cache.Controller
}

// NewController creates a new Controller.
func NewController(queue workqueue.RateLimitingInterface, indexer cache.Indexer, informer cache.Controller) *Controller {
func NewController(queue workqueue.TypedRateLimitingInterface[string], indexer cache.Indexer, informer cache.Controller) *Controller {
return &Controller{
informer: informer,
indexer: indexer,

@@ -62,7 +62,7 @@ func (c *Controller) processNextItem() bool {
defer c.queue.Done(key)

// Invoke the method containing the business logic
err := c.syncToStdout(key.(string))
err := c.syncToStdout(key)
// Handle the error if something went wrong during the execution of the business logic
c.handleErr(err, key)
return true

@@ -90,7 +90,7 @@ func (c *Controller) syncToStdout(key string) error {
}

// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key interface{}) {
func (c *Controller) handleErr(err error, key string) {
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of

@@ -168,7 +168,7 @@ func main() {
podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())

// create the workqueue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())

// Bind the workqueue to a cache with the help of an informer. This way we make sure that
// whenever the cache is updated, the pod key is added to the workqueue.
@@ -47,14 +47,17 @@ type dynamicClientCert struct {
connDialer *connrotation.Dialer

// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
d := &dynamicClientCert{
reload: reload,
connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"},
),
}

return d
@@ -109,7 +109,7 @@ type CloudNodeController struct {

nodesLister corelisters.NodeLister
nodesSynced cache.InformerSynced
workqueue workqueue.RateLimitingInterface
workqueue workqueue.TypedRateLimitingInterface[string]
}

// NewCloudNodeController creates a CloudNodeController object

@@ -134,7 +134,10 @@ func NewCloudNodeController(
workerCount: workerCount,
nodesLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Nodes"),
workqueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "Nodes"},
),
}

// Use shared informer to listen to add/update of nodes. Note that any nodes

@@ -219,16 +222,8 @@ func (cnc *CloudNodeController) processNextWorkItem(ctx context.Context) bool {
}

// We wrap this block in a func so we can defer cnc.workqueue.Done.
err := func(obj interface{}) error {
defer cnc.workqueue.Done(obj)

var key string
var ok bool
if key, ok = obj.(string); !ok {
cnc.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
err := func(key string) error {
defer cnc.workqueue.Done(key)

// Run the syncHandler, passing it the key of the
// Node resource to be synced.

@@ -241,7 +236,7 @@ func (cnc *CloudNodeController) processNextWorkItem(ctx context.Context) bool {

// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
cnc.workqueue.Forget(obj)
cnc.workqueue.Forget(key)
return nil
}(obj)
|
||||
nodeLister corelisters.NodeLister
|
||||
nodeListerSynced cache.InformerSynced
|
||||
// services and nodes that need to be synced
|
||||
serviceQueue workqueue.RateLimitingInterface
|
||||
nodeQueue workqueue.RateLimitingInterface
|
||||
serviceQueue workqueue.TypedRateLimitingInterface[string]
|
||||
nodeQueue workqueue.TypedRateLimitingInterface[string]
|
||||
// lastSyncedNodes is used when reconciling node state and keeps track of
|
||||
// the last synced set of nodes per service key. This is accessed from the
|
||||
// service and node controllers, hence it is protected by a lock.
|
||||
@ -117,9 +117,15 @@ func New(
|
||||
cache: &serviceCache{serviceMap: make(map[string]*cachedService)},
|
||||
nodeLister: nodeInformer.Lister(),
|
||||
nodeListerSynced: nodeInformer.Informer().HasSynced,
|
||||
serviceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
|
||||
nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "node"),
|
||||
lastSyncedNodes: make(map[string][]*v1.Node),
|
||||
serviceQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "service"},
|
||||
),
|
||||
nodeQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "node"},
|
||||
),
|
||||
lastSyncedNodes: make(map[string][]*v1.Node),
|
||||
}
|
||||
|
||||
serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
|
||||
@ -282,7 +288,7 @@ func (c *Controller) processNextServiceItem(ctx context.Context) bool {
|
||||
}
|
||||
defer c.serviceQueue.Done(key)
|
||||
|
||||
err := c.syncService(ctx, key.(string))
|
||||
err := c.syncService(ctx, key)
|
||||
if err == nil {
|
||||
c.serviceQueue.Forget(key)
|
||||
return true
|
||||
|
@ -175,9 +175,15 @@ func newController(ctx context.Context, objects ...runtime.Object) (*Controller,
|
||||
serviceListerSynced: serviceInformer.Informer().HasSynced,
|
||||
nodeLister: nodeInformer.Lister(),
|
||||
nodeListerSynced: nodeInformer.Informer().HasSynced,
|
||||
serviceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
|
||||
nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "node"),
|
||||
lastSyncedNodes: make(map[string][]*v1.Node),
|
||||
serviceQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "service"},
|
||||
),
|
||||
nodeQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](minRetryDelay, maxRetryDelay),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "node"},
|
||||
),
|
||||
lastSyncedNodes: make(map[string][]*v1.Node),
|
||||
}
|
||||
|
||||
informerFactory.Start(stopCh)
|
||||
@ -897,8 +903,8 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
|
||||
if quit {
|
||||
t.Fatalf("get no queue element")
|
||||
}
|
||||
if keyExpected != keyGot.(string) {
|
||||
t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot.(string))
|
||||
if keyExpected != keyGot {
|
||||
t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot)
|
||||
}
|
||||
|
||||
newService := svc.DeepCopy()
|
||||
@ -2314,7 +2320,10 @@ func TestServiceQueueDelay(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
controller, cloud, client := newController(ctx)
|
||||
queue := &spyWorkQueue{RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test-service-queue-delay")}
|
||||
queue := &spyWorkQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.DefaultTypedControllerRateLimiter[string](),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "test-service-queue-delay"},
|
||||
)}
|
||||
controller.serviceQueue = queue
|
||||
cloud.Err = tc.lbCloudErr
|
||||
|
||||
@ -2400,26 +2409,26 @@ func (l *fakeNodeLister) Get(name string) (*v1.Node, error) {
|
||||
// spyWorkQueue implements a work queue and adds the ability to inspect processed
|
||||
// items for testing purposes.
|
||||
type spyWorkQueue struct {
|
||||
workqueue.RateLimitingInterface
|
||||
workqueue.TypedRateLimitingInterface[string]
|
||||
items []spyQueueItem
|
||||
}
|
||||
|
||||
// spyQueueItem represents an item that was being processed.
|
||||
type spyQueueItem struct {
|
||||
Key interface{}
|
||||
Key string
|
||||
// Delay represents the delayed duration if and only if AddAfter was invoked.
|
||||
Delay time.Duration
|
||||
}
|
||||
|
||||
// AddAfter is like workqueue.RateLimitingInterface.AddAfter but records the
|
||||
// added key and delay internally.
|
||||
func (f *spyWorkQueue) AddAfter(key interface{}, delay time.Duration) {
|
||||
func (f *spyWorkQueue) AddAfter(key string, delay time.Duration) {
|
||||
f.items = append(f.items, spyQueueItem{
|
||||
Key: key,
|
||||
Delay: delay,
|
||||
})
|
||||
|
||||
f.RateLimitingInterface.AddAfter(key, delay)
|
||||
f.TypedRateLimitingInterface.AddAfter(key, delay)
|
||||
}
|
||||
|
||||
// getItems returns all items that were recorded.
|
||||
|
@@ -163,7 +163,7 @@ type controller struct {
setReservedFor bool
kubeClient kubernetes.Interface
claimNameLookup *resourceclaim.Lookup
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
eventRecorder record.EventRecorder
rcLister resourcev1alpha2listers.ResourceClassLister
rcSynced cache.InformerSynced

@@ -208,8 +208,10 @@ func New(
v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)})

// The work queue contains either keys for claims or PodSchedulingContext objects.
queue := workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name))
queue := workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("%s-queue", name)},
)

// The mutation cache acts as an additional layer for the informer
// cache and after an update made by the controller returns a more

@@ -371,7 +373,7 @@ func (ctrl *controller) sync() {
logger := klog.LoggerWithValues(ctrl.logger, "key", key)
ctx := klog.NewContext(ctrl.ctx, logger)
logger.V(4).Info("processing")
obj, err := ctrl.syncKey(ctx, key.(string))
obj, err := ctrl.syncKey(ctx, key)
switch err {
case nil:
logger.V(5).Info("completed")
@@ -51,7 +51,7 @@ type APIServiceRegistrationController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

var _ dynamiccertificates.Listener = &APIServiceRegistrationController{}

@@ -62,7 +62,10 @@ func NewAPIServiceRegistrationController(apiServiceInformer informers.APIService
apiHandlerManager: apiHandlerManager,
apiServiceLister: apiServiceInformer.Lister(),
apiServiceSynced: apiServiceInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "APIServiceRegistrationController"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "APIServiceRegistrationController"},
),
}

apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -143,7 +146,7 @@ func (c *APIServiceRegistrationController) processNextWorkItem() bool {
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true
@@ -103,7 +103,7 @@ type discoveryManager struct {
// It is important that the reconciler for this queue does not excessively
// contact the apiserver if a key was enqueued before the server was last
// contacted.
dirtyAPIServiceQueue workqueue.RateLimitingInterface
dirtyAPIServiceQueue workqueue.TypedRateLimitingInterface[string]

// Merged handler which stores all known groupversions
mergedDiscoveryHandler discoveryendpoint.ResourceManager

@@ -197,8 +197,11 @@ func NewDiscoveryManager(
mergedDiscoveryHandler: target,
apiServices: make(map[string]groupVersionInfo),
cachedResults: make(map[serviceKey]cachedResult),
dirtyAPIServiceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "discovery-manager"),
codecs: codecs,
dirtyAPIServiceQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "discovery-manager"},
),
codecs: codecs,
}
}

@@ -488,7 +491,7 @@ func (dm *discoveryManager) Run(stopCh <-chan struct{}, discoverySyncedCh chan<-
func() {
defer dm.dirtyAPIServiceQueue.Done(next)

if err := dm.syncAPIService(next.(string)); err != nil {
if err := dm.syncAPIService(next); err != nil {
dm.dirtyAPIServiceQueue.AddRateLimited(next)
} else {
dm.dirtyAPIServiceQueue.Forget(next)
@@ -1072,39 +1072,39 @@ func fetchPath(handler http.Handler, etag string) (*http.Response, []byte, *apid
// isComplete
type completerWorkqueue struct {
lock sync.Mutex
workqueue.RateLimitingInterface
processing map[interface{}]struct{}
workqueue.TypedRateLimitingInterface[string]
processing map[string]struct{}
}

var _ = workqueue.RateLimitingInterface(&completerWorkqueue{})
var _ = workqueue.TypedRateLimitingInterface[string](&completerWorkqueue{})

func newCompleterWorkqueue(wq workqueue.RateLimitingInterface) *completerWorkqueue {
func newCompleterWorkqueue(wq workqueue.TypedRateLimitingInterface[string]) *completerWorkqueue {
return &completerWorkqueue{
RateLimitingInterface: wq,
processing: make(map[interface{}]struct{}),
TypedRateLimitingInterface: wq,
processing: make(map[string]struct{}),
}
}

func (q *completerWorkqueue) Add(item interface{}) {
func (q *completerWorkqueue) Add(item string) {
q.lock.Lock()
defer q.lock.Unlock()
q.processing[item] = struct{}{}
q.RateLimitingInterface.Add(item)
q.TypedRateLimitingInterface.Add(item)
}

func (q *completerWorkqueue) AddAfter(item interface{}, duration time.Duration) {
func (q *completerWorkqueue) AddAfter(item string, duration time.Duration) {
q.Add(item)
}

func (q *completerWorkqueue) AddRateLimited(item interface{}) {
func (q *completerWorkqueue) AddRateLimited(item string) {
q.Add(item)
}

func (q *completerWorkqueue) Done(item interface{}) {
func (q *completerWorkqueue) Done(item string) {
q.lock.Lock()
defer q.lock.Unlock()
delete(q.processing, item)
q.RateLimitingInterface.Done(item)
q.TypedRateLimitingInterface.Done(item)
}

func (q *completerWorkqueue) isComplete() bool {
@@ -81,7 +81,7 @@ type autoRegisterController struct {
apiServicesAtStart map[string]bool

// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}

// NewAutoRegisterController creates a new autoRegisterController.

@@ -97,7 +97,10 @@ func NewAutoRegisterController(apiServiceInformer informers.APIServiceInformer,
syncedSuccessfullyLock: &sync.RWMutex{},
syncedSuccessfully: map[string]bool{},

queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "autoregister"},
),
}
c.syncHandler = c.checkAPIService

@@ -182,7 +185,7 @@ func (c *autoRegisterController) processNextWorkItem() bool {
defer c.queue.Done(key)

// do your work on the key. This method will contains your "do stuff" logic
err := c.syncHandler(key.(string))
err := c.syncHandler(key)
if err == nil {
// if you had no error, tell the queue to stop tracking history for your key. This will
// reset things like failure counts for per-item rate limiting

@@ -316,7 +316,10 @@ func TestSync(t *testing.T) {
apiServiceClient: fakeClient.ApiregistrationV1(),
apiServiceLister: listers.NewAPIServiceLister(apiServiceIndexer),
apiServicesToSync: map[string]*apiregistrationv1.APIService{},
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "autoregister"},
),

syncedSuccessfullyLock: &sync.RWMutex{},
syncedSuccessfully: alreadySynced,
@ -47,7 +47,7 @@ const (
|
||||
// them if necessary.
|
||||
type AggregationController struct {
|
||||
openAPIAggregationManager aggregator.SpecAggregator
|
||||
queue workqueue.RateLimitingInterface
|
||||
queue workqueue.TypedRateLimitingInterface[string]
|
||||
downloader *aggregator.Downloader
|
||||
|
||||
// To allow injection for testing.
|
||||
@ -58,9 +58,9 @@ type AggregationController struct {
|
||||
func NewAggregationController(downloader *aggregator.Downloader, openAPIAggregationManager aggregator.SpecAggregator) *AggregationController {
|
||||
c := &AggregationController{
|
||||
openAPIAggregationManager: openAPIAggregationManager,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(
|
||||
workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay),
|
||||
"open_api_aggregation_controller",
|
||||
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
|
||||
workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay),
|
||||
workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_aggregation_controller"},
|
||||
),
|
||||
downloader: downloader,
|
||||
}
|
||||
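NewTypedItemExponentialFailureRateLimiter keeps a per-item failure count: each call to When for the same key roughly doubles the returned delay, starting at the base delay (successfulUpdateDelay here) and capped at the maximum (failedUpdateMaxExpDelay), and Forget resets the count. A small sketch of that behaviour with placeholder delays rather than this controller's real constants:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item exponential backoff: 100ms, 200ms, 400ms, ... capped at 5s.
	limiter := workqueue.NewTypedItemExponentialFailureRateLimiter[string](100*time.Millisecond, 5*time.Second)

	for i := 0; i < 4; i++ {
		// Each call counts as another failure for this key and grows the delay.
		fmt.Println(limiter.When("v1.example.com"))
	}

	// Forget clears the failure history, so the next delay starts at the base again.
	limiter.Forget("v1.example.com")
	fmt.Println(limiter.When("v1.example.com"))
}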
@@ -97,7 +97,7 @@ func (c *AggregationController) processNextWorkItem() bool {
}
klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key)

action, err := c.syncHandler(key.(string))
action, err := c.syncHandler(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("loading OpenAPI spec for %q failed with: %v", key, err))
}

@@ -46,7 +46,7 @@ const (
// AggregationController periodically checks the list of group-versions handled by each APIService and updates the discovery page periodically
type AggregationController struct {
openAPIAggregationManager aggregator.SpecProxier
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]

// To allow injection for testing.
syncHandler func(key string) (syncAction, error)

@@ -56,9 +56,9 @@ type AggregationController struct {
func NewAggregationController(openAPIAggregationManager aggregator.SpecProxier) *AggregationController {
c := &AggregationController{
openAPIAggregationManager: openAPIAggregationManager,
queue: workqueue.NewNamedRateLimitingQueue(
workqueue.NewItemExponentialFailureRateLimiter(successfulUpdateDelay, failedUpdateMaxExpDelay),
"open_api_v3_aggregation_controller",
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](successfulUpdateDelay, failedUpdateMaxExpDelay),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "open_api_v3_aggregation_controller"},
),
}

@@ -98,7 +98,7 @@ func (c *AggregationController) processNextWorkItem() bool {
return false
}

if aggregator.IsLocalAPIService(key.(string)) {
if aggregator.IsLocalAPIService(key) {
// for local delegation targets that are aggregated once per second, log at
// higher level to avoid flooding the log
klog.V(6).Infof("OpenAPI AggregationController: Processing item %s", key)

@@ -106,7 +106,7 @@ func (c *AggregationController) processNextWorkItem() bool {
klog.V(4).Infof("OpenAPI AggregationController: Processing item %s", key)
}

action, err := c.syncHandler(key.(string))
action, err := c.syncHandler(key)
if err == nil {
c.queue.Forget(key)
} else {

@@ -115,7 +115,7 @@ func (c *AggregationController) processNextWorkItem() bool {

switch action {
case syncRequeue:
if aggregator.IsLocalAPIService(key.(string)) {
if aggregator.IsLocalAPIService(key) {
klog.V(7).Infof("OpenAPI AggregationController: action for local item %s: Requeue after %s.", key, successfulUpdateDelayLocal)
c.queue.AddAfter(key, successfulUpdateDelayLocal)
} else {
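The syncRequeue branch above uses AddAfter, which hands the key to the queue's delaying layer and re-enqueues it once the delay expires; with the typed queue the key stays a string the whole way through. A sketch of that periodic-requeue pattern under assumed names and delays, none of them taken from this controller:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "requeue_example"},
	)
	defer queue.ShutDown()

	queue.Add("local/example-apiservice")

	for i := 0; i < 3; i++ {
		key, shutdown := queue.Get()
		if shutdown {
			return
		}
		fmt.Println("synced", key)
		queue.Forget(key)
		// Re-enqueue the same key after a delay, mirroring the syncRequeue branch.
		queue.AddAfter(key, 200*time.Millisecond)
		queue.Done(key)
	}
}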
@@ -81,7 +81,7 @@ type AvailableConditionController struct {
// To allow injection for testing.
syncFn func(key string) error

queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// map from service-namespace -> service-name -> apiservice names
cache map[string]map[string][]string
// this lock protects operations on the above cache

@@ -107,12 +107,13 @@ func NewAvailableConditionController(
serviceLister: serviceInformer.Lister(),
endpointsLister: endpointsInformer.Lister(),
serviceResolver: serviceResolver,
queue: workqueue.NewNamedRateLimitingQueue(
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
// We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the
// service network, it is possible for an external, non-watchable factor to affect availability. This keeps
// the maximum disruption time to a minimum, but it does prevent hot loops.
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
"AvailableConditionController"),
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "AvailableConditionController"},
),
proxyTransportDial: proxyTransportDial,
proxyCurrentCertKeyContent: proxyCurrentCertKeyContent,
metrics: newAvailabilityMetrics(),

@@ -451,7 +452,7 @@ func (c *AvailableConditionController) processNextWorkItem() bool {
}
defer c.queue.Done(key)

err := c.syncFn(key.(string))
err := c.syncFn(key)
if err == nil {
c.queue.Forget(key)
return true

@@ -126,12 +126,13 @@ func setupAPIServices(apiServices []*apiregistration.APIService) (*AvailableCond
serviceLister: v1listers.NewServiceLister(serviceIndexer),
endpointsLister: v1listers.NewEndpointsLister(endpointsIndexer),
serviceResolver: &fakeServiceResolver{url: testServer.URL},
queue: workqueue.NewNamedRateLimitingQueue(
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
// We want a fairly tight requeue time. The controller listens to the API, but because it relies on the routability of the
// service network, it is possible for an external, non-watchable factor to affect availability. This keeps
// the maximum disruption time to a minimum, but it does prevent hot loops.
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
"AvailableConditionController"),
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 30*time.Second),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "AvailableConditionController"},
),
metrics: newAvailabilityMetrics(),
}
for _, svc := range apiServices {

@@ -80,7 +80,7 @@ type Controller struct {
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
workqueue workqueue.TypedRateLimitingInterface[string]
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder

@@ -105,9 +105,9 @@ func NewController(
eventBroadcaster.StartStructuredLogging(0)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
ratelimiter := workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 300)},
ratelimiter := workqueue.NewTypedMaxOfRateLimiter(
workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 1000*time.Second),
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(50), 300)},
)

controller := &Controller{
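The combined limiter above returns, for each key, the larger of two delays: a per-item exponential backoff that only grows while that particular key keeps failing, and a shared token bucket that caps overall requeue throughput across all keys (50 qps with a burst of 300 in this controller). A compact sketch of the same combination with smaller, illustrative numbers:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	limiter := workqueue.NewTypedMaxOfRateLimiter(
		// Per-item backoff: grows only while the same key keeps failing.
		workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, time.Second),
		// Shared throughput cap across all keys: 10 qps with a burst of 20.
		&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 20)},
	)

	// When reports the maximum of the two delays for this key.
	for i := 0; i < 3; i++ {
		fmt.Println(limiter.When("default/example-foo"))
	}
}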
@@ -117,7 +117,7 @@ func NewController(
deploymentsSynced: deploymentInformer.Informer().HasSynced,
foosLister: fooInformer.Lister(),
foosSynced: fooInformer.Informer().HasSynced,
workqueue: workqueue.NewRateLimitingQueue(ratelimiter),
workqueue: workqueue.NewTypedRateLimitingQueue(ratelimiter),
recorder: recorder,
}

@@ -204,29 +204,14 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
}

// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
err := func(key string) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
defer c.workqueue.Done(key)
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.syncHandler(ctx, key); err != nil {
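With a typed queue, the wrapper closure above no longer needs the obj.(string) assertion, the ok flag, or the Forget-on-invalid-item branch; items come off the queue as strings. A hedged sketch of roughly how such a worker looks after the migration, written as a free function with a stand-in syncHandler rather than the sample controller's actual method:

package controller

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processOne drains a single key from a typed queue. Because Get returns a
// string, there is no interface{} handling and no "expected string in
// workqueue" error path left to write.
func processOne(ctx context.Context, queue workqueue.TypedRateLimitingInterface[string],
	syncHandler func(ctx context.Context, key string) error) bool {
	key, shutdown := queue.Get()
	if shutdown {
		return false
	}
	defer queue.Done(key)

	if err := syncHandler(ctx, key); err != nil {
		// Transient failure: requeue the key with rate-limited backoff.
		queue.AddRateLimited(key)
		fmt.Printf("syncing %q failed: %v\n", key, err)
		return true
	}

	// Success: drop the key's failure history so future requeues start fresh.
	queue.Forget(key)
	return true
}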