add gc and improve testing

This commit is contained in:
Jefftree 2024-07-23 02:57:34 +00:00
parent c47ff1e1a9
commit e0c6987ca8
7 changed files with 606 additions and 53 deletions

View File

@ -162,7 +162,15 @@ func (c completedConfig) New(name string, delegationTarget genericapiserver.Dele
client.CoordinationV1(),
client.CoordinationV1alpha1(),
)
return controller.Run, err
gccontroller := leaderelection.NewLeaseCandidateGC(
client,
1*time.Hour,
lcInformer,
)
return func(ctx context.Context, workers int) {
go controller.Run(ctx, workers)
go gccontroller.Run(ctx.Done())
}, err
})
return nil
})

View File

@ -17,7 +17,7 @@ limitations under the License.
package leaderelection
import (
"slices"
"fmt"
"time"
"github.com/blang/semver/v4"
@ -53,26 +53,70 @@ func shouldReelect(candidates []*v1alpha1.LeaseCandidate, currentLeader *v1alpha
return compare(currentLeader, pickedLeader) > 0
}
func pickBestStrategy(candidates []*v1alpha1.LeaseCandidate) v1.CoordinatedLeaseStrategy {
// TODO: This doesn't account for cycles within the preference graph
// We may have to do a topological sort to verify that the preference ordering is valid
var bestStrategy *v1.CoordinatedLeaseStrategy
for _, c := range candidates {
if len(c.Spec.PreferredStrategies) > 0 {
if bestStrategy == nil {
bestStrategy = &c.Spec.PreferredStrategies[0]
continue
}
if *bestStrategy != c.Spec.PreferredStrategies[0] {
if idx := slices.Index(c.Spec.PreferredStrategies, *bestStrategy); idx > 0 {
bestStrategy = &c.Spec.PreferredStrategies[0]
} else {
klog.Infof("Error: bad strategy ordering")
}
// topologicalSortWithOneRoot returns a topological ordering of the graph. A valid ordering must have exactly one root (indegree=0) node; otherwise nil is returned.
func topologicalSortWithOneRoot(graph map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy) []v1.CoordinatedLeaseStrategy {
inDegree := make(map[v1.CoordinatedLeaseStrategy]int)
for node := range graph {
inDegree[node] = 0
}
for _, neighbors := range graph {
for _, neighbor := range neighbors {
inDegree[neighbor]++
}
}
var queue []v1.CoordinatedLeaseStrategy
for vertex, degree := range inDegree {
if degree == 0 {
queue = append(queue, vertex)
}
}
// If multiple nodes have an indegree of 0, the strategies are non-superseding, which is a conflict.
if len(queue) > 1 {
return nil
}
var sorted []v1.CoordinatedLeaseStrategy
for len(queue) > 0 {
vertex := queue[0]
queue = queue[1:]
sorted = append(sorted, vertex)
for _, neighbor := range graph[vertex] {
inDegree[neighbor]--
if inDegree[neighbor] == 0 {
queue = append(queue, neighbor)
}
}
}
return (*bestStrategy)
if len(sorted) != len(graph) {
fmt.Printf("%s", (sorted))
return nil // Cycle detected
}
return sorted
}
// pickBestStrategy resolves a single CoordinatedLeaseStrategy from the
// preference lists of all candidates. Each candidate's PreferredStrategies
// contributes ordering edges (strategy[i] supersedes strategy[i+1]) to a
// preference graph; the graph must be a DAG with exactly one root (see
// topologicalSortWithOneRoot), and that root is the winning strategy.
// An error is returned when the combined preferences conflict (multiple
// roots), contain a cycle, or no candidate lists any strategy.
func pickBestStrategy(candidates []*v1alpha1.LeaseCandidate) (v1.CoordinatedLeaseStrategy, error) {
	var nilStrategy v1.CoordinatedLeaseStrategy
	graph := make(map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy)
	for _, c := range candidates {
		prefs := c.Spec.PreferredStrategies
		// Guard: a candidate without preferences would otherwise panic on
		// the prefs[len(prefs)-1] index below.
		if len(prefs) == 0 {
			continue
		}
		for i := 0; i < len(prefs)-1; i++ {
			graph[prefs[i]] = append(graph[prefs[i]], prefs[i+1])
		}
		// Ensure the least-preferred strategy exists as a vertex even when it
		// has no outgoing edges, so it participates in the sort.
		if _, ok := graph[prefs[len(prefs)-1]]; !ok {
			graph[prefs[len(prefs)-1]] = []v1.CoordinatedLeaseStrategy{}
		}
	}
	if len(graph) == 0 {
		return nilStrategy, fmt.Errorf("no preferred strategies found")
	}
	sorted := topologicalSortWithOneRoot(graph)
	if sorted == nil {
		return nilStrategy, fmt.Errorf("invalid strategy")
	}
	return sorted[0], nil
}
func validLeaseCandidateForOldestEmulationVersion(l *v1alpha1.LeaseCandidate) bool {

View File

@ -509,7 +509,6 @@ func TestShouldReelect(t *testing.T) {
},
expectResult: false,
},
// TODO: Add test cases where candidates have invalid version numbers
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
@ -520,3 +519,232 @@ func TestShouldReelect(t *testing.T) {
})
}
}
// TestTopologicalSortWithOneRoot exercises the topological sort used for
// strategy resolution: a DAG with a single root (indegree 0) yields its full
// ordering, while cycles yield nil.
func TestTopologicalSortWithOneRoot(t *testing.T) {
	tests := []struct {
		name  string
		graph map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy
		want  []v1.CoordinatedLeaseStrategy
	}{
		{
			// Linear chain: one root, one valid ordering.
			name: "simple DAG",
			graph: map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy{
				v1.OldestEmulationVersion: {"foo"},
				"foo":                     {"bar"},
				"bar":                     {},
			},
			want: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion, "foo", "bar"},
		},
		{
			// Two-node cycle: no vertex has indegree 0, so sort must fail.
			name: "cycle",
			graph: map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy{
				v1.OldestEmulationVersion: {"foo"},
				"foo":                     {v1.OldestEmulationVersion},
			},
			want: nil,
		},
		{
			// Diamond-ish DAG with extra edges but still exactly one root.
			name: "multiple",
			graph: map[v1.CoordinatedLeaseStrategy][]v1.CoordinatedLeaseStrategy{
				v1.OldestEmulationVersion: {"foo", "baz"},
				"foo":                     {"baz"},
				"baz":                     {},
			},
			want: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion, "foo", "baz"},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := topologicalSortWithOneRoot(tc.graph)
			if !equalStrategies(got, tc.want) {
				t.Errorf("topologicalSortWithOneRoot() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestPickBestStrategy verifies strategy resolution across candidate sets:
// compatible preference lists resolve to the single most-preferred strategy,
// while conflicting or cyclic preferences yield an error.
func TestPickBestStrategy(t *testing.T) {
	tests := []struct {
		name         string
		candidates   []*v1alpha1.LeaseCandidate
		wantStrategy v1.CoordinatedLeaseStrategy
		wantError    bool
	}{
		{
			name: "single candidate, single preferred strategy",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// Two disjoint single-strategy preferences produce two roots.
			name: "multiple candidates, different preferred strategies should fail",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{"foo.com/bar"},
					},
				},
			},
			wantError: true,
		},
		{
			// candidate1's ordering supersedes candidate2's single entry, so
			// the combined graph still has one root.
			name: "multiple candidates, multiple resolved preferred strategy",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion, "foo.com/bar"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{"foo.com/bar"},
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, same preferred strategy",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, conflicting preferred strategy",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{"foo.com/bar"},
					},
				},
			},
			wantStrategy: "",
			wantError:    true,
		},
		{
			// Opposite orderings create a cycle in the preference graph.
			name: "multiple candidates, cycle in preferred strategies",
			candidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{"foo.com/bar", v1.OldestEmulationVersion},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion, "foo.com/bar"},
					},
				},
			},
			wantError: true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			gotStrategy, err := pickBestStrategy(tc.candidates)
			gotError := err != nil
			if gotError != tc.wantError {
				// Report both the boolean outcome and the underlying error.
				t.Errorf("pickBestStrategy() error = %v (%v), want error %v", gotError, err, tc.wantError)
			}
			if !gotError && gotStrategy != tc.wantStrategy {
				t.Errorf("pickBestStrategy() = %v, want %v", gotStrategy, tc.wantStrategy)
			}
		})
	}
}
// equalStrategies reports whether the two strategy slices hold the same
// elements in the same order (nil and empty compare equal).
func equalStrategies(s1, s2 []v1.CoordinatedLeaseStrategy) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, s := range s1 {
		if s != s2[i] {
			return false
		}
	}
	return true
}

View File

@ -49,7 +49,7 @@ const (
electionDuration = 5 * time.Second
leaseCandidateValidDuration = 5 * time.Minute
leaseCandidateValidDuration = 30 * time.Minute
)
// Controller is the leader election controller, which observes component identity leases for
@ -202,7 +202,16 @@ func (c *Controller) electionNeeded(candidates []*v1alpha1.LeaseCandidate, lease
return true, nil
}
prelimStrategy := pickBestStrategy(candidates)
for _, candidate := range candidates {
if candidate.Spec.RenewTime != nil && candidate.Spec.RenewTime.Add(leaseCandidateValidDuration/2).Before(time.Now()) {
return true, nil
}
}
prelimStrategy, err := pickBestStrategy(candidates)
if err != nil {
return false, err
}
if prelimStrategy != v1.OldestEmulationVersion {
klog.V(2).Infof("strategy %s is not recognized by CLE.", prelimStrategy)
return false, nil
@ -302,7 +311,11 @@ func (c *Controller) reconcileElectionStep(ctx context.Context, leaseNN types.Na
return false, fmt.Errorf("no available candidates")
}
strategy := pickBestStrategy(ackedCandidates)
strategy, err := pickBestStrategy(ackedCandidates)
if err != nil {
return false, err
}
if strategy != v1.OldestEmulationVersion {
klog.V(2).Infof("strategy %s is not recognized by CLE.", strategy)
return false, nil

View File

@ -0,0 +1,99 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coordinationv1alpha1informers "k8s.io/client-go/informers/coordination/v1alpha1"
"k8s.io/client-go/kubernetes"
listers "k8s.io/client-go/listers/coordination/v1alpha1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
// LeaseCandidateGCController periodically deletes LeaseCandidate objects
// whose renew time has expired (see gc).
type LeaseCandidateGCController struct {
	// kubeclientset performs live reads and deletes against the apiserver.
	kubeclientset kubernetes.Interface
	// leaseCandidateLister lists candidates from the shared informer cache.
	leaseCandidateLister   listers.LeaseCandidateLister
	leaseCandidateInformer coordinationv1alpha1informers.LeaseCandidateInformer
	// leaseCandidatesSynced reports whether the informer cache has synced.
	leaseCandidatesSynced cache.InformerSynced
	// gcCheckPeriod is the interval between garbage collection sweeps.
	gcCheckPeriod time.Duration
}
// NewLeaseCandidateGC creates a new LeaseCandidateGCController that, once
// running, sweeps for expired lease candidates every gcCheckPeriod.
func NewLeaseCandidateGC(clientset kubernetes.Interface, gcCheckPeriod time.Duration, leaseCandidateInformer coordinationv1alpha1informers.LeaseCandidateInformer) *LeaseCandidateGCController {
	c := &LeaseCandidateGCController{
		kubeclientset:          clientset,
		leaseCandidateInformer: leaseCandidateInformer,
		gcCheckPeriod:          gcCheckPeriod,
	}
	c.leaseCandidateLister = leaseCandidateInformer.Lister()
	c.leaseCandidatesSynced = leaseCandidateInformer.Informer().HasSynced
	return c
}
// Run starts the garbage collector and blocks until stopCh is closed.
// It first waits for the lease candidate informer cache to sync, then runs a
// gc sweep every gcCheckPeriod.
func (c *LeaseCandidateGCController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer klog.Infof("Shutting down apiserver leasecandidate garbage collector")

	klog.Infof("Starting apiserver leasecandidate garbage collector")

	if !cache.WaitForCacheSync(stopCh, c.leaseCandidatesSynced) {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}

	// wait.Until already blocks until stopCh is closed, so the previous
	// `go wait.Until(...)` followed by `<-stopCh` spawned a redundant
	// goroutine; calling it directly is equivalent and simpler.
	wait.Until(c.gc, c.gcCheckPeriod, stopCh)
}
// gc performs one garbage collection sweep: any lease candidate whose renew
// time has expired — first according to the informer cache, then confirmed
// against the apiserver — is deleted.
func (c *LeaseCandidateGCController) gc() {
	candidates, err := c.leaseCandidateLister.List(labels.Everything())
	if err != nil {
		klog.ErrorS(err, "Error while listing lease candidates")
		return
	}
	for _, candidate := range candidates {
		// evaluate lease from cache
		if !isLeaseCandidateExpired(candidate) {
			continue
		}
		latest, err := c.kubeclientset.CoordinationV1alpha1().LeaseCandidates(candidate.Namespace).Get(context.TODO(), candidate.Name, metav1.GetOptions{})
		if err != nil {
			klog.ErrorS(err, "Error getting lc")
			continue
		}
		// evaluate lease from apiserver
		if !isLeaseCandidateExpired(latest) {
			continue
		}
		err = c.kubeclientset.CoordinationV1alpha1().LeaseCandidates(latest.Namespace).Delete(
			context.TODO(), latest.Name, metav1.DeleteOptions{})
		if err != nil {
			klog.ErrorS(err, "Error deleting lease")
		}
	}
}

View File

@ -0,0 +1,144 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"context"
"testing"
"time"
v1 "k8s.io/api/coordination/v1"
v1alpha1 "k8s.io/api/coordination/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/ptr"
)
// TestLeaseCandidateGCController verifies that the GC controller deletes
// expired lease candidates and leaves freshly-renewed ones untouched.
func TestLeaseCandidateGCController(t *testing.T) {
	tests := []struct {
		name                 string
		leaseCandidates      []*v1alpha1.LeaseCandidate
		expectedDeletedCount int
	}{
		{
			name: "delete expired lease candidates",
			leaseCandidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * leaseCandidateValidDuration)},
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						EmulationVersion:    "1.19.0",
						BinaryVersion:       "1.19.0",
						RenewTime:           ptr.To(metav1.NewMicroTime(time.Now().Add(-1 * leaseCandidateValidDuration))),
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * leaseCandidateValidDuration)},
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-B",
						EmulationVersion:    "1.19.0",
						BinaryVersion:       "1.19.0",
						RenewTime:           ptr.To(metav1.NewMicroTime(time.Now().Add(-1 * leaseCandidateValidDuration))),
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
				{
					// Renewed just now, so it must survive the sweep.
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate3",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-C",
						EmulationVersion:    "1.19.0",
						BinaryVersion:       "1.19.0",
						RenewTime:           ptr.To(metav1.NewMicroTime(time.Now())),
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
			},
			expectedDeletedCount: 2,
		},
		{
			name: "no expired lease candidates",
			leaseCandidates: []*v1alpha1.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha1.LeaseCandidateSpec{
						LeaseName:           "component-A",
						EmulationVersion:    "1.19.0",
						BinaryVersion:       "1.19.0",
						RenewTime:           ptr.To(metav1.NewMicroTime(time.Now())),
						PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
					},
				},
			},
			expectedDeletedCount: 0,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Cancel at the end of each case so the informer and controller
			// goroutines do not leak into subsequent cases.
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			client := fake.NewSimpleClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates()
			controller := NewLeaseCandidateGC(client, 10*time.Millisecond, leaseCandidateInformer)
			// Start is non-blocking; wrapping it in a goroutine was redundant.
			informerFactory.Start(ctx.Done())
			informerFactory.WaitForCacheSync(ctx.Done())

			// Create lease candidates
			for _, lc := range tc.leaseCandidates {
				if _, err := client.CoordinationV1alpha1().LeaseCandidates(lc.Namespace).Create(ctx, lc, metav1.CreateOptions{}); err != nil {
					t.Fatal(err)
				}
			}

			cache.WaitForCacheSync(ctx.Done(), controller.leaseCandidatesSynced)
			go controller.Run(ctx.Done())

			// With a 10ms GC period, 5s is ample; the previous 600s timeout
			// would stall a failing test for ten minutes.
			err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (done bool, err error) {
				lcs, err := client.CoordinationV1alpha1().LeaseCandidates("default").List(ctx, metav1.ListOptions{})
				if err != nil {
					return true, err
				}
				return len(lcs.Items) == len(tc.leaseCandidates)-tc.expectedDeletedCount, nil
			})
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}

View File

@ -23,6 +23,8 @@ import (
"time"
v1 "k8s.io/api/coordination/v1"
v1alpha1 "k8s.io/api/coordination/v1alpha1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
genericfeatures "k8s.io/apiserver/pkg/features"
@ -77,28 +79,6 @@ func TestMultipleLeaseCandidate(t *testing.T) {
cletest.pollForLease("foo", "default", "foo3")
}
// TestLeaderDisappear verifies that when the current leader's controller is
// stopped and its lease candidate deleted, leadership moves to the remaining
// candidate.
func TestLeaderDisappear(t *testing.T) {
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.CoordinatedLeaderElection, true)

	testServer, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
	if err != nil {
		t.Fatal(err)
	}
	defer testServer.TearDownFn()

	ctx, cancel := context.WithCancel(context.Background())
	cletest := setupCLE(testServer.ClientConfig, ctx, cancel, t)
	defer cletest.cleanup()

	// foo2 runs the older emulation version, so it should win the election.
	go cletest.createAndRunFakeController("foo1", "default", "foo", "1.20.0", "1.20.0")
	go cletest.createAndRunFakeController("foo2", "default", "foo", "1.20.0", "1.19.0")
	cletest.pollForLease("foo", "default", "foo2")

	// Stop the leader and remove its candidate; foo1 must take over.
	cletest.cancelController("foo2", "default")
	cletest.deleteLC("foo2", "default")
	cletest.pollForLease("foo", "default", "foo1")
}
func TestLeaseSwapIfBetterAvailable(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.CoordinatedLeaderElection, true)
@ -143,6 +123,51 @@ func TestUpgradeSkew(t *testing.T) {
cletest.pollForLease("foo", "default", "foo1-131")
}
// TestLeaseCandidateCleanup verifies that the apiserver's lease candidate
// garbage collector deletes a candidate whose renew time has long expired.
func TestLeaseCandidateCleanup(t *testing.T) {
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.CoordinatedLeaderElection, true)
	server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
	if err != nil {
		t.Fatal(err)
	}
	defer server.TearDownFn()
	config := server.ClientConfig

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	// RenewTime one hour in the past is well beyond the validity window, so
	// the GC must remove this object.
	expiredLC := &v1alpha1.LeaseCandidate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "expired",
			Namespace: "default",
		},
		Spec: v1alpha1.LeaseCandidateSpec{
			LeaseName:           "foobar",
			BinaryVersion:       "0.1.0",
			EmulationVersion:    "0.1.0",
			PreferredStrategies: []v1.CoordinatedLeaseStrategy{v1.OldestEmulationVersion},
			RenewTime:           &metav1.MicroTime{Time: time.Now().Add(-1 * time.Hour)},
		},
	}
	ctx := context.Background()
	if _, err = clientset.CoordinationV1alpha1().LeaseCandidates("default").Create(ctx, expiredLC, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// Poll until the object disappears (NotFound) or the timeout elapses.
	err = wait.PollUntilContextTimeout(ctx, 1000*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (done bool, err error) {
		_, err = clientset.CoordinationV1alpha1().LeaseCandidates("default").Get(ctx, "expired", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		// Fixes the "Timieout" typo and surfaces the poll error.
		t.Fatalf("timed out waiting for lease candidate gc: %v", err)
	}
}
// ctxCancelPair bundles a context with its cancel function so a running
// controller can later be stopped.
type ctxCancelPair struct {
	ctx    context.Context
	cancel func()
}
},
OnStoppedLeading: func() {
klog.Errorf("%s Lost leadership, stopping", name)
// klog.FlushAndExit(klog.ExitFlushTimeout, 1)
},
})
@ -273,13 +297,6 @@ func (t cleTest) cleanup() {
}
}
// deleteLC deletes the named lease candidate, reporting any failure as a
// test error.
func (t cleTest) deleteLC(name, namespace string) {
	if err := t.clientset.CoordinationV1alpha1().LeaseCandidates(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
		t.t.Error(err)
	}
}
func setupCLE(config *rest.Config, ctx context.Context, cancel func(), t *testing.T) cleTest {
clientset, err := kubernetes.NewForConfig(config)
if err != nil {