Merge pull request #80954 from zachomedia/fix-lock-release

Fix leader election lock release when using LeaseLocks

Kubernetes-commit: 68f6b09e807917d73446e6ee6146b4029d1aae79
commit b8dd4e3c0f
Author: Kubernetes Publisher
Date:   2020-10-26 20:57:58 -07:00

5 changed files with 168 additions and 9 deletions
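Background on the bug: when a LeaderElectionConfig sets ReleaseOnCancel, the elector calls release() on shutdown, writing out a vacated record so a successor can take the lock immediately instead of waiting out the lease. Endpoints and ConfigMap locks store that record in an annotation and accept any content, but a LeaseLock translates it into a coordination/v1 LeaseSpec, whose validation requires a positive leaseDurationSeconds. The zeroed record the old code wrote was therefore rejected, the holder identity was never cleared, and successors were blocked for a full lease duration. Below is a minimal sketch of the code path this fixes; the in-cluster config, namespace, lock name, and POD_NAME identity are illustrative assumptions, not part of this change.

package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	lock := &rl.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Namespace: "default", Name: "example-lock"},
		Client:     client.CoordinationV1(),
		LockConfig: rl.ResourceLockConfig{Identity: os.Getenv("POD_NAME")},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		// On ctx cancellation the elector calls release(), the method
		// fixed by this commit, to hand the lock off immediately.
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* do leader work */ },
			OnStoppedLeading: func() { klog.Info("lost leadership") },
		},
	})
}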

Godeps/Godeps.json (generated)

@@ -448,11 +448,11 @@
 	},
 	{
 		"ImportPath": "k8s.io/api",
-		"Rev": "54bcc311e327"
+		"Rev": "0415de894212"
 	},
 	{
 		"ImportPath": "k8s.io/apimachinery",
-		"Rev": "554eef9dbf66"
+		"Rev": "ab98f4dc11fb"
 	},
 	{
 		"ImportPath": "k8s.io/gengo",

go.mod

@@ -26,14 +26,14 @@ require (
 	golang.org/x/net v0.0.0-20200707034311-ab3426394381
 	golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-	k8s.io/api v0.0.0-20201020200614-54bcc311e327
-	k8s.io/apimachinery v0.0.0-20201020200440-554eef9dbf66
+	k8s.io/api v0.0.0-20201026202134-0415de894212
+	k8s.io/apimachinery v0.0.0-20201026201559-ab98f4dc11fb
 	k8s.io/klog/v2 v2.2.0
 	k8s.io/utils v0.0.0-20200729134348-d5654de09c73
 	sigs.k8s.io/yaml v1.2.0
 )
 
 replace (
-	k8s.io/api => k8s.io/api v0.0.0-20201020200614-54bcc311e327
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20201020200440-554eef9dbf66
+	k8s.io/api => k8s.io/api v0.0.0-20201026202134-0415de894212
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20201026201559-ab98f4dc11fb
 )

go.sum

@@ -335,8 +335,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.0.0-20201020200614-54bcc311e327/go.mod h1:ZstqwXfkQ55HZmXltK0DHYZaNJbMq4U/mnlyigw1pH0=
-k8s.io/apimachinery v0.0.0-20201020200440-554eef9dbf66/go.mod h1:6s3VNb000AUbBIxR7q3WHlbBwfpEGqIJsCG5gIX+0LI=
+k8s.io/api v0.0.0-20201026202134-0415de894212/go.mod h1:ZHfIarpAhcOE82iD+VeQxQXxatIY63UubJAgxUq67ig=
+k8s.io/apimachinery v0.0.0-20201026201559-ab98f4dc11fb/go.mod h1:6s3VNb000AUbBIxR7q3WHlbBwfpEGqIJsCG5gIX+0LI=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=

tools/leaderelection/leaderelection.go

@@ -290,8 +290,12 @@ func (le *LeaderElector) release() bool {
 	if !le.IsLeader() {
 		return true
 	}
+	now := metav1.Now()
 	leaderElectionRecord := rl.LeaderElectionRecord{
-		LeaderTransitions: le.observedRecord.LeaderTransitions,
+		LeaderTransitions:    le.observedRecord.LeaderTransitions,
+		LeaseDurationSeconds: 1,
+		RenewTime:            now,
+		AcquireTime:          now,
 	}
 	if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
 		klog.Errorf("Failed to release lock: %v", err)
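Why these particular values: for a LeaseLock, Lock.Update translates the LeaderElectionRecord into a coordination/v1 LeaseSpec, and Lease validation requires a positive leaseDurationSeconds, so the fully zeroed record the old code sent was rejected and the lock was never vacated. The sketch below approximates that translation to show where the zero value trips validation; the field mapping is paraphrased from the resourcelock package, not copied from it.

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
)

// toLeaseSpec approximates how a LeaderElectionRecord is persisted
// into a Lease object's spec.
func toLeaseSpec(ler rl.LeaderElectionRecord) coordinationv1.LeaseSpec {
	dur := int32(ler.LeaseDurationSeconds)
	transitions := int32(ler.LeaderTransitions)
	acquire := metav1.NewMicroTime(ler.AcquireTime.Time)
	renew := metav1.NewMicroTime(ler.RenewTime.Time)
	return coordinationv1.LeaseSpec{
		HolderIdentity:       &ler.HolderIdentity,
		LeaseDurationSeconds: &dur, // apiserver validation: must be > 0
		AcquireTime:          &acquire,
		RenewTime:            &renew,
		LeaseTransitions:     &transitions,
	}
}

func main() {
	// The record release() sent before this fix: the duration comes out
	// as 0, which the apiserver rejects, so the release update failed.
	empty := rl.LeaderElectionRecord{}
	fmt.Println(*toLeaseSpec(empty).LeaseDurationSeconds) // prints 0
}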

tools/leaderelection/leaderelection_test.go

@@ -917,3 +917,158 @@ func TestTryAcquireOrRenewEndpointsLeases(t *testing.T) {
 func TestTryAcquireOrRenewConfigMapsLeases(t *testing.T) {
 	testTryAcquireOrRenewMultiLock(t, "configmapsleases")
 }
+
+func testReleaseLease(t *testing.T, objectType string) {
+	tests := []struct {
+		name           string
+		observedRecord rl.LeaderElectionRecord
+		observedTime   time.Time
+		reactors       []Reactor
+
+		expectSuccess    bool
+		transitionLeader bool
+		outHolder        string
+	}{
+		{
+			name: "release acquired lock from no object",
+			reactors: []Reactor{
+				{
+					verb:       "get",
+					objectType: objectType,
+					reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName())
+					},
+				},
+				{
+					verb:       "create",
+					objectType: objectType,
+					reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, action.(fakeclient.CreateAction).GetObject(), nil
+					},
+				},
+				{
+					verb:       "update",
+					objectType: objectType,
+					reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, action.(fakeclient.UpdateAction).GetObject(), nil
+					},
+				},
+			},
+			expectSuccess: true,
+			outHolder:     "",
+		},
+	}
+
+	for i := range tests {
+		test := &tests[i]
+		t.Run(test.name, func(t *testing.T) {
+			// OnNewLeader is called async so we have to wait for it.
+			var wg sync.WaitGroup
+			wg.Add(1)
+			var reportedLeader string
+			var lock rl.Interface
+
+			objectMeta := metav1.ObjectMeta{Namespace: "foo", Name: "bar"}
+			resourceLockConfig := rl.ResourceLockConfig{
+				Identity:      "baz",
+				EventRecorder: &record.FakeRecorder{},
+			}
+			c := &fake.Clientset{}
+			for _, reactor := range test.reactors {
+				c.AddReactor(reactor.verb, objectType, reactor.reaction)
+			}
+			c.AddReactor("*", "*", func(action fakeclient.Action) (bool, runtime.Object, error) {
+				t.Errorf("unreachable action. testclient called too many times: %+v", action)
+				return true, nil, fmt.Errorf("unreachable action")
+			})
+
+			switch objectType {
+			case "endpoints":
+				lock = &rl.EndpointsLock{
+					EndpointsMeta: objectMeta,
+					LockConfig:    resourceLockConfig,
+					Client:        c.CoreV1(),
+				}
+			case "configmaps":
+				lock = &rl.ConfigMapLock{
+					ConfigMapMeta: objectMeta,
+					LockConfig:    resourceLockConfig,
+					Client:        c.CoreV1(),
+				}
+			case "leases":
+				lock = &rl.LeaseLock{
+					LeaseMeta:  objectMeta,
+					LockConfig: resourceLockConfig,
+					Client:     c.CoordinationV1(),
+				}
+			}
+
+			lec := LeaderElectionConfig{
+				Lock:          lock,
+				LeaseDuration: 10 * time.Second,
+				Callbacks: LeaderCallbacks{
+					OnNewLeader: func(l string) {
+						defer wg.Done()
+						reportedLeader = l
+					},
+				},
+			}
+			observedRawRecord := GetRawRecordOrDie(t, objectType, test.observedRecord)
+			le := &LeaderElector{
+				config:            lec,
+				observedRecord:    test.observedRecord,
+				observedRawRecord: observedRawRecord,
+				observedTime:      test.observedTime,
+				clock:             clock.RealClock{},
+			}
+			if !le.tryAcquireOrRenew(context.Background()) {
+				t.Errorf("unexpected result of tryAcquireOrRenew: [succeeded=%v]", true)
+			}
+
+			le.maybeReportTransition()
+
+			// Wait for a response to the leader transition, and add 1 so that we can track the final transition.
+			wg.Wait()
+			wg.Add(1)
+
+			if test.expectSuccess != le.release() {
+				t.Errorf("unexpected result of release: [succeeded=%v]", !test.expectSuccess)
+			}
+
+			le.observedRecord.AcquireTime = metav1.Time{}
+			le.observedRecord.RenewTime = metav1.Time{}
+			if le.observedRecord.HolderIdentity != test.outHolder {
+				t.Errorf("expected holder:\n\t%+v\ngot:\n\t%+v", test.outHolder, le.observedRecord.HolderIdentity)
+			}
+			if len(test.reactors) != len(c.Actions()) {
+				t.Errorf("wrong number of api interactions")
+			}
+			if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 {
+				t.Errorf("leader should have transitioned but did not")
+			}
+			if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 {
+				t.Errorf("leader should not have transitioned but did")
+			}
+
+			le.maybeReportTransition()
+			wg.Wait()
+			if reportedLeader != test.outHolder {
+				t.Errorf("reported leader was not the new leader. expected %q, got %q", test.outHolder, reportedLeader)
+			}
+		})
+	}
+}
+
+// Will test leader election using endpoints as the resource
+func TestReleaseLeaseEndpoints(t *testing.T) {
+	testReleaseLease(t, "endpoints")
+}
+
+// Will test leader election using configmaps as the resource
+func TestReleaseLeaseConfigMaps(t *testing.T) {
+	testReleaseLease(t, "configmaps")
+}
+
+// Will test leader election using leases as the resource
+func TestReleaseLeaseLeases(t *testing.T) {
+	testReleaseLease(t, "leases")
+}
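A note on how the new test pins behavior down: reactors on the fake clientset script the exact get/create/update sequence, the trailing catch-all reactor fails the test on any unscripted API call, and comparing len(c.Actions()) against the scripted reactors catches a call that never happened. A stripped-down illustration of that pattern follows; the lease object and namespace here are illustrative, not taken from the test.

package main

import (
	"context"
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	c := &fake.Clientset{}

	// Allow exactly one scripted verb on leases...
	c.AddReactor("get", "leases", func(action core.Action) (bool, runtime.Object, error) {
		return true, &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}, nil
	})
	// ...and fail loudly on anything else, pinning the call sequence.
	c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("unexpected action: %+v", action)
	})

	lease, err := c.CoordinationV1().Leases("foo").Get(context.TODO(), "bar", metav1.GetOptions{})
	fmt.Println(lease.GetName(), err) // bar <nil>
	fmt.Println(len(c.Actions()))     // 1 recorded API interaction
}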