chore(*): update election example (#82821)

* feat(*): simplify leader election example

* chore(*): fix with comment

* chore(*): add os.exit

* chore(*): fix typo

Kubernetes-commit: 1185012da65edf802f22aff24141c0466a2a3ac8
Author: Mengjiang Bao
Date: 2019-10-11 15:05:39 +08:00
Committed by: Kubernetes Publisher
Parent: 172b42569c
Commit: c51c673cf8
5 changed files with 49 additions and 52 deletions

examples/leaderelection/README.md

@@ -7,15 +7,16 @@ This example demonstrates how to use the leader election package.
 Run the following three commands in separate terminals. Each terminal needs a unique `id`.
 ```bash
 # first terminal
-go run *.go -kubeconfig=/my/config -logtostderr=true -id=1
+go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=1

 # second terminal
-go run *.go -kubeconfig=/my/config -logtostderr=true -id=2
+go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=2

 # third terminal
-go run *.go -kubeconfig=/my/config -logtostderr=true -id=3
+go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=3
 ```
+> You can ignore the `-kubeconfig` flag if you are running these commands in the Kubernetes cluster.

 Now kill the existing leader. You will see from the terminal outputs that one of the remaining two processes will be elected as the new leader.
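
The `-kubeconfig` note above works because the example falls back to in-cluster configuration when no kubeconfig path is given. The helper that does this is not part of this diff, so the following is only a minimal sketch of that selection, using the `rest` and `clientcmd` packages the example already imports; the function name `buildConfig` is illustrative rather than a quote of main.go.

```go
package main

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildConfig (illustrative name) prefers an explicit kubeconfig path and
// otherwise falls back to the in-cluster config derived from the pod's
// service account, which is why -kubeconfig can be omitted when the example
// runs inside a cluster.
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		return clientcmd.BuildConfigFromFlags("", kubeconfig)
	}
	return rest.InClusterConfig()
}
```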

examples/leaderelection/main.go

@@ -19,21 +19,18 @@ package main
 import (
 	"context"
 	"flag"
-	"fmt"
-	"log"
 	"os"
 	"os/signal"
-	"strings"
 	"syscall"
 	"time"

+	"github.com/google/uuid"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/client-go/transport"
 	"k8s.io/klog"
 )
@@ -62,13 +59,16 @@ func main() {
 	var id string

 	flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
-	flag.StringVar(&id, "id", "", "the holder identity name")
-	flag.StringVar(&leaseLockName, "lease-lock-name", "example", "the lease lock resource name")
-	flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "default", "the lease lock resource namespace")
+	flag.StringVar(&id, "id", uuid.New().String(), "the holder identity name")
+	flag.StringVar(&leaseLockName, "lease-lock-name", "", "the lease lock resource name")
+	flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "", "the lease lock resource namespace")
 	flag.Parse()

-	if id == "" {
-		klog.Fatal("unable to get id (missing id flag).")
+	if leaseLockName == "" {
+		klog.Fatal("unable to get lease lock resource name (missing lease-lock-name flag).")
+	}
+	if leaseLockNamespace == "" {
+		klog.Fatal("unable to get lease lock resource namespace (missing lease-lock-namespace flag).")
 	}

 	// leader election uses the Kubernetes API by writing to a
@@ -82,6 +82,29 @@ func main() {
 	}
 	client := clientset.NewForConfigOrDie(config)

+	run := func(ctx context.Context) {
+		// complete your controller loop here
+		klog.Info("Controller loop...")
+		select {}
+	}
+
+	// use a Go context so we can tell the leaderelection code when we
+	// want to step down
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// listen for interrupts or the Linux SIGTERM signal and cancel
+	// our context, which the leader election code will observe and
+	// step down
+	ch := make(chan os.Signal, 1)
+	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
+	go func() {
+		<-ch
+		klog.Info("Received termination, signaling shutdown")
+		cancel()
+	}()
+
 	// we use the Lease lock type since edits to Leases are less common
 	// and fewer objects in the cluster watch "all Leases".
 	lock := &resourcelock.LeaseLock{
@@ -95,25 +118,6 @@ func main() {
 		},
 	}

-	// use a Go context so we can tell the leaderelection code when we
-	// want to step down
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// use a client that will stop allowing new requests once the context ends
-	config.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))
-
-	// listen for interrupts or the Linux SIGTERM signal and cancel
-	// our context, which the leader election code will observe and
-	// step down
-	ch := make(chan os.Signal, 1)
-	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
-	go func() {
-		<-ch
-		log.Printf("Received termination, signaling shutdown")
-		cancel()
-	}()
-
 	// start the leader election code loop
 	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
 		Lock: lock,
@@ -131,12 +135,12 @@ func main() {
 			OnStartedLeading: func(ctx context.Context) {
 				// we're notified when we start - this is where you would
 				// usually put your code
-				klog.Infof("%s: leading", id)
+				run(ctx)
 			},
 			OnStoppedLeading: func() {
-				// we can do cleanup here, or after the RunOrDie method
-				// returns
-				klog.Infof("%s: lost", id)
+				// we can do cleanup here
+				klog.Infof("leader lost: %s", id)
+				os.Exit(0)
 			},
 			OnNewLeader: func(identity string) {
 				// we're notified when new leader elected
@@ -144,18 +148,8 @@ func main() {
 					// I just got the lock
 					return
 				}
-				klog.Infof("new leader elected: %v", identity)
+				klog.Infof("new leader elected: %s", identity)
 			},
 		},
 	})
-
-	// because the context is closed, the client should report errors
-	_, err = client.CoordinationV1().Leases(leaseLockNamespace).Get(leaseLockName, metav1.GetOptions{})
-	if err == nil || !strings.Contains(err.Error(), "the leader is shutting down") {
-		log.Fatalf("%s: expected to get an error when trying to make a client call: %v", id, err)
-	}
-
-	// we no longer hold the lease, so perform any cleanup and then
-	// exit
-	log.Printf("%s: done", id)
 }
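
Put together, the example after this change boils down to roughly the following. This is a condensed sketch assembled from the added lines above, not a verbatim copy of main.go: flag parsing is replaced by constants mirroring the README commands, the kubeconfig path is taken from the `KUBECONFIG` environment variable as a stand-in, and the lease durations shown are illustrative values, since that part of the file is outside the hunks in this diff.

```go
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

func main() {
	// values mirror the README commands above; the real example takes them from flags
	const (
		id                 = "1"
		leaseLockName      = "example"
		leaseLockNamespace = "default"
	)

	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		klog.Fatal(err)
	}
	client := clientset.NewForConfigOrDie(config)

	// cancel the context on SIGINT/SIGTERM so the leader election loop steps down
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		klog.Info("Received termination, signaling shutdown")
		cancel()
	}()

	// the named Lease object acts as the lock
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      leaseLockName,
			Namespace: leaseLockNamespace,
		},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock: lock,
		// illustrative durations; the exact values are outside this diff
		LeaseDuration: 60 * time.Second,
		RenewDeadline: 15 * time.Second,
		RetryPeriod:   5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// the controller loop goes here
				klog.Info("Controller loop...")
				select {}
			},
			OnStoppedLeading: func() {
				// we no longer hold the lease, so clean up and exit
				klog.Infof("leader lost: %s", id)
				os.Exit(0)
			},
			OnNewLeader: func(identity string) {
				if identity == id {
					return // this process just acquired the lock
				}
				klog.Infof("new leader elected: %s", identity)
			},
		},
	})
}
```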