Merge pull request #75240 from kairen/update-client-go-example

client-go: update leader election example
Kubernetes Prow Robot, 2019-03-20 05:24:23 -07:00, committed by GitHub
commit b3be84dcc5
6 changed files with 92 additions and 41 deletions

staging/src/k8s.io/client-go/examples/BUILD

@@ -15,6 +15,7 @@ filegroup(
"//staging/src/k8s.io/client-go/examples/create-update-delete-deployment:all-srcs",
"//staging/src/k8s.io/client-go/examples/fake-client:all-srcs",
"//staging/src/k8s.io/client-go/examples/in-cluster-client-configuration:all-srcs",
"//staging/src/k8s.io/client-go/examples/leader-election:all-srcs",
"//staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration:all-srcs",
"//staging/src/k8s.io/client-go/examples/workqueue:all-srcs",
"//staging/src/k8s.io/client-go/informers:all-srcs",

staging/src/k8s.io/client-go/examples/README.md

@@ -42,6 +42,7 @@ import _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
Register a custom resource type with the API, create/update/query this custom
type, and write a controller that drives the cluster state based on the changes to
the custom resources.
- [**Leader election**](./leader-election): Demonstrates the use of the leader election package, which can be used to implement HA controllers.
[informer]: https://godoc.org/k8s.io/client-go/tools/cache#NewInformer

staging/src/k8s.io/client-go/examples/leader-election/BUILD

@@ -1,11 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "leader-election",
embed = [":go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/leaderelection/example",
importpath = "k8s.io/client-go/tools/leaderelection/example",
visibility = ["//visibility:private"],
importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/examples/leader-election",
importpath = "k8s.io/client-go/examples/leader-election",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@@ -18,12 +28,6 @@ go_library(
],
)
go_binary(
name = "example",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@@ -35,5 +39,4 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

staging/src/k8s.io/client-go/examples/leader-election/README.md

@@ -0,0 +1,21 @@
# Leader Election Example
This example demonstrates how to use the leader election package.
## Running
Run the following three commands in separate terminals. Each terminal needs a unique `id`.
```bash
# first terminal
$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=1
# second terminal
$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=2
# third terminal
$ go run *.go -kubeconfig=/my/config -logtostderr=true -id=3
```
> You can omit the `-kubeconfig` flag if you are running these commands inside a Kubernetes cluster; the example then falls back to the in-cluster configuration.
Now kill the existing leader. You will see from the terminal outputs that one of the remaining two processes will be elected as the new leader.
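
While the election is running, the state it writes to the API server can be inspected directly. The short Go sketch below is not part of this change; it reads the Lease that the example creates with its default flag values (`example` in the `default` namespace) and prints the current holder identity. It uses the same pre-context `Get` signature as the code in this pull request, so newer client-go releases would need a `context` argument.

```go
// lease_inspect.go: a minimal sketch for checking who currently holds the lock.
package main

import (
	"flag"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	flag.Parse()

	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Defaults of the example's -lease-lock-name and -lease-lock-namespace flags.
	lease, err := client.CoordinationV1().Leases("default").Get("example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Printf("current leader: %s\n", *lease.Spec.HolderIdentity)
	} else {
		fmt.Println("no current leader")
	}
}
```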

staging/src/k8s.io/client-go/examples/leader-election/main.go

@@ -28,7 +28,7 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
@@ -37,40 +37,59 @@ import (
"k8s.io/klog"
)
// main demonstrates a leader elected process that will step down if interrupted.
func buildConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig != "" {
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
return cfg, nil
}
cfg, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
return cfg, nil
}
func main() {
klog.InitFlags(nil)
var kubeconfig string
var leaseLockName string
var leaseLockNamespace string
var id string
flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&id, "id", "", "the holder identity name")
flag.StringVar(&leaseLockName, "lease-lock-name", "example", "the lease lock resource name")
flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "default", "the lease lock resource namespace")
flag.Parse()
args := flag.Args()
if len(args) != 3 {
log.Fatalf("requires three arguments: ID NAMESPACE CONFIG_MAP_NAME (%d)", len(args))
if id == "" {
klog.Fatal("unable to get id (missing id flag).")
}
// leader election uses the Kubernetes API by writing to a ConfigMap or Endpoints
// object. Conflicting writes are detected and each client handles those actions
// leader election uses the Kubernetes API by writing to a
// lock object, which can be a LeaseLock object (preferred),
// a ConfigMap, or an Endpoints (deprecated) object.
// Conflicting writes are detected and each client handles those actions
// independently.
var config *rest.Config
var err error
if kubeconfig := os.Getenv("KUBECONFIG"); len(kubeconfig) > 0 {
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
} else {
config, err = rest.InClusterConfig()
}
config, err := buildConfig(kubeconfig)
if err != nil {
log.Fatalf("failed to create client: %v", err)
klog.Fatal(err)
}
client := clientset.NewForConfigOrDie(config)
// we use the ConfigMap lock type since edits to ConfigMaps are less common
// and fewer objects in the cluster watch "all ConfigMaps" (unlike the older
// Endpoints lock type, where quite a few system agents like the kube-proxy
// and ingress controllers must watch endpoints).
id := args[0]
lock := &resourcelock.ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: args[1],
Name: args[2],
// we use the Lease lock type since edits to Leases are less common
// and fewer objects in the cluster watch "all Leases".
lock := &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: leaseLockName,
Namespace: leaseLockNamespace,
},
Client: kubernetes.NewForConfigOrDie(config).CoreV1(),
Client: client.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
},
@@ -83,7 +102,6 @@ func main() {
// use a client that will stop allowing new requests once the context ends
config.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))
exampleClient := kubernetes.NewForConfigOrDie(config).CoreV1()
// listen for interrupts or the Linux SIGTERM signal and cancel
// our context, which the leader election code will observe and
@@ -113,18 +131,26 @@ func main() {
OnStartedLeading: func(ctx context.Context) {
// we're notified when we start - this is where you would
// usually put your code
log.Printf("%s: leading", id)
klog.Infof("%s: leading", id)
},
OnStoppedLeading: func() {
// we can do cleanup here, or after the RunOrDie method
// returns
log.Printf("%s: lost", id)
klog.Infof("%s: lost", id)
},
OnNewLeader: func(identity string) {
// we're notified when new leader elected
if identity == id {
// I just got the lock
return
}
klog.Infof("new leader elected: %v", identity)
},
},
})
// because the context is closed, the client should report errors
_, err = exampleClient.ConfigMaps(args[1]).Get(args[2], metav1.GetOptions{})
_, err = client.CoordinationV1().Leases(leaseLockNamespace).Get(leaseLockName, metav1.GetOptions{})
if err == nil || !strings.Contains(err.Error(), "the leader is shutting down") {
log.Fatalf("%s: expected to get an error when trying to make a client call: %v", id, err)
}
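
Taken together, the updated example follows the pattern sketched below: build a client config, wrap it so in-flight requests fail once leadership is lost, construct a `LeaseLock`, and hand it to `leaderelection.RunOrDie` with callbacks. This is an illustrative condensation rather than the PR's exact code: the lease durations are placeholder values, the holder identity falls back to the hostname, and in-cluster configuration is assumed for brevity.

```go
// election_sketch.go: a condensed, illustrative version of the example above.
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/transport"
	"k8s.io/klog"
)

func runElection(ctx context.Context, config *rest.Config, id, name, ns string) {
	// Wrap the config so that requests fail once ctx is cancelled; a deposed
	// leader should not keep talking to the API server.
	config.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))
	client := clientset.NewForConfigOrDie(config)

	// Lease-based lock, as in the updated example: fewer components watch
	// "all Leases" than the older Endpoints or ConfigMap lock types.
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: name, Namespace: ns},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,             // give the lease back when ctx ends
		LeaseDuration:   60 * time.Second, // placeholder durations, tune for real workloads
		RenewDeadline:   15 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				klog.Infof("%s: leading", id) // leader-only work goes here
			},
			OnStoppedLeading: func() {
				klog.Infof("%s: lost", id) // clean up leader-only state here
			},
			OnNewLeader: func(identity string) {
				if identity != id {
					klog.Infof("new leader elected: %v", identity)
				}
			},
		},
	})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Assumes the process runs inside a cluster; the example's buildConfig
	// also supports an out-of-cluster kubeconfig.
	config, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	id, _ := os.Hostname() // hostname as the holder identity for this sketch
	runElection(ctx, config, id, "example", "default")
}
```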

staging/src/k8s.io/client-go/tools/leaderelection/BUILD

@@ -55,7 +55,6 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/tools/leaderelection/example:all-srcs",
"//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:all-srcs",
],
tags = ["automanaged"],