/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

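// This example demonstrates leader election using a Lease lock. Run several
// copies of the binary with distinct -id values, for example (flag values
// below are illustrative):
//
//	go run main.go -kubeconfig=$HOME/.kube/config -logtostderr=true \
//		-lease-lock-name=example -lease-lock-namespace=default -id=1
//
// and watch leadership move when the current leader is stopped.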
package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/google/uuid"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

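// buildConfig returns a client configuration from the given kubeconfig path,
// falling back to the in-cluster configuration when the path is empty.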
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
		return cfg, nil
	}

	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	klog.InitFlags(nil)

	var kubeconfig string
	var leaseLockName string
	var leaseLockNamespace string
	var id string
	var startedLeading atomic.Bool

	flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
	flag.StringVar(&id, "id", uuid.New().String(), "the holder identity name")
	flag.StringVar(&leaseLockName, "lease-lock-name", "", "the lease lock resource name")
	flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "", "the lease lock resource namespace")
	flag.Parse()

	if leaseLockName == "" {
		klog.Fatal("unable to get lease lock resource name (missing lease-lock-name flag).")
	}
	if leaseLockNamespace == "" {
		klog.Fatal("unable to get lease lock resource namespace (missing lease-lock-namespace flag).")
	}

	// leader election uses the Kubernetes API by writing to a
	// lock object, which can be a LeaseLock object (preferred),
	// a ConfigMap, or an Endpoints (deprecated) object.
	// Conflicting writes are detected and each client handles those actions
	// independently.
	config, err := buildConfig(kubeconfig)
	if err != nil {
		klog.Fatal(err)
	}
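	// NewForConfigOrDie panics if a clientset cannot be constructed from the config.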
	client := clientset.NewForConfigOrDie(config)

	run := func(ctx context.Context) {
		// complete your controller loop here
		klog.Info("Controller loop...")

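		// an empty select blocks forever, standing in for long-running
		// controller work; real code should return when ctx is cancelled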
		select {}
	}

	// use a Go context so we can tell the leaderelection code when we
	// want to step down
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// listen for interrupts or the Linux SIGTERM signal and cancel
	// our context; the leader election code will observe this and
	// step down
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		klog.Info("Received termination, signaling shutdown")
		cancel()
	}()

	// we use the Lease lock type since edits to Leases are less common
	// and fewer objects in the cluster watch "all Leases".
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      leaseLockName,
			Namespace: leaseLockNamespace,
		},
		Client: client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
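			// the holder identity must be unique per participant; if two
			// processes share an identity, both may believe they hold the lease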
			Identity: id,
		},
	}

	// start the leader election code loop
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock: lock,
		// IMPORTANT: you MUST ensure that any code you have that
		// is protected by the lease terminates **before**
		// you call cancel. Otherwise, you could have a background
		// loop still running and another process could
		// get elected before your background loop finished, violating
		// the stated goal of the lease.
		ReleaseOnCancel: true,
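		// LeaseDuration is how long non-leader candidates wait after observing
		// a renewal before they attempt to acquire leadership; RenewDeadline is
		// how long the acting leader keeps retrying to refresh the lease before
		// giving up; RetryPeriod is the wait between individual retry actions.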
		LeaseDuration: 60 * time.Second,
		RenewDeadline: 15 * time.Second,
		RetryPeriod:   5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// we're notified when we start - this is where you would
				// usually put your code
				startedLeading.Store(true)
				run(ctx)
			},
			OnStoppedLeading: func() {
				// we can do cleanup here, but note that this callback is always called
				// when the LeaderElector exits, even if it did not start leading.
				// Therefore, we should check whether we actually started leading before
				// performing any cleanup, to avoid unexpected behavior.
				klog.Infof("leader lost: %s", id)

				// only perform cleanup if we actually started leading
				if startedLeading.Load() {
					// perform cleanup operations here, for example releasing
					// resources or closing connections
					klog.Info("Performing cleanup operations...")
				} else {
					klog.Info("No cleanup needed as we never started leading.")
				}
				os.Exit(0)
			},
			OnNewLeader: func(identity string) {
				// we're notified when a new leader is elected
				if identity == id {
					// I just got the lock
					return
				}
				klog.Infof("new leader elected: %s", identity)
			},
		},
	})
}