mirror of https://github.com/k3s-io/kubernetes.git
Clean shutdown of kcm, ccm and scheduler

commit fe3616cafb (parent 55130ae2ab)
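The theme of the diff below: each component's Run previously parked forever on `select {}` and relied on process death for cleanup; it now blocks on its stop channel or context and returns, so deferred cleanup (notably `EventBroadcaster.Shutdown()`) actually executes. A minimal standalone sketch of the before/after shape; `runUntilStopped` and `stopCh` are hypothetical names, not taken from the diff:

package main

import "fmt"

// Before: "select {}" blocked forever, so a deferred cleanup below it
// could never run; shutdown meant killing the process.
//
// After: block on the stop channel and return, letting defers fire.
func runUntilStopped(stopCh <-chan struct{}) error {
	defer fmt.Println("cleanup ran") // stands in for EventBroadcaster.Shutdown()
	<-stopCh                         // wait until shutdown is requested
	return nil
}

func main() {
	stopCh := make(chan struct{})
	close(stopCh) // simulate the shutdown signal
	if err := runUntilStopped(stopCh); err != nil {
		fmt.Println(err)
	}
}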
@@ -43,8 +43,8 @@ type Config struct {
 	// the rest config for the master
 	Kubeconfig *restclient.Config
 
-	// the event sink
+	EventBroadcaster record.EventBroadcaster
 	EventRecorder record.EventRecorder
 }
 
 type completedConfig struct {
@@ -41,6 +41,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cacheddiscovery "k8s.io/client-go/discovery/cached"
 	"k8s.io/client-go/informers"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/metadata/metadatainformer"
 	restclient "k8s.io/client-go/rest"
@@ -174,13 +175,18 @@ func ResyncPeriod(c *config.CompletedConfig) func() time.Duration {
 	}
 }
 
-// Run runs the KubeControllerManagerOptions. This should never exit.
+// Run runs the KubeControllerManagerOptions.
 func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
 	// To help debugging, immediately log version
 	klog.Infof("Version: %+v", version.Get())
 
 	klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))
 
+	// Start events processing pipeline.
+	c.EventBroadcaster.StartStructuredLogging(0)
+	c.EventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.Client.CoreV1().Events("")})
+	defer c.EventBroadcaster.Shutdown()
+
 	if cfgz, err := configz.New(ConfigzName); err == nil {
 		cfgz.Set(c.ComponentConfig)
 	} else {
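For reference, the event pipeline started above follows the standard client-go `record` pattern: one broadcaster fans events out to sinks (structured logs and the Events API), per-component recorders feed it, and `Shutdown()` flushes and stops the sink goroutines. A sketch of the full lifecycle, using the fake clientset so it runs without a cluster; the component and pod names are made up:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	client := fake.NewSimpleClientset()

	// Fan events out to structured logs and the Events API.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartStructuredLogging(0)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
	defer broadcaster.Shutdown() // flush and stop sink goroutines on exit

	// Controllers emit through a recorder bound to the broadcaster.
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	recorder.Event(pod, v1.EventTypeNormal, "Example", "clean shutdown demo")
}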
@@ -213,7 +219,6 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
 	saTokenControllerInitFunc := serviceAccountTokenControllerStarter{rootClientBuilder: rootClientBuilder}.startServiceAccountTokenController
 
 	run := func(ctx context.Context, startSATokenController InitFunc, initializersFunc ControllerInitializersFunc) {
-
 		controllerContext, err := CreateControllerContext(c, rootClientBuilder, clientBuilder, ctx.Done())
 		if err != nil {
 			klog.Fatalf("error building controller context: %v", err)
@@ -227,13 +232,14 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
 		controllerContext.ObjectOrMetadataInformerFactory.Start(stopCh)
 		close(controllerContext.InformersStarted)
 
-		select {}
+		<-ctx.Done()
 	}
 
 	// No leader election, run directly
 	if !c.ComponentConfig.Generic.LeaderElection.LeaderElect {
-		run(context.TODO(), saTokenControllerInitFunc, NewControllerInitializers)
-		panic("unreachable")
+		ctx, _ := wait.ContextForChannel(stopCh)
+		run(ctx, saTokenControllerInitFunc, NewControllerInitializers)
+		return nil
 	}
 
 	id, err := os.Hostname()
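Both no-leader-election branches bridge the legacy `stopCh <-chan struct{}` plumbing to a `context.Context` via `wait.ContextForChannel`, so `run` can observe shutdown through `ctx.Done()` instead of never returning. A small usage sketch matching the two-value form the hunk uses (discarding the cancel func exactly as the diff does):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Bridge the old stop-channel style to context style: the returned
	// context is cancelled once stopCh is closed.
	ctx, _ := wait.ContextForChannel(stopCh)

	go close(stopCh) // simulate a shutdown signal

	<-ctx.Done() // unblocks when stopCh closes
	fmt.Println("context cancelled, shutting down cleanly")
}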
@@ -311,7 +317,8 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
 		})
 	}
 
-	select {}
+	<-stopCh
+	return nil
 }
 
 // ControllerContext defines the context object for controller
@@ -28,7 +28,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	clientgokubescheme "k8s.io/client-go/kubernetes/scheme"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/record"
@@ -430,12 +429,14 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy
 		return nil, err
 	}
 
-	eventRecorder := createRecorder(client, KubeControllerManagerUserAgent)
+	eventBroadcaster := record.NewBroadcaster()
+	eventRecorder := eventBroadcaster.NewRecorder(clientgokubescheme.Scheme, v1.EventSource{Component: KubeControllerManagerUserAgent})
 
 	c := &kubecontrollerconfig.Config{
 		Client:           client,
 		Kubeconfig:       kubeconfig,
-		EventRecorder:    eventRecorder,
+		EventBroadcaster: eventBroadcaster,
+		EventRecorder:    eventRecorder,
 	}
 	if err := s.ApplyTo(c); err != nil {
 		return nil, err
@@ -444,10 +445,3 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy
 
 	return c, nil
 }
-
-func createRecorder(kubeClient clientset.Interface, userAgent string) record.EventRecorder {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
-	return eventBroadcaster.NewRecorder(clientgokubescheme.Scheme, v1.EventSource{Component: userAgent})
-}
@@ -155,8 +155,9 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched *
 		return fmt.Errorf("unable to register configz: %s", err)
 	}
 
-	// Prepare the event broadcaster.
+	// Start events processing pipeline.
 	cc.EventBroadcaster.StartRecordingToSink(ctx.Done())
+	defer cc.EventBroadcaster.Shutdown()
 
 	// Setup healthz checks.
 	var checks []healthz.HealthChecker
@@ -32,6 +32,8 @@ import (
 	"k8s.io/kubernetes/cmd/kube-scheduler/app"
 	kubeschedulerconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 	"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
+
+	"k8s.io/klog/v2"
 )
 
 // TearDownFunc is to be called to tear down a test server.
@@ -61,8 +63,19 @@ type Logger interface {
 // enough time to remove temporary files.
 func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) {
 	ctx, cancel := context.WithCancel(context.Background())
+
+	var errCh chan error
 	tearDown := func() {
 		cancel()
+
+		// If the scheduler was started, let's wait for it to
+		// shutdown clearly.
+		if errCh != nil {
+			err, ok := <-errCh
+			if ok && err != nil {
+				klog.ErrorS(err, "Failed to shutdown test server clearly")
+			}
+		}
 		if len(result.TmpDir) != 0 {
 			os.RemoveAll(result.TmpDir)
 		}
@@ -103,8 +116,9 @@ func StartTestServer(t Logger, customFlags []string) (result TestServer, err err
 		return result, fmt.Errorf("failed to create config from options: %v", err)
 	}
 
-	errCh := make(chan error)
+	errCh = make(chan error)
 	go func(ctx context.Context) {
+		defer close(errCh)
 		if err := app.Run(ctx, cc, sched); err != nil {
 			errCh <- err
 		}
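The two test-server hunks above form a small handshake: `errCh` is hoisted so `tearDown` can see it, and `defer close(errCh)` guarantees the receive in `tearDown` terminates even when `app.Run` returns nil. A condensed, runnable sketch of that handshake; `runServer` is a hypothetical stand-in for `app.Run`:

package main

import (
	"context"
	"fmt"
)

// runServer stands in for app.Run: it blocks until ctx is done.
func runServer(ctx context.Context) error {
	<-ctx.Done()
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	errCh := make(chan error)
	go func() {
		defer close(errCh) // guarantees the receive below terminates
		if err := runServer(ctx); err != nil {
			errCh <- err
		}
	}()

	// tearDown: cancel, then wait for the server goroutine to finish.
	cancel()
	if err, ok := <-errCh; ok && err != nil {
		fmt.Println("failed to shutdown cleanly:", err)
	}
	fmt.Println("server shut down")
}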
@@ -335,7 +335,16 @@ func New(client clientset.Interface,
 // Run begins watching and scheduling. It starts scheduling and blocked until the context is done.
 func (sched *Scheduler) Run(ctx context.Context) {
 	sched.SchedulingQueue.Run()
-	wait.UntilWithContext(ctx, sched.scheduleOne, 0)
+
+	// We need to start scheduleOne loop in a dedicated goroutine,
+	// because scheduleOne function hangs on getting the next item
+	// from the SchedulingQueue.
+	// If there are no new pods to schedule, it will be hanging there
+	// and if done in this goroutine it will be blocking closing
+	// SchedulingQueue, in effect causing a deadlock on shutdown.
+	go wait.UntilWithContext(ctx, sched.scheduleOne, 0)
+
+	<-ctx.Done()
 	sched.SchedulingQueue.Close()
 }
 
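The comment added to `Scheduler.Run` is the heart of the fix: `scheduleOne` blocks popping the next pod, so running it inline would keep `Run` from ever reaching `SchedulingQueue.Close()` once the queue drains, deadlocking shutdown. A toy model of the same hazard and its resolution, with a channel standing in for the scheduling queue (all names here are illustrative):

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	work := make(chan string)
	done := make(chan struct{})

	// Run the blocking consume loop in a dedicated goroutine, as the hunk
	// does for scheduleOne. If this loop ran inline here, an empty queue
	// would block forever and the shutdown code below would never run.
	go func() {
		defer close(done)
		for {
			select {
			case <-ctx.Done():
				return
			case item := <-work:
				fmt.Println("scheduled", item)
			}
		}
	}()

	work <- "pod-a" // hand one item to the consumer
	cancel()        // simulate shutdown: ctx.Done() unblocks the loop
	<-done          // consumer exited; now it is safe to close the queue
	close(work)
	fmt.Println("queue closed without deadlock")
}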
@@ -45,7 +45,10 @@ type Config struct {
 	// the rest config for the master
 	Kubeconfig *restclient.Config
 
-	// the event sink
+	// EventBroadcaster is broadcaster events to all sinks.
+	EventBroadcaster record.EventBroadcaster
+
+	// EventRecord is a sink for events.
 	EventRecorder record.EventRecorder
 
 	// ClientBuilder will provide a client for this controller to use
@@ -34,6 +34,7 @@ import (
 	"k8s.io/apiserver/pkg/server/healthz"
 	cacheddiscovery "k8s.io/client-go/discovery/cached"
 	"k8s.io/client-go/informers"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/metadata/metadatainformer"
 	"k8s.io/client-go/restmapper"
@@ -142,6 +143,11 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface
 	// To help debugging, immediately log version
 	klog.Infof("Version: %+v", version.Get())
 
+	// Start events processing pipeline.
+	c.EventBroadcaster.StartStructuredLogging(0)
+	c.EventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.Client.CoreV1().Events("")})
+	defer c.EventBroadcaster.Shutdown()
+
 	// setup /configz endpoint
 	if cz, err := configz.New(ConfigzName); err == nil {
 		cz.Set(c.ComponentConfig)
@@ -182,8 +188,10 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface
 	}
 
 	if !c.ComponentConfig.Generic.LeaderElection.LeaderElect {
-		run(context.TODO(), controllerInitializers)
-		panic("unreachable")
+		ctx, _ := wait.ContextForChannel(stopCh)
+		run(ctx, controllerInitializers)
+		<-stopCh
+		return nil
 	}
 
 	// Identity used to distinguish between multiple cloud controller manager instances
@@ -251,7 +259,8 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface
 		})
 	}
 
-	select {}
+	<-stopCh
+	return nil
 }
 
 // startControllers starts the cloud specific controller loops.
@@ -304,7 +313,8 @@ func startControllers(ctx context.Context, cloud cloudprovider.Interface, contro
 	c.SharedInformers.Start(stopCh)
 	controllerContext.InformerFactory.Start(controllerContext.Stop)
 
-	select {}
+	<-stopCh
+	return nil
 }
 
 // InitCloudFunc is used to initialize cloud
@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/record"
@@ -175,7 +174,8 @@ func (o *CloudControllerManagerOptions) ApplyTo(c *config.Config, userAgent stri
 		return err
 	}
 
-	c.EventRecorder = createRecorder(c.Client, userAgent)
+	c.EventBroadcaster = record.NewBroadcaster()
+	c.EventRecorder = c.EventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: userAgent})
 
 	rootClientBuilder := clientbuilder.SimpleControllerClientBuilder{
 		ClientConfig: c.Kubeconfig,
@@ -241,10 +241,3 @@ func (o *CloudControllerManagerOptions) Config(allControllers, disabledByDefault
 
 	return c, nil
 }
-
-func createRecorder(kubeClient clientset.Interface, userAgent string) record.EventRecorder {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
-	return eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: userAgent})
-}
@@ -1444,7 +1444,10 @@ func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...schedule
 	f := testCtx.Scheduler.NextPod
 	testCtx.Scheduler.NextPod = func() (podInfo *framework.QueuedPodInfo) {
 		podInfo = f()
-		podInfo.Pod.Status.NominatedNodeName = "node-1"
+		// Scheduler.Next() may return nil when scheduler is shutting down.
+		if podInfo != nil {
+			podInfo.Pod.Status.NominatedNodeName = "node-1"
+		}
 		return podInfo
 	}
 	go testCtx.Scheduler.Run(testCtx.Ctx)