Mirror of https://github.com/k3s-io/kubernetes.git
Fix event broadcaster shutdown in multiple controllers
parent 81261d4693
commit 11b679c66a
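
The hunks below touch six controllers (DaemonSet, Deployment, Job, ReplicaSet, ReplicationController, StatefulSet) plus the quota integration tests, and they all apply one pattern: the constructor only creates the event broadcaster and stores it on the controller struct, while Run() starts the structured-logging and API-sink pipeline and defers Shutdown(). A minimal sketch of that lifecycle, with illustrative names (exampleController is not part of this commit):

package example

import (
    "context"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/record"
)

// exampleController mirrors the fields the real controllers gained:
// the broadcaster is stored so Run can start and stop it.
type exampleController struct {
    kubeClient       clientset.Interface
    eventBroadcaster record.EventBroadcaster
    eventRecorder    record.EventRecorder
}

// newExampleController only creates the broadcaster; nothing is started here.
func newExampleController(kubeClient clientset.Interface) *exampleController {
    eventBroadcaster := record.NewBroadcaster()
    return &exampleController{
        kubeClient:       kubeClient,
        eventBroadcaster: eventBroadcaster,
        eventRecorder:    eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"}),
    }
}

// Run owns the event pipeline: start it on entry, shut it down on exit.
func (c *exampleController) Run(ctx context.Context) {
    c.eventBroadcaster.StartStructuredLogging(0)
    c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
    defer c.eventBroadcaster.Shutdown()

    <-ctx.Done() // a real controller would run its workers here
}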
@@ -84,10 +84,13 @@ var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet")
 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
 type DaemonSetsController struct {
     kubeClient clientset.Interface
-    eventRecorder record.EventRecorder
+
+    eventBroadcaster record.EventBroadcaster
+    eventRecorder record.EventRecorder
 
     podControl controller.PodControlInterface
     crControl controller.ControllerRevisionControlInterface
 
     // An dsc is temporarily suspended after creating/deleting these many replicas.
     // It resumes normal action after observing the watch events for them.
@@ -138,8 +141,6 @@ func NewDaemonSetsController(
     failedPodsBackoff *flowcontrol.Backoff,
 ) (*DaemonSetsController, error) {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 
     if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
         if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -147,8 +148,9 @@ func NewDaemonSetsController(
         }
     }
     dsc := &DaemonSetsController{
         kubeClient: kubeClient,
-        eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
+        eventBroadcaster: eventBroadcaster,
+        eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
         podControl: controller.RealPodControl{
             KubeClient: kubeClient,
             Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
@@ -279,6 +281,11 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 // Run begins watching and syncing daemon sets.
 func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
+
+    dsc.eventBroadcaster.StartStructuredLogging(0)
+    dsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dsc.kubeClient.CoreV1().Events("")})
+    defer dsc.eventBroadcaster.Shutdown()
+
     defer dsc.queue.ShutDown()
 
     klog.Infof("Starting daemon sets controller")
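
One detail worth calling out in the Run() change above: deferred calls execute in LIFO order, so when Run returns, the work queue shuts down before the broadcaster does, and events recorded by draining workers can still be delivered. A hedged usage sketch; only Run's signature comes from the diff, the surrounding wiring is assumed:

// Assumed wiring: dsc was built earlier by NewDaemonSetsController.
// Cancelling ctx makes Run return; its defers then fire in LIFO order:
// dsc.queue.ShutDown() first, dsc.eventBroadcaster.Shutdown() after it.
ctx, cancel := context.WithCancel(context.Background())
go dsc.Run(ctx, 2)

// ... later, during controller-manager shutdown:
cancel()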
@@ -67,9 +67,11 @@ var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment")
 // in the system with actual running replica sets and pods.
 type DeploymentController struct {
     // rsControl is used for adopting/releasing replica sets.
     rsControl controller.RSControlInterface
     client clientset.Interface
-    eventRecorder record.EventRecorder
+
+    eventBroadcaster record.EventBroadcaster
+    eventRecorder record.EventRecorder
 
     // To allow injection of syncDeployment for testing.
     syncHandler func(ctx context.Context, dKey string) error
@@ -100,8 +102,6 @@ type DeploymentController struct {
 // NewDeploymentController creates a new DeploymentController.
 func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
 
     if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
         if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -109,9 +109,10 @@ func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInfor
         }
     }
     dc := &DeploymentController{
         client: client,
-        eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
-        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
+        eventBroadcaster: eventBroadcaster,
+        eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
     }
     dc.rsControl = controller.RealRSControl{
         KubeClient: client,
@@ -148,6 +149,12 @@ func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInfor
 // Run begins watching and syncing.
 func (dc *DeploymentController) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
+
+    // Start events processing pipeline.
+    dc.eventBroadcaster.StartStructuredLogging(0)
+    dc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.client.CoreV1().Events("")})
+    defer dc.eventBroadcaster.Shutdown()
+
     defer dc.queue.ShutDown()
 
     klog.InfoS("Starting controller", "controller", "deployment")
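
A consequence of moving StartRecordingToSink into Run(): the recorder created in the constructor can be called at any time, but its events are only delivered while Run's pipeline is active. An illustrative emission (d, rsName, and newReplicas are hypothetical, not lines from this commit):

// Hypothetical sync-loop snippet: d is the *apps.Deployment being synced.
// The event only reaches the API server while Run's pipeline is started.
dc.eventRecorder.Eventf(d, v1.EventTypeNormal, "ScalingReplicaSet",
    "Scaled up replica set %s to %d", rsName, newReplicas)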
@@ -114,7 +114,8 @@ type Controller struct {
     // Orphan deleted pods that still have a Job tracking finalizer to be removed
     orphanQueue workqueue.RateLimitingInterface
 
-    recorder record.EventRecorder
+    broadcaster record.EventBroadcaster
+    recorder record.EventRecorder
 
     podUpdateBatchPeriod time.Duration
 }
@@ -123,8 +124,6 @@ type Controller struct {
 // in sync with their corresponding Job objects.
 func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 
     if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
         ratelimiter.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -140,6 +139,7 @@ func NewController(podInformer coreinformers.PodInformer, jobInformer batchinfor
         finalizerExpectations: newUIDTrackingExpectations(),
         queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobBackOff, MaxJobBackOff), "job"),
         orphanQueue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(DefaultJobBackOff, MaxJobBackOff), "job_orphan_pod"),
+        broadcaster: eventBroadcaster,
         recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
     }
     if feature.DefaultFeatureGate.Enabled(features.JobReadyPods) {
@@ -178,6 +178,12 @@ func NewController(podInformer coreinformers.PodInformer, jobInformer batchinfor
 // Run the main goroutine responsible for watching and syncing jobs.
 func (jm *Controller) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
+
+    // Start events processing pipeline.
+    jm.broadcaster.StartStructuredLogging(0)
+    jm.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")})
+    defer jm.broadcaster.Shutdown()
+
     defer jm.queue.ShutDown()
     defer jm.orphanQueue.ShutDown()
 
@@ -88,6 +88,8 @@ type ReplicaSetController struct {
     kubeClient clientset.Interface
     podControl controller.PodControlInterface
 
+    eventBroadcaster record.EventBroadcaster
+
     // A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
     // It resumes normal action after observing the watch events for them.
     burstReplicas int
@@ -117,8 +119,6 @@ type ReplicaSetController struct {
 // NewReplicaSetController configures a replica set controller with the specified event recorder
 func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
     if err := metrics.Register(legacyregistry.Register); err != nil {
         klog.ErrorS(err, "unable to register metrics")
     }
@@ -130,13 +130,14 @@ func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInf
             KubeClient: kubeClient,
             Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}),
         },
+        eventBroadcaster,
     )
 }
 
 // NewBaseController is the implementation of NewReplicaSetController with additional injected
 // parameters so that it can also serve as the implementation of NewReplicationController.
 func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
-    gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController {
+    gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface, eventBroadcaster record.EventBroadcaster) *ReplicaSetController {
     if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
         ratelimiter.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
     }
@@ -145,6 +146,7 @@ func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer
         GroupVersionKind: gvk,
         kubeClient: kubeClient,
         podControl: podControl,
+        eventBroadcaster: eventBroadcaster,
         burstReplicas: burstReplicas,
         expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
         queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName),
@@ -188,17 +190,15 @@ func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer
     return rsc
 }
 
-// SetEventRecorder replaces the event recorder used by the ReplicaSetController
-// with the given recorder. Only used for testing.
-func (rsc *ReplicaSetController) SetEventRecorder(recorder record.EventRecorder) {
-    // TODO: Hack. We can't cleanly shutdown the event recorder, so benchmarks
-    // need to pass in a fake.
-    rsc.podControl = controller.RealPodControl{KubeClient: rsc.kubeClient, Recorder: recorder}
-}
-
 // Run begins watching and syncing.
 func (rsc *ReplicaSetController) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
+
+    // Start events processing pipeline.
+    rsc.eventBroadcaster.StartStructuredLogging(0)
+    rsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rsc.kubeClient.CoreV1().Events("")})
+    defer rsc.eventBroadcaster.Shutdown()
+
     defer rsc.queue.ShutDown()
 
     controllerName := strings.ToLower(rsc.Kind)
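
With SetEventRecorder gone, tests inject the broadcaster through NewBaseController's new final parameter instead of patching podControl after construction. A hedged sketch of such wiring, assuming imports of k8s.io/api/apps/v1, the client-go informers and record packages, and the kubernetes controller and replicaset packages (newTestController itself is illustrative; NewBroadcasterForTests and FakeRecorder are existing client-go test helpers):

// Illustrative test helper: the final argument replaces the old
// SetEventRecorder hack, and NewBroadcasterForTests(0) uses a zero
// retry sleep so Run can shut it down promptly.
func newTestController(client clientset.Interface) *replicaset.ReplicaSetController {
    informerFactory := informers.NewSharedInformerFactory(client, 0)
    return replicaset.NewBaseController(
        informerFactory.Apps().V1().ReplicaSets(),
        informerFactory.Core().V1().Pods(),
        client,
        replicaset.BurstReplicas,
        apps.SchemeGroupVersion.WithKind("ReplicaSet"),
        "replicaset_controller",
        "replicaset",
        controller.RealPodControl{KubeClient: client, Recorder: &record.FakeRecorder{}},
        record.NewBroadcasterForTests(0),
    )
}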
@@ -30,7 +30,6 @@ import (
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/tools/record"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/replicaset"
@@ -50,8 +49,6 @@ type ReplicationManager struct {
 // NewReplicationManager configures a replication manager with the specified event recorder
 func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
     return &ReplicationManager{
         *replicaset.NewBaseController(informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas,
             v1.SchemeGroupVersion.WithKind("ReplicationController"),
@@ -61,6 +58,7 @@ func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer cor
                 KubeClient: kubeClient,
                 Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replication-controller"}),
             }},
+            eventBroadcaster,
         ),
     }
 }
@@ -74,6 +74,8 @@ type StatefulSetController struct {
     revListerSynced cache.InformerSynced
     // StatefulSets that need to be synced.
     queue workqueue.RateLimitingInterface
+    // eventBroadcaster is the core of event processing pipeline.
+    eventBroadcaster record.EventBroadcaster
 }
 
 // NewStatefulSetController creates a new statefulset controller.
@@ -85,8 +87,6 @@ func NewStatefulSetController(
     kubeClient clientset.Interface,
 ) *StatefulSetController {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartStructuredLogging(0)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
     recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"})
     ssc := &StatefulSetController{
         kubeClient: kubeClient,
@@ -101,10 +101,11 @@ func NewStatefulSetController(
             recorder,
         ),
         pvcListerSynced: pvcInformer.Informer().HasSynced,
+        revListerSynced: revInformer.Informer().HasSynced,
         queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"),
         podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder},
 
-        revListerSynced: revInformer.Informer().HasSynced,
+        eventBroadcaster: eventBroadcaster,
     }
 
     podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -142,6 +143,12 @@ func NewStatefulSetController(
 // Run runs the statefulset controller.
 func (ssc *StatefulSetController) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
+
+    // Start events processing pipeline.
+    ssc.eventBroadcaster.StartStructuredLogging(0)
+    ssc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ssc.kubeClient.CoreV1().Events("")})
+    defer ssc.eventBroadcaster.Shutdown()
+
     defer ssc.queue.ShutDown()
 
     klog.Infof("Starting stateful set controller")
@@ -41,7 +41,6 @@ import (
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/record"
     watchtools "k8s.io/client-go/tools/watch"
     "k8s.io/kubernetes/pkg/controller"
     replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
@@ -102,7 +101,6 @@ func TestQuota(t *testing.T) {
         clientset,
         replicationcontroller.BurstReplicas,
     )
-    rm.SetEventRecorder(&record.FakeRecorder{})
     go rm.Run(ctx, 3)
 
     discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
@@ -333,7 +331,6 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
         clientset,
         replicationcontroller.BurstReplicas,
     )
-    rm.SetEventRecorder(&record.FakeRecorder{})
     go rm.Run(ctx, 3)
 
     discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
@@ -462,7 +459,6 @@ func TestQuotaLimitService(t *testing.T) {
         clientset,
         replicationcontroller.BurstReplicas,
     )
-    rm.SetEventRecorder(&record.FakeRecorder{})
     go rm.Run(ctx, 3)
 
     discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
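
The three rm.SetEventRecorder(&record.FakeRecorder{}) calls could simply be deleted because NewReplicationManager no longer starts an event pipeline at construction; the pipeline now lives and dies inside Run. Roughly how the tests' lifecycle works after the change (a sketch of the existing test pattern, not new code in this commit):

// The broadcaster starts inside Run and is shut down by Run's defer when
// the test cancels ctx, so no FakeRecorder stub is needed any more.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go rm.Run(ctx, 3)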