commit 08522f8e5a
parent 2659b3755a

move scheduler to use v1beta1.events

Signed-off-by: Yassine TIJANI <ytijani@vmware.com>
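The diff below swaps the scheduler's event plumbing from the legacy core/v1 recorder in k8s.io/client-go/tools/record to the events.k8s.io/v1beta1 API in k8s.io/client-go/tools/events. The new broadcaster binds its sink at construction time, takes the reporting controller as a plain string instead of a v1.EventSource, and starts recording against a stop channel. A minimal sketch of the new wiring, using only calls that appear in this diff (the helper name is hypothetical):

    package schedulerevents

    import (
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/tools/events"
    )

    // newSchedulerRecorder shows the v1beta1 event pipeline end to end.
    func newSchedulerRecorder(client kubernetes.Interface, stopCh <-chan struct{}) events.EventRecorder {
        // The sink is supplied up front, not at StartRecordingToSink.
        broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})
        // StartRecordingToSink now only needs a stop channel.
        broadcaster.StartRecordingToSink(stopCh)
        // NewRecorder takes the reporting controller name as a string.
        return broadcaster.NewRecorder(scheme.Scheme, "scheduler")
    }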
@@ -12,7 +12,9 @@ go_library(
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
     ],
@@ -22,7 +22,9 @@ import (
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    "k8s.io/client-go/kubernetes/typed/events/v1beta1"
     restclient "k8s.io/client-go/rest"
+    "k8s.io/client-go/tools/events"
     "k8s.io/client-go/tools/leaderelection"
     "k8s.io/client-go/tools/record"
     kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -45,9 +47,14 @@ type Config struct {
     Client          clientset.Interface
     InformerFactory informers.SharedInformerFactory
     PodInformer     coreinformers.PodInformer
-    EventClient     v1core.EventsGetter
-    Recorder        record.EventRecorder
-    Broadcaster     record.EventBroadcaster
+    EventClient v1beta1.EventsGetter
+
+    // TODO: Remove the following after fully migrating to the new events api.
+    CoreEventClient           v1core.EventsGetter
+    LeaderElectionBroadcaster record.EventBroadcaster
+
+    Recorder    events.EventRecorder
+    Broadcaster events.EventBroadcaster

     // LeaderElection is optional.
     LeaderElection *leaderelection.LeaderElectionConfig
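Note that Config now carries two event pipelines side by side: EventClient, Recorder, and Broadcaster use the new events.k8s.io/v1beta1 types, while CoreEventClient and LeaderElectionBroadcaster keep the legacy core/v1 machinery alive for leader election until the migration finishes, hence the TODO.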
@@ -29,10 +29,10 @@ go_library(
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
@@ -31,10 +31,10 @@ import (
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+    "k8s.io/client-go/tools/events"
     "k8s.io/client-go/tools/leaderelection"
     "k8s.io/client-go/tools/leaderelection/resourcelock"
     "k8s.io/client-go/tools/record"
@@ -237,13 +237,15 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
     }

     // Prepare event clients.
-    eventBroadcaster := record.NewBroadcaster()
-    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: eventClient.EventsV1beta1().Events("")})
+    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, c.ComponentConfig.SchedulerName)
+    leaderElectionBroadcaster := record.NewBroadcaster()
+    leaderElectionRecorder := leaderElectionBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})

     // Set up leader election if enabled.
     var leaderElectionConfig *leaderelection.LeaderElectionConfig
     if c.ComponentConfig.LeaderElection.LeaderElect {
-        leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, recorder)
+        leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, leaderElectionRecorder)
         if err != nil {
             return nil, err
         }
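Options.Config() accordingly builds two broadcasters: events.NewBroadcaster feeds scheduler events to the v1beta1 sink, while a legacy record.NewBroadcaster still produces the core/v1 recorder handed to makeLeaderElectionConfig, presumably because the leader-election resource lock continues to expect a record.EventRecorder.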
@@ -252,9 +254,11 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
     c.Client = client
     c.InformerFactory = informers.NewSharedInformerFactory(client, 0)
     c.PodInformer = factory.NewPodInformer(client, 0)
-    c.EventClient = eventClient
+    c.EventClient = eventClient.EventsV1beta1()
+    c.CoreEventClient = eventClient.CoreV1()
     c.Recorder = recorder
     c.Broadcaster = eventBroadcaster
+    c.LeaderElectionBroadcaster = leaderElectionBroadcaster
     c.LeaderElection = leaderElectionConfig

     return c, nil
@@ -295,7 +299,7 @@ func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElec

 // createClients creates a kube client and an event client from the given config and masterOverride.
 // TODO remove masterOverride when CLI flags are removed.
-func createClients(config componentbaseconfig.ClientConnectionConfiguration, masterOverride string, timeout time.Duration) (clientset.Interface, clientset.Interface, v1core.EventsGetter, error) {
+func createClients(config componentbaseconfig.ClientConnectionConfiguration, masterOverride string, timeout time.Duration) (clientset.Interface, clientset.Interface, clientset.Interface, error) {
     if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 {
         klog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
     }
@@ -333,5 +337,5 @@ func createClients(config componentbaseconfig.ClientConnectionConfiguration, mas
         return nil, nil, nil, err
     }

-    return client, leaderElectionClient, eventClient.CoreV1(), nil
+    return client, leaderElectionClient, eventClient, nil
 }
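With this change createClients returns the event client as a full clientset.Interface rather than a v1core.EventsGetter, so the caller can reach both the EventsV1beta1() and CoreV1() event interfaces from the same client.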
@@ -36,7 +36,7 @@ import (
     "k8s.io/apiserver/pkg/server/mux"
     "k8s.io/apiserver/pkg/server/routes"
     "k8s.io/apiserver/pkg/util/term"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/tools/leaderelection"
     cliflag "k8s.io/component-base/cli/flag"
     "k8s.io/component-base/cli/globalflag"
@@ -190,10 +190,11 @@ func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error

     // Prepare the event broadcaster.
     if cc.Broadcaster != nil && cc.EventClient != nil {
-        cc.Broadcaster.StartLogging(klog.V(6).Infof)
-        cc.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: cc.EventClient.Events("")})
+        cc.Broadcaster.StartRecordingToSink(stopCh)
+    }
+    if cc.LeaderElectionBroadcaster != nil && cc.CoreEventClient != nil {
+        cc.LeaderElectionBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cc.CoreEventClient.Events("")})
     }

     // Setup healthz checks.
     var checks []healthz.HealthzChecker
     if cc.ComponentConfig.LeaderElection.LeaderElect {
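Two details of the new broadcaster surface here: the StartLogging(klog.V(6).Infof) call disappears, and StartRecordingToSink takes only a stop channel because the sink was already bound in events.NewBroadcaster. The leader-election broadcaster keeps the old sink-at-start shape.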
@@ -39,7 +39,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -67,6 +67,7 @@ go_test(
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/events/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -80,7 +81,7 @@ go_test(
         "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
     ],
 )
@@ -46,7 +46,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -45,7 +45,7 @@ import (
     storagelistersv1 "k8s.io/client-go/listers/storage/v1"
     storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
     "k8s.io/client-go/tools/cache"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/klog"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/features"
@@ -114,7 +114,7 @@ type Config struct {
     Error func(*v1.Pod, error)

     // Recorder is the EventRecorder to use
-    Recorder record.EventRecorder
+    Recorder events.EventRecorder

     // Close this to shut down the scheduler.
     StopEverything <-chan struct{}
@@ -34,7 +34,7 @@ import (
     storageinformersv1 "k8s.io/client-go/informers/storage/v1"
     storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
     kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -130,7 +130,7 @@ func New(client clientset.Interface,
     pdbInformer policyinformers.PodDisruptionBudgetInformer,
     storageClassInformer storageinformersv1.StorageClassInformer,
     csiNodeInformer storageinformersv1beta1.CSINodeInformer,
-    recorder record.EventRecorder,
+    recorder events.EventRecorder,
     schedulerAlgorithmSource kubeschedulerconfig.SchedulerAlgorithmSource,
     stopCh <-chan struct{},
     registry framework.Registry,
@@ -271,7 +271,7 @@ func (sched *Scheduler) Config() *factory.Config {
 // NOTE: This function modifies "pod". "pod" should be copied before being passed.
 func (sched *Scheduler) recordSchedulingFailure(pod *v1.Pod, err error, reason string, message string) {
     sched.config.Error(pod, err)
-    sched.config.Recorder.Event(pod, v1.EventTypeWarning, "FailedScheduling", message)
+    sched.config.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message)
     sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{
         Type:   v1.PodScheduled,
         Status: v1.ConditionFalse,
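The recorder interface itself changes shape here. The old record.EventRecorder took (object, eventtype, reason, message); the call sites in this diff imply the events.EventRecorder signature below, with a second object parameter and a required action string. An annotated sketch of the call from this hunk:

    // events.EventRecorder (k8s.io/client-go/tools/events):
    //   Eventf(regarding runtime.Object, related runtime.Object,
    //          eventtype, reason, action, note string, args ...interface{})
    //
    // regarding: the object the event is about (here, the pod).
    // related:   an optional secondary object; nil when there is none
    //            (the preempt() hunk below passes the preemptor).
    // action:    what the controller was doing when the event fired.
    sched.config.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message)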
@@ -328,7 +328,8 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
             klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
             return "", err
         }
-        sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
+        sched.config.Recorder.Eventf(victim, preemptor, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
+
     }
     metrics.PreemptionVictims.Set(float64(len(victims)))
 }
@@ -436,7 +437,7 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error {
     metrics.DeprecatedBindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
     metrics.SchedulingLatency.WithLabelValues(metrics.Binding).Observe(metrics.SinceInSeconds(bindingStart))
     metrics.DeprecatedSchedulingLatency.WithLabelValues(metrics.Binding).Observe(metrics.SinceInSeconds(bindingStart))
-    sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, b.Target.Name)
+    sched.config.Recorder.Eventf(assumed, nil, v1.EventTypeNormal, "Scheduled", "Binding", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, b.Target.Name)
     return nil
 }
@@ -450,7 +451,7 @@ func (sched *Scheduler) scheduleOne() {
         return
     }
     if pod.DeletionTimestamp != nil {
-        sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
+        sched.config.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
         klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
         return
     }
@@ -27,6 +27,7 @@ import (
     "time"

     v1 "k8s.io/api/core/v1"
+    "k8s.io/api/events/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -40,7 +41,7 @@ import (
     "k8s.io/client-go/kubernetes/scheme"
     corelister "k8s.io/client-go/listers/core/v1"
     clientcache "k8s.io/client-go/tools/cache"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@@ -179,8 +180,7 @@ func TestSchedulerCreation(t *testing.T) {
     informerFactory := informers.NewSharedInformerFactory(client, 0)

     testSource := "testProvider"
-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(t.Logf).Stop()
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})

     defaultBindTimeout := int64(30)
     factory.RegisterFitPredicate("PredicateOne", PredicateOne)
@@ -201,7 +201,7 @@ func TestSchedulerCreation(t *testing.T) {
         informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
         informerFactory.Storage().V1().StorageClasses(),
         informerFactory.Storage().V1beta1().CSINodes(),
-        eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler"}),
+        eventBroadcaster.NewRecorder(scheme.Scheme, "scheduler"),
         kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &testSource},
         stopCh,
         EmptyPluginRegistry,
@@ -215,11 +215,11 @@ func TestSchedulerCreation(t *testing.T) {
 }

 func TestScheduler(t *testing.T) {
-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(t.Logf).Stop()
+    testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
+    client := clientsetfake.NewSimpleClientset(&testNode)
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})
     errS := errors.New("scheduler")
     errB := errors.New("binder")
-    testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}

     table := []struct {
         name string
@@ -269,7 +269,6 @@ func TestScheduler(t *testing.T) {

         stop := make(chan struct{})
         defer close(stop)
-        client := clientsetfake.NewSimpleClientset(&testNode)
         informerFactory := informers.NewSharedInformerFactory(client, 0)
         nl := informerFactory.Core().V1().Nodes().Lister()
@@ -310,11 +309,12 @@ func TestScheduler(t *testing.T) {
                 return item.sendPod
             },
             Framework: EmptyFramework,
-            Recorder:  eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler"}),
+            Recorder:  eventBroadcaster.NewRecorder(scheme.Scheme, "scheduler"),
             VolumeBinder: volumebinder.NewFakeVolumeBinder(&volumescheduling.FakeVolumeBinderConfig{AllBound: true}),
         })
         called := make(chan struct{})
-        events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
+        stopFunc := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
+            e, _ := obj.(*v1beta1.Event)
             if e, a := item.eventReason, e.Reason; e != a {
                 t.Errorf("expected %v, got %v", e, a)
             }
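StartEventWatcher changes as well: the callback now receives a runtime.Object that must be type-asserted to the v1beta1 Event, and the call returns a stop function instead of a watch.Interface with a Stop method. A test-style fragment built from the calls in this hunk (assumes a *testing.T in scope):

    stopFunc := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
        // The broadcaster delivers events.k8s.io/v1beta1 objects, not core/v1 Events.
        e, ok := obj.(*v1beta1.Event)
        if !ok {
            return
        }
        t.Logf("observed event with reason %q", e.Reason)
    })
    defer stopFunc() // replaces the old events.Stop()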
@@ -337,7 +337,7 @@ func TestScheduler(t *testing.T) {
         if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
             t.Errorf("error: %s", diff.ObjectDiff(e, a))
         }
-        events.Stop()
+        stopFunc()
     })
 }
 }
@@ -646,7 +646,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {

 // queuedPodStore: pods queued before processing.
 // scache: scheduler cache that might contain assumed pods.
-func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder events.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
     algo := core.NewGenericScheduler(
         scache,
         internalqueue.NewSchedulingQueue(nil, nil),
@@ -683,7 +683,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
         Error: func(p *v1.Pod, err error) {
             errChan <- err
         },
-        Recorder:            &record.FakeRecorder{},
+        Recorder:            &events.FakeRecorder{},
         PodConditionUpdater: fakePodConditionUpdater{},
         PodPreemptor:        fakePodPreemptor{},
         Framework:           EmptyFramework,
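The tests swap record.FakeRecorder for events.FakeRecorder, an analogue of the old fake that the tools/events package provides for the new interface.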
@@ -740,7 +740,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
         Error: func(p *v1.Pod, err error) {
             queuedPodStore.AddIfNotPresent(p)
         },
-        Recorder:            &record.FakeRecorder{},
+        Recorder:            &events.FakeRecorder{},
         PodConditionUpdater: fakePodConditionUpdater{},
         PodPreemptor:        fakePodPreemptor{},
         StopEverything:      stop,
@@ -751,7 +751,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
     return sched, bindingChan
 }

-func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBinder, stop <-chan struct{}, broadcaster record.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBinder, stop <-chan struct{}, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
     testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
     queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
     pod := podWithID("foo", "")
@@ -769,7 +769,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBi
         predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
     }

-    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler"})
+    recorder := broadcaster.NewRecorder(scheme.Scheme, "scheduler")
     s, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, predicateMap, recorder)
     informerFactory.Start(stop)
     informerFactory.WaitForCacheSync(stop)
@@ -789,9 +789,9 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
     findErr := fmt.Errorf("find err")
     assumeErr := fmt.Errorf("assume err")
     bindErr := fmt.Errorf("bind err")
+    client := clientsetfake.NewSimpleClientset()

-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(t.Logf).Stop()
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})

     // This can be small because we wait for pod to finish scheduling first
     chanTimeout := 2 * time.Second
@@ -897,26 +897,22 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
             t.Fatalf("Failed to get fake volume binder")
         }
         s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(fakeVolumeBinder, stop, eventBroadcaster)

         eventChan := make(chan struct{})
-        events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
+        stopFunc := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
+            e, _ := obj.(*v1beta1.Event)
             if e, a := item.eventReason, e.Reason; e != a {
                 t.Errorf("expected %v, got %v", e, a)
             }
             close(eventChan)
         })

         s.scheduleOne()

         // Wait for pod to succeed or fail scheduling
         select {
         case <-eventChan:
         case <-time.After(wait.ForeverTestTimeout):
             t.Fatalf("scheduling timeout after %v", wait.ForeverTestTimeout)
         }
-
-        events.Stop()
+        stopFunc()

         // Wait for scheduling to return an error
         select {
         case err := <-errChan:
@@ -39,7 +39,7 @@ go_test(
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate:go_default_library",
@@ -37,7 +37,7 @@ import (
     corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/client-go/util/flowcontrol"
     "k8s.io/client-go/util/retry"
     "k8s.io/component-base/featuregate"
@@ -135,14 +135,14 @@ func setupScheduler(
         informerFactory.Storage().V1beta1().CSINodes(),
     )

-    eventBroadcaster := record.NewBroadcaster()
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
+        Interface: cs.EventsV1beta1().Events(""),
+    })
     schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
         legacyscheme.Scheme,
-        v1.EventSource{Component: v1.DefaultSchedulerName},
+        v1.DefaultSchedulerName,
     )
-    eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{
-        Interface: cs.CoreV1().Events(""),
-    })
+    eventBroadcaster.StartRecordingToSink(stopCh)

     algorithmprovider.ApplyFeatureGates()
@@ -57,11 +57,10 @@ go_test(
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils:go_default_library",
@@ -114,12 +113,11 @@ go_library(
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/restmapper:go_default_library",
         "//staging/src/k8s.io/client-go/scale:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils/image:go_default_library",
     ],
@@ -31,11 +31,10 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/scheduler"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@@ -238,8 +237,9 @@ priorities: []
     policyConfigMap.APIVersion = "v1"
     clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap)

-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientSet.EventsV1beta1().Events("")})
+    stopCh := make(chan struct{})
+    eventBroadcaster.StartRecordingToSink(stopCh)

     defaultBindTimeout := int64(30)
@@ -255,7 +255,7 @@ priorities: []
         informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
         informerFactory.Storage().V1().StorageClasses(),
         informerFactory.Storage().V1beta1().CSINodes(),
-        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
+        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.DefaultSchedulerName),
         kubeschedulerconfig.SchedulerAlgorithmSource{
             Policy: &kubeschedulerconfig.SchedulerPolicySource{
                 ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
@@ -310,8 +310,9 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {

     informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientSet.EventsV1beta1().Events("")})
+    stopCh := make(chan struct{})
+    eventBroadcaster.StartRecordingToSink(stopCh)

     defaultBindTimeout := int64(30)
@@ -327,7 +328,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
         informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
         informerFactory.Storage().V1().StorageClasses(),
         informerFactory.Storage().V1beta1().CSINodes(),
-        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
+        eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.DefaultSchedulerName),
         kubeschedulerconfig.SchedulerAlgorithmSource{
             Policy: &kubeschedulerconfig.SchedulerPolicySource{
                 ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
@@ -610,9 +611,9 @@ func TestMultiScheduler(t *testing.T) {
     if err != nil {
         t.Errorf("Couldn't create scheduler config: %v", err)
     }
-    eventBroadcaster2 := record.NewBroadcaster()
-    schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: fooScheduler})
-    eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet2.CoreV1().Events("")})
+    eventBroadcaster2 := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientSet2.EventsV1beta1().Events("")})
+    schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(legacyscheme.Scheme, "k8s.io/"+fooScheduler)
+    eventBroadcaster2.StartRecordingToSink(stopCh)

     sched2 := scheduler.NewFromConfig(schedulerConfig2)
     scheduler.AddAllEventHandlers(sched2,
@@ -38,12 +38,11 @@ import (
     "k8s.io/client-go/informers"
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
-    clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/restmapper"
     "k8s.io/client-go/scale"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/controller"
@@ -225,14 +224,15 @@ func initTestSchedulerWithOptions(
         controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced)
     }

-    eventBroadcaster := record.NewBroadcaster()
+    eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
+        Interface: context.clientSet.EventsV1beta1().Events(""),
+    })
     context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(
         legacyscheme.Scheme,
-        v1.EventSource{Component: v1.DefaultSchedulerName},
+        v1.DefaultSchedulerName,
     )
-    eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
-        Interface: context.clientSet.CoreV1().Events(""),
-    })
+    stopCh := make(chan struct{})
+    eventBroadcaster.StartRecordingToSink(stopCh)

     context.informerFactory.Start(context.schedulerConfig.StopEverything)
     context.informerFactory.WaitForCacheSync(context.schedulerConfig.StopEverything)
@@ -21,8 +21,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
         "//test/integration/framework:go_default_library",
         "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud:go_default_library",
@@ -23,8 +23,7 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    "k8s.io/client-go/tools/record"
+    "k8s.io/client-go/tools/events"
     "k8s.io/klog"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/scheduler"
@@ -62,19 +61,19 @@ func StartApiserver() (string, ShutdownFunc) {
 // and the shutdown function to stop it.
 func StartScheduler(clientSet clientset.Interface) (factory.Configurator, ShutdownFunc) {
     informerFactory := informers.NewSharedInformerFactory(clientSet, 0)

-    evtBroadcaster := record.NewBroadcaster()
-    evtWatch := evtBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{
-        Interface: clientSet.CoreV1().Events("")})

     stopCh := make(chan struct{})
+    evtBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
+        Interface: clientSet.EventsV1beta1().Events("")})
+
+    evtBroadcaster.StartRecordingToSink(stopCh)

     schedulerConfigurator := createSchedulerConfigurator(clientSet, informerFactory, stopCh)

     config, err := schedulerConfigurator.CreateFromConfig(schedulerapi.Policy{})
     if err != nil {
         klog.Fatalf("Error creating scheduler: %v", err)
     }
-    config.Recorder = evtBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
+    config.Recorder = evtBroadcaster.NewRecorder(legacyscheme.Scheme, "scheduler")

     sched := scheduler.NewFromConfig(config)
     scheduler.AddAllEventHandlers(sched,
@@ -93,7 +92,6 @@ func StartScheduler(clientSet clientset.Interface) (factory.Configurator, Shutdo

     shutdownFunc := func() {
         klog.Infof("destroying scheduler")
-        evtWatch.Stop()
         close(stopCh)
         klog.Infof("destroyed scheduler")
     }