Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Make each new instance of kubelet generate a new event channel (instead of reusing existing).
parent 10b4fe6f30
commit e0f71cb21f
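In short: this change retires the package-global event plumbing in pkg/client/record (record.FromSource, record.StartRecording, record.StartLogging, and the shared events channel behind them) in favor of a per-instance EventBroadcaster, so each kubelet constructed in the same process gets its own event channel. A minimal sketch of the new producer-side wiring, condensed from the RunKubelet hunk below:

eventBroadcaster := record.NewBroadcaster()
// The recorder stamps every event it queues with this component's source.
kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
// Sinks attach to this broadcaster only, not to shared package state.
eventBroadcaster.StartLogging(glog.Infof)
if kcfg.KubeClient != nil {
    eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
}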
@@ -204,6 +204,9 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
     if err != nil {
         glog.Fatalf("Couldn't create scheduler config: %v", err)
     }
+    eventBroadcaster := record.NewBroadcaster()
+    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+    eventBroadcaster.StartRecordingToSink(cl.Events(""))
     scheduler.New(schedulerConfig).Run()
 
     endpoints := service.NewEndpointController(cl)
@@ -221,8 +224,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
         api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
     }}
 
-    nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, fakeKubeletClient{},
-        record.FromSource(api.EventSource{Component: "controllermanager"}), 10, 5*time.Minute)
+    nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, fakeKubeletClient{}, 10, 5*time.Minute)
     nodeController.Run(5*time.Second, true, false)
     cadvisorInterface := new(cadvisor.Fake)
 
@@ -29,7 +29,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
     nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
     replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
@@ -178,8 +177,7 @@ func (s *CMServer) Run(_ []string) error {
     }
 
     nodeController := nodeControllerPkg.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
-        kubeClient, kubeletClient, record.FromSource(api.EventSource{Component: "controllermanager"}),
-        s.RegisterRetryCount, s.PodEvictionTimeout)
+        kubeClient, kubeletClient, s.RegisterRetryCount, s.PodEvictionTimeout)
     nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList, s.SyncNodeStatus)
 
     resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
@@ -323,13 +323,15 @@ func SimpleKubelet(client *client.Client,
 // Eventually, #2 will be replaced with instances of #3
 func RunKubelet(kcfg *KubeletConfig) {
     kcfg.Hostname = util.GetHostname(kcfg.HostnameOverride)
-    kcfg.Recorder = record.FromSource(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
+    eventBroadcaster := record.NewBroadcaster()
+    kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
+    eventBroadcaster.StartLogging(glog.Infof)
     if kcfg.KubeClient != nil {
-        kubelet.SetupEventSending(kcfg.KubeClient, kcfg.Hostname)
+        glog.Infof("Sending events to api server.")
+        eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
     } else {
-        glog.Infof("No api server defined - no events will be sent.")
+        glog.Infof("No api server defined - no events will be sent to API server.")
     }
-    kubelet.SetupLogging()
     kubelet.SetupCapabilities(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
 
     credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)
@@ -34,7 +34,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
     nodeControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/controller"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
@@ -129,8 +128,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU,
     }
     kubeClient := &client.HTTPKubeletClient{Client: http.DefaultClient, Port: ports.KubeletPort}
 
-    nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, kubeClient,
-        record.FromSource(api.EventSource{Component: "controllermanager"}), 10, 5*time.Minute)
+    nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, kubeClient, 10, 5*time.Minute)
     nodeController.Run(10*time.Second, true, true)
 
     endpoints := service.NewEndpointController(cl)
@@ -35,6 +35,8 @@ const maxTriesPerEvent = 12
 
 var sleepDuration = 10 * time.Second
 
+const maxQueuedEvents = 1000
+
 // EventSink knows how to store events (client.Client implements it.)
 // EventSink must respect the namespace that will be embedded in 'event'.
 // It is assumed that EventSink will return the same sorts of errors as
@@ -44,50 +46,94 @@ type EventSink interface {
     Update(event *api.Event) (*api.Event, error)
 }
 
-var emptySource = api.EventSource{}
-// StartRecording starts sending events to a sink. Call once while initializing
-// your binary. Subsequent calls will be ignored. The return value can be ignored
-// or used to stop recording, if desired.
+// EventRecorder knows how to record events on behalf of an EventSource.
+type EventRecorder interface {
+    // Event constructs an event from the given information and puts it in the queue for sending.
+    // 'object' is the object this event is about. Event will make a reference-- or you may also
+    // pass a reference to the object directly.
+    // 'reason' is the reason this event is generated. 'reason' should be short and unique; it will
+    // be used to automate handling of events, so imagine people writing switch statements to
+    // handle them. You want to make that easy.
+    // 'message' is intended to be human readable.
+    //
+    // The resulting event will be created in the same namespace as the reference object.
+    Event(object runtime.Object, reason, message string)
+
+    // Eventf is just like Event, but with Sprintf for the message field.
+    Eventf(object runtime.Object, reason, messageFmt string, args ...interface{})
+}
 
+// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
+type EventBroadcaster interface {
+    // StartEventWatcher starts sending events recieved from this EventBroadcaster to the given
+    // event handler function. The return value can be ignored or used to stop recording, if
+    // desired.
+    StartEventWatcher(eventHandler func(*api.Event)) watch.Interface
+
+    // StartRecordingToSink starts sending events recieved from this EventBroadcaster to the given
+    // sink. The return value can be ignored or used to stop recording, if desired.
+    StartRecordingToSink(sink EventSink) watch.Interface
+
+    // StartLogging starts sending events recieved from this EventBroadcaster to the given logging
+    // function. The return value can be ignored or used to stop recording, if desired.
+    StartLogging(logf func(format string, args ...interface{})) watch.Interface
+
+    // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
+    // with the event source set to the given event source.
+    NewRecorder(source api.EventSource) EventRecorder
+}
+
+// Creates a new event broadcaster.
+func NewBroadcaster() EventBroadcaster {
+    return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull)}
+}
+
+type eventBroadcasterImpl struct {
+    *watch.Broadcaster
+}
+
+// StartRecordingToSink starts sending events recieved from the specified eventBroadcaster to the given sink.
+// The return value can be ignored or used to stop recording, if desired.
 // TODO: make me an object with parameterizable queue length and retry interval
-func StartRecording(sink EventSink) watch.Interface {
+func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {
     // The default math/rand package functions aren't thread safe, so create a
     // new Rand object for each StartRecording call.
     randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
-    return GetEvents(func(event *api.Event) {
-        // Make a copy before modification, because there could be multiple listeners.
-        // Events are safe to copy like this.
-        eventCopy := *event
-        event = &eventCopy
+    return eventBroadcaster.StartEventWatcher(
+        func(event *api.Event) {
+            // Make a copy before modification, because there could be multiple listeners.
+            // Events are safe to copy like this.
+            eventCopy := *event
+            event = &eventCopy
 
             previousEvent := getEvent(event)
             updateExistingEvent := previousEvent.Count > 0
             if updateExistingEvent {
                 event.Count = previousEvent.Count + 1
                 event.FirstTimestamp = previousEvent.FirstTimestamp
                 event.Name = previousEvent.Name
                 event.ResourceVersion = previousEvent.ResourceVersion
             }
 
             tries := 0
             for {
                 if recordEvent(sink, event, updateExistingEvent) {
                     break
+                }
+                tries++
+                if tries >= maxTriesPerEvent {
+                    glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
+                    break
+                }
+                // Randomize the first sleep so that various clients won't all be
+                // synced up if the master goes down.
+                if tries == 1 {
+                    time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
+                } else {
+                    time.Sleep(sleepDuration)
+                }
             }
-        tries++
-        if tries >= maxTriesPerEvent {
-            glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
-            break
-        }
-        // Randomize the first sleep so that various clients won't all be
-        // synced up if the master goes down.
-        if tries == 1 {
-            time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
-        } else {
-            time.Sleep(sleepDuration)
-        }
-    }
-    })
+        })
 }
 
 // recordEvent attempts to write event to a sink. It returns true if the event
@@ -131,22 +177,23 @@ func recordEvent(sink EventSink, event *api.Event, updateExistingEvent bool) boo
     return false
 }
 
-// StartLogging just logs local events, using the given logging function. The
-// return value can be ignored or used to stop logging, if desired.
-func StartLogging(logf func(format string, args ...interface{})) watch.Interface {
-    return GetEvents(func(e *api.Event) {
-        logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message)
-    })
+// StartLogging starts sending events recieved from this EventBroadcaster to the given logging function.
+// The return value can be ignored or used to stop recording, if desired.
+func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
+    return eventBroadcaster.StartEventWatcher(
+        func(e *api.Event) {
+            logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message)
+        })
 }
 
-// GetEvents lets you see *local* events. Convenience function for testing. The
-// return value can be ignored or used to stop logging, if desired.
-func GetEvents(f func(*api.Event)) watch.Interface {
-    w := events.Watch()
+// StartEventWatcher starts sending events recieved from this EventBroadcaster to the given event handler function.
+// The return value can be ignored or used to stop recording, if desired.
+func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface {
+    watcher := eventBroadcaster.Watch()
     go func() {
         defer util.HandleCrash()
         for {
-            watchEvent, open := <-w.ResultChan()
+            watchEvent, open := <-watcher.ResultChan()
             if !open {
                 return
             }
@@ -156,58 +203,37 @@ func GetEvents(f func(*api.Event)) watch.Interface {
                 // ever happen.
                 continue
             }
-            f(event)
+            eventHandler(event)
         }
     }()
-    return w
+    return watcher
 }
 
-const maxQueuedEvents = 1000
-
-var events = watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull)
-
-// EventRecorder knows how to record events for an EventSource.
-type EventRecorder interface {
-    // Event constructs an event from the given information and puts it in the queue for sending.
-    // 'object' is the object this event is about. Event will make a reference-- or you may also
-    // pass a reference to the object directly.
-    // 'reason' is the reason this event is generated. 'reason' should be short and unique; it will
-    // be used to automate handling of events, so imagine people writing switch statements to
-    // handle them. You want to make that easy.
-    // 'message' is intended to be human readable.
-    //
-    // The resulting event will be created in the same namespace as the reference object.
-    Event(object runtime.Object, reason, message string)
-
-    // Eventf is just like Event, but with Sprintf for the message field.
-    Eventf(object runtime.Object, reason, messageFmt string, args ...interface{})
-}
-
-// FromSource returns an EventRecorder that records events with the
-// given event source.
-func FromSource(source api.EventSource) EventRecorder {
-    return &recorderImpl{source}
+// NewRecorder returns an EventRecorder that records events with the given event source.
+func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder {
+    return &recorderImpl{source, eventBroadcaster.Broadcaster}
 }
 
 type recorderImpl struct {
     source api.EventSource
+    *watch.Broadcaster
 }
 
-func (i *recorderImpl) Event(object runtime.Object, reason, message string) {
+func (recorder *recorderImpl) Event(object runtime.Object, reason, message string) {
     ref, err := api.GetReference(object)
     if err != nil {
         glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message)
         return
     }
 
-    e := makeEvent(ref, reason, message)
-    e.Source = i.source
+    event := makeEvent(ref, reason, message)
+    event.Source = recorder.source
 
-    events.Action(watch.Added, e)
+    recorder.Action(watch.Added, event)
 }
 
-func (i *recorderImpl) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
-    i.Event(object, reason, fmt.Sprintf(messageFmt, args...))
+func (recorder *recorderImpl) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
    recorder.Event(object, reason, fmt.Sprintf(messageFmt, args...))
 }
 
 func makeEvent(ref *api.ObjectReference, reason, message string) *api.Event {
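The mechanics above are deliberately thin: eventBroadcasterImpl just embeds *watch.Broadcaster, recorderImpl publishes with Action(watch.Added, event), and StartRecordingToSink and StartLogging are both built on StartEventWatcher. A reduced sketch of that watcher loop, simplified from the StartEventWatcher hunk above (the type assertion stands in for the elided lines of that function):

watcher := eventBroadcaster.Watch() // independent queue per listener, DropIfChannelFull semantics
go func() {
    defer util.HandleCrash()
    for {
        watchEvent, open := <-watcher.ResultChan()
        if !open {
            return
        }
        if event, ok := watchEvent.Object.(*api.Event); ok {
            eventHandler(event) // sees every event sent via Action(watch.Added, event)
        }
    }
}()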
@@ -291,23 +291,23 @@ func TestEventf(t *testing.T) {
                 return returnEvent, nil
             },
         }
-        recorder := StartRecording(&testEvents)
-        logger := StartLogging(t.Logf) // Prove that it is useful
-        logger2 := StartLogging(func(formatter string, args ...interface{}) {
+        eventBroadcaster := NewBroadcaster()
+        sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
+        logWatcher1 := eventBroadcaster.StartLogging(t.Logf) // Prove that it is useful
+        logWatcher2 := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
             if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
                 t.Errorf("Expected '%v', got '%v'", e, a)
             }
             called <- struct{}{}
         })
-        testSource := api.EventSource{Component: "eventTest"}
-        FromSource(testSource).Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
+        recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"})
+        recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
 
         <-called
         <-called
-        recorder.Stop()
-        logger.Stop()
-        logger2.Stop()
+        sinkWatcher.Stop()
+        logWatcher1.Stop()
+        logWatcher2.Stop()
     }
 }
 
@@ -387,7 +387,8 @@ func TestWriteEventError(t *testing.T) {
     }
     done := make(chan struct{})
 
-    defer StartRecording(
+    eventBroadcaster := NewBroadcaster()
+    defer eventBroadcaster.StartRecordingToSink(
         &testEventSink{
             OnCreate: func(event *api.Event) (*api.Event, error) {
                 if event.Message == "finished" {
@@ -407,12 +408,11 @@ func TestWriteEventError(t *testing.T) {
             },
         },
     ).Stop()
-    testSource := api.EventSource{Component: "eventTest"}
+    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"})
 
     for caseName := range table {
-        FromSource(testSource).Event(ref, "Reason", caseName)
+        recorder.Event(ref, "Reason", caseName)
     }
-    FromSource(testSource).Event(ref, "Reason", "finished")
+    recorder.Event(ref, "Reason", "finished")
     <-done
 
     for caseName, item := range table {
@@ -443,12 +443,13 @@ func TestLotsOfEvents(t *testing.T) {
             return event, nil
         },
     }
-    recorder := StartRecording(&testEvents)
-    testSource := api.EventSource{Component: "eventTest"}
-    logger := StartLogging(func(formatter string, args ...interface{}) {
+    eventBroadcaster := NewBroadcaster()
+    sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
+    logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
         loggerCalled <- struct{}{}
     })
+    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"})
     ref := &api.ObjectReference{
         Kind: "Pod",
         Name: "foo",
@@ -457,7 +458,7 @@ func TestLotsOfEvents(t *testing.T) {
         APIVersion: "v1beta1",
     }
     for i := 0; i < maxQueuedEvents; i++ {
-        go FromSource(testSource).Event(ref, "Reason", strconv.Itoa(i))
+        go recorder.Eventf(ref, "Reason", strconv.Itoa(i))
     }
     // Make sure no events were dropped by either of the listeners.
     for i := 0; i < maxQueuedEvents; i++ {
@@ -470,6 +471,6 @@ func TestLotsOfEvents(t *testing.T) {
             t.Errorf("Only attempted to record event '%d' %d times.", i, counts[i])
         }
     }
-    recorder.Stop()
-    logger.Stop()
+    sinkWatcher.Stop()
+    logWatcher.Stop()
 }
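The rewritten tests above double as usage documentation for the consumer side: each Start* call registers an independent listener and returns a watch.Interface whose Stop detaches only that listener. Condensed from TestEventf (t, testEvents, and the item fields come from the surrounding test):

eventBroadcaster := NewBroadcaster()
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
logWatcher := eventBroadcaster.StartLogging(t.Logf)
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"})

recorder.Eventf(item.obj, item.reason, item.messageFmt, item.elements...)

// Stopping one watcher leaves the other attached.
sinkWatcher.Stop()
logWatcher.Stop()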
@@ -91,9 +91,16 @@ func NewNodeController(
     staticResources *api.NodeResources,
     kubeClient client.Interface,
     kubeletClient client.KubeletClient,
-    recorder record.EventRecorder,
     registerRetryCount int,
     podEvictionTimeout time.Duration) *NodeController {
+    eventBroadcaster := record.NewBroadcaster()
+    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
+    if kubeClient != nil {
+        glog.Infof("Sending events to api server.")
+        eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+    } else {
+        glog.Infof("No api server defined - no events will be sent to API server.")
+    }
     return &NodeController{
         cloud:   cloud,
         matchRE: matchRE,
@@ -125,7 +132,6 @@ func (nc *NodeController) Run(period time.Duration, syncNodeList, syncNodeStatus
     // Register intial set of nodes with their status set.
     var nodes *api.NodeList
     var err error
-    record.StartRecording(nc.kubeClient.Events(""))
     if nc.isRunningCloudProvider() {
         if syncNodeList {
             if nodes, err = nc.GetCloudNodesWithSpec(); err != nil {
@@ -30,7 +30,6 @@ import (
     apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
     fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
@@ -248,7 +247,7 @@ func TestRegisterNodes(t *testing.T) {
         for _, machine := range item.machines {
             nodes.Items = append(nodes.Items, *newNode(machine))
         }
-        nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, nil, 10, time.Minute)
         err := nodeController.RegisterNodes(&nodes, item.retryCount, time.Millisecond)
         if !item.expectedFail && err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -333,7 +332,7 @@ func TestCreateGetStaticNodesWithSpec(t *testing.T) {
         },
     }
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", item.machines, &resources, nil, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(nil, "", item.machines, &resources, nil, nil, 10, time.Minute)
         nodes, err := nodeController.GetStaticNodesWithSpec()
         if err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -394,7 +393,7 @@ func TestCreateGetCloudNodesWithSpec(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, nil, 10, time.Minute)
         nodes, err := nodeController.GetCloudNodesWithSpec()
         if err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -491,7 +490,7 @@ func TestSyncCloudNodes(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, nil, 10, time.Minute)
         if err := nodeController.SyncCloudNodes(); err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -573,7 +572,7 @@ func TestSyncCloudNodesEvictPods(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, nil, 10, time.Minute)
         if err := nodeController.SyncCloudNodes(); err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -675,7 +674,7 @@ func TestNodeConditionsCheck(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", nil, nil, nil, item.fakeKubeletClient, nil, 10, time.Minute)
+        nodeController := NewNodeController(nil, "", nil, nil, nil, item.fakeKubeletClient, 10, time.Minute)
         nodeController.now = func() util.Time { return fakeNow }
         conditions := nodeController.DoCheck(item.node)
         if !reflect.DeepEqual(item.expectedConditions, conditions) {
@@ -706,7 +705,7 @@ func TestPopulateNodeAddresses(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, nil, nil, 10, time.Minute)
+        nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, nil, 10, time.Minute)
         result, err := nodeController.PopulateAddresses(item.nodes)
         // In case of IP querying error, we should continue.
         if err != nil {
@@ -809,7 +808,7 @@ func TestSyncProbedNodeStatus(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, item.fakeNodeHandler, item.fakeKubeletClient, nil, 10, time.Minute)
+        nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, item.fakeNodeHandler, item.fakeKubeletClient, 10, time.Minute)
         nodeController.now = func() util.Time { return fakeNow }
         if err := nodeController.SyncProbedNodeStatus(); err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -912,7 +911,7 @@ func TestSyncProbedNodeStatusTransitionTime(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, item.fakeKubeletClient, &record.FakeRecorder{}, 10, time.Minute)
+        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, item.fakeKubeletClient, 10, time.Minute)
         nodeController.lookupIP = func(host string) ([]net.IP, error) { return nil, fmt.Errorf("lookup %v: no such host", host) }
         nodeController.now = func() util.Time { return fakeNow }
         if err := nodeController.SyncProbedNodeStatus(); err != nil {
@@ -1065,7 +1064,7 @@ func TestSyncProbedNodeStatusEvictPods(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, item.fakeKubeletClient, &record.FakeRecorder{}, 10, 5*time.Minute)
+        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, item.fakeKubeletClient, 10, 5*time.Minute)
        nodeController.lookupIP = func(host string) ([]net.IP, error) { return nil, fmt.Errorf("lookup %v: no such host", host) }
         if err := nodeController.SyncProbedNodeStatus(); err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -1223,7 +1222,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, nil, &record.FakeRecorder{}, 10, item.evictionTimeout)
+        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, nil, 10, item.evictionTimeout)
         nodeController.now = func() util.Time { return fakeNow }
         if err := nodeController.MonitorNodeStatus(); err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -1405,7 +1404,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
     }
 
     for _, item := range table {
-        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, nil, &record.FakeRecorder{}, 10, 5*time.Minute)
+        nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, nil, 10, 5*time.Minute)
         nodeController.now = func() util.Time { return fakeNow }
         if err := nodeController.MonitorNodeStatus(); err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -76,7 +76,8 @@ func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.Pod) ku
 }
 
 func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
-    config := NewPodConfig(mode, record.FromSource(api.EventSource{Component: "kubelet"}))
+    eventBroadcaster := record.NewBroadcaster()
+    config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
     channel := config.Channel(TestSource)
     ch := config.Updates()
     return channel, ch, config
@@ -20,9 +20,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
-    "github.com/golang/glog"
     cadvisorApi "github.com/google/cadvisor/info/v1"
 )
 
@@ -34,17 +31,6 @@ func SetupCapabilities(allowPrivileged bool, hostNetworkSources []string) {
     })
 }
 
-// TODO: Split this up?
-func SetupLogging() {
-    // Log the events locally too.
-    record.StartLogging(glog.Infof)
-}
-
-func SetupEventSending(client *client.Client, hostname string) {
-    glog.Infof("Sending events to api server.")
-    record.StartRecording(client.Events(""))
-}
-
 func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList {
     c := api.ResourceList{
         api.ResourceCPU: *resource.NewMilliQuantity(
@@ -26,6 +26,7 @@ import (
     "os"
     "strconv"
 
+    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
@@ -77,8 +78,6 @@ func (s *SchedulerServer) Run(_ []string) error {
         glog.Fatalf("Invalid API configuration: %v", err)
     }
 
-    record.StartRecording(kubeClient.Events(""))
-
     go func() {
         if s.EnableProfiling {
             mux := http.NewServeMux()
@@ -95,6 +94,10 @@ func (s *SchedulerServer) Run(_ []string) error {
         glog.Fatalf("Failed to create scheduler configuration: %v", err)
     }
 
+    eventBroadcaster := record.NewBroadcaster()
+    config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
+    eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+
     sched := scheduler.New(config)
     sched.Run()
 
@@ -26,7 +26,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
     algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
@@ -194,8 +193,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
             glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
             return pod
         },
         Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
-        Recorder: record.FromSource(api.EventSource{Component: "scheduler"}),
     }, nil
 }
 
@@ -56,7 +56,8 @@ func (es mockScheduler) Schedule(pod api.Pod, ml scheduler.MinionLister) (string
 }
 
 func TestScheduler(t *testing.T) {
-    defer record.StartLogging(t.Logf).Stop()
+    eventBroadcaster := record.NewBroadcaster()
+    defer eventBroadcaster.StartLogging(t.Logf).Stop()
     errS := errors.New("scheduler")
     errB := errors.New("binder")
 
@@ -119,11 +120,11 @@ func TestScheduler(t *testing.T) {
             NextPod: func() *api.Pod {
                 return item.sendPod
             },
-            Recorder: record.FromSource(api.EventSource{Component: "scheduler"}),
+            Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
         }
         s := New(c)
         called := make(chan struct{})
-        events := record.GetEvents(func(e *api.Event) {
+        events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
             if e, a := item.eventReason, e.Reason; e != a {
                 t.Errorf("%v: expected %v, got %v", i, e, a)
             }
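The NewNodeController hunks make the same move at the call-signature level: the record.EventRecorder parameter is dropped and the controller wires up its own broadcaster, so call sites shrink accordingly (argument names here are illustrative, not from the tree):

// Before: callers constructed and passed a recorder.
nc := nodeControllerPkg.NewNodeController(cloud, minionRegexp, machineList, nodeResources,
    kubeClient, kubeletClient, record.FromSource(api.EventSource{Component: "controllermanager"}),
    registerRetryCount, podEvictionTimeout)

// After: no recorder argument; NewNodeController builds a broadcaster and
// starts recording to kubeClient.Events("") itself.
nc = nodeControllerPkg.NewNodeController(cloud, minionRegexp, machineList, nodeResources,
    kubeClient, kubeletClient, registerRetryCount, podEvictionTimeout)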