Merge pull request #1359 from smarterclayton/tone_down_logging

Split log statements into more buckets for better control
Brendan Burns committed 2014-09-25 13:42:30 -07:00
commit 1a906a7ce0
21 changed files with 103 additions and 76 deletions

docs/logging.md (new file)

@@ -0,0 +1,25 @@
Logging Conventions
===================

The following are conventions for which glog levels to use. glog is globally preferred to "log" because it provides better runtime control.

* glog.Errorf() - Always an error
* glog.Warningf() - Something unexpected, but probably not an error
* glog.Infof / glog.V(0) - Generally useful information that should ALWAYS be visible to an operator
  * Programmer errors
  * Logging extra info about a panic
  * CLI argument handling
* glog.V(1) - A reasonable default log level if you don't want verbosity.
  * Information about config (listening on X, watching Y)
  * Errors that repeat frequently and relate to conditions that can be corrected (pod detected as unhealthy)
* glog.V(2) - Useful steady-state information about the service, and important log messages that may correlate to significant changes in the system. This is the recommended default log level for most systems.
  * Logging HTTP requests and their status codes
  * System state changing (killing pod)
  * Controller state change events (starting pods)
  * Scheduler log messages
* glog.V(3) - Extended information about changes
  * More info about system state changes
* glog.V(4) - Debug-level verbosity (for now)
  * Logging in particularly thorny parts of code where you may want to come back later and check it

As per the comments above, the practical default level is V(2). Developer and QE environments may wish to run at V(3) or V(4).
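To make the conventions concrete, here is a minimal sketch (not part of this commit) of how the levels look in code. The `handleRequest` function and its messages are invented for illustration; only the glog calls and flags are real:

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

// handleRequest is a hypothetical handler used only to show which
// glog level each kind of message belongs at.
func handleRequest(path string) {
	// V(2): steady-state service information (e.g. HTTP requests),
	// visible at the recommended default level.
	glog.V(2).Infof("GET %s: (3ms) 200", path)

	// V(4): debug-level detail for thorny code paths, normally hidden.
	glog.V(4).Infof("internal state while serving %s", path)
}

func main() {
	// glog registers the -v, -vmodule, and -logtostderr flags, so
	// verbosity is chosen at runtime, e.g.: ./server -logtostderr -v=2
	flag.Parse()
	defer glog.Flush()

	glog.V(1).Infof("Listening on %s", ":8080") // config information
	handleRequest("/healthz")

	glog.Warningf("something unexpected, but probably not an error")
	glog.Errorf("always an error")
}
```

Run with `-v=2`, this prints everything except the V(4) line; the same binary rerun with `-v=4` includes it. That runtime control is the reason glog is preferred over the standard "log" package.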

@@ -22,6 +22,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
+	"github.com/golang/glog"
 )

 // statusError is an object that can be converted into an api.Status
@@ -44,6 +45,11 @@ func errToAPIStatus(err error) *api.Status {
 	case tools.IsEtcdTestFailed(err):
 		status = http.StatusConflict
 	}
+	// Log errors that were not converted to an error status
+	// by REST storage - these typically indicate programmer
+	// error by not using pkg/api/errors, or unexpected failure
+	// cases.
+	glog.V(1).Infof("An unchecked error was received: %v", err)
 	return &api.Status{
 		Status: api.StatusFailure,
 		Code:   status,

@@ -166,6 +166,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *uint64) err
 		glog.Errorf("unexpected watch close - watch lasted less than a second and no items received")
 		return errors.New("very short watch")
 	}
-	glog.Infof("watch close - %v total items received", eventCount)
+	glog.V(4).Infof("watch close - %v total items received", eventCount)
 	return nil
 }

@@ -42,7 +42,7 @@ func RegisterCloudProvider(name string, cloud Factory) {
 	if found {
 		glog.Fatalf("Cloud provider %q was registered twice", name)
 	}
-	glog.Infof("Registered cloud provider %q", name)
+	glog.V(1).Infof("Registered cloud provider %q", name)
 	providers[name] = cloud
 }

@@ -115,7 +115,7 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *uint64) {
 			// that called us call us again.
 			return
 		}
-		glog.Infof("Got watch: %#v", event)
+		glog.V(4).Infof("Got watch: %#v", event)
 		rc, ok := event.Object.(*api.ReplicationController)
 		if !ok {
 			glog.Errorf("unexpected object: %#v", event.Object)
@@ -125,7 +125,7 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *uint64) {
 		*resourceVersion = rc.ResourceVersion + 1
 		// Sync even if this is a deletion event, to ensure that we leave
 		// it in the desired state.
-		glog.Infof("About to sync from watch: %v", rc.ID)
+		glog.V(4).Infof("About to sync from watch: %v", rc.ID)
 		rm.syncHandler(*rc)
 	}
 }
@@ -153,7 +153,7 @@ func (rm *ReplicationManager) syncReplicationController(controllerSpec api.Repli
 		diff *= -1
 		wait := sync.WaitGroup{}
 		wait.Add(diff)
-		glog.Infof("Too few replicas, creating %d\n", diff)
+		glog.V(2).Infof("Too few replicas, creating %d\n", diff)
 		for i := 0; i < diff; i++ {
 			go func() {
 				defer wait.Done()
@@ -162,7 +162,7 @@ func (rm *ReplicationManager) syncReplicationController(controllerSpec api.Repli
 		}
 		wait.Wait()
 	} else if diff > 0 {
-		glog.Infof("Too many replicas, deleting %d\n", diff)
+		glog.V(2).Infof("Too many replicas, deleting %d\n", diff)
 		wait := sync.WaitGroup{}
 		wait.Add(diff)
 		for i := 0; i < diff; i++ {
@@ -191,7 +191,7 @@ func (rm *ReplicationManager) synchronize() {
 	for ix := range controllerSpecs {
 		go func(ix int) {
 			defer wg.Done()
-			glog.Infof("periodic sync of %v", controllerSpecs[ix].ID)
+			glog.V(4).Infof("periodic sync of %v", controllerSpecs[ix].ID)
 			err := rm.syncHandler(controllerSpecs[ix])
 			if err != nil {
 				glog.Errorf("Error synchronizing: %#v", err)

@@ -148,7 +148,7 @@ func (rl *respLogger) Addf(format string, data ...interface{}) {
 // Log is intended to be called once at the end of your request handler, via defer
 func (rl *respLogger) Log() {
 	latency := time.Since(rl.startTime)
-	glog.Infof("%s %s: (%v) %v%v%v", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo)
+	glog.V(2).Infof("%s %s: (%v) %v%v%v", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo)
 }

 // Header implements http.ResponseWriter.

@@ -168,9 +168,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 	switch update.Op {
 	case kubelet.ADD, kubelet.UPDATE:
 		if update.Op == kubelet.ADD {
-			glog.Infof("Adding new pods from source %s : %v", source, update.Pods)
+			glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods)
 		} else {
-			glog.Infof("Updating pods from source %s : %v", source, update.Pods)
+			glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods)
 		}
 		filtered := filterInvalidPods(update.Pods, source)
@@ -193,7 +193,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 		}
 	case kubelet.REMOVE:
-		glog.Infof("Removing a pod %v", update)
+		glog.V(4).Infof("Removing a pod %v", update)
 		for _, value := range update.Pods {
 			name := value.Name
 			if existing, found := pods[name]; found {
@@ -206,7 +206,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 		}
 	case kubelet.SET:
-		glog.Infof("Setting pods for source %s : %v", source, update)
+		glog.V(4).Infof("Setting pods for source %s : %v", source, update)
 		// Clear the old map entries by just creating a new map
 		oldPods := pods
 		pods = make(map[string]*kubelet.Pod)
@@ -238,7 +238,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
 	default:
-		glog.Infof("Received invalid update type: %v", update)
+		glog.Warningf("Received invalid update type: %v", update)
 	}
@@ -259,7 +259,7 @@ func filterInvalidPods(pods []kubelet.Pod, source string) (filtered []*kubelet.P
 		errors = append(errors, errs...)
 	}
 	if len(errors) > 0 {
-		glog.Warningf("Pod %d from %s failed validation, ignoring: %v", i+1, source, errors)
+		glog.Warningf("Pod %d (%s) from %s failed validation, ignoring: %v", i+1, pods[i].Name, source, errors)
 		continue
 	}
 	filtered = append(filtered, &pods[i])

@@ -54,7 +54,7 @@ func NewSourceEtcd(key string, client tools.EtcdClient, updates chan<- interface
 		helper:  helper,
 		updates: updates,
 	}
-	glog.Infof("Watching etcd for %s", key)
+	glog.V(1).Infof("Watching etcd for %s", key)
 	go util.Forever(source.run, time.Second)
 	return source
 }
@@ -78,7 +78,7 @@ func (s *SourceEtcd) run() {
 			continue
 		}
-		glog.Infof("Received state from etcd watch: %+v", pods)
+		glog.V(4).Infof("Received state from etcd watch: %+v", pods)
 		s.updates <- kubelet.PodUpdate{pods, kubelet.SET}
 	}
 }

@@ -45,7 +45,7 @@ func NewSourceFile(path string, period time.Duration, updates chan<- interface{}
 		path:    path,
 		updates: updates,
 	}
-	glog.Infof("Watching file %s", path)
+	glog.V(1).Infof("Watching file %s", path)
 	go util.Forever(config.run, period)
 	return config
 }

@@ -44,7 +44,7 @@ func NewSourceURL(url string, period time.Duration, updates chan<- interface{})
 		updates: updates,
 		data:    nil,
 	}
-	glog.Infof("Watching URL %s", url)
+	glog.V(1).Infof("Watching URL %s", url)
 	go util.Forever(config.run, period)
 	return config
 }

@@ -75,11 +75,11 @@ func NewDockerPuller(client DockerInterface) DockerPuller {
 	if err == nil {
 		cfg.addToKeyring(dp.keyring)
 	} else {
-		glog.Errorf("Unable to parse docker config file: %v", err)
+		glog.Errorf("Unable to parse Docker config file: %v", err)
 	}

 	if dp.keyring.count() == 0 {
-		glog.Infof("Continuing with empty docker keyring")
+		glog.V(1).Infof("Continuing with empty Docker keyring")
 	}
 	return dp
@@ -348,7 +348,7 @@ func ParseDockerName(name string) (podFullName, uuid, containerName string, hash
 		var err error
 		hash, err = strconv.ParseUint(pieces[1], 16, 32)
 		if err != nil {
-			glog.Infof("invalid container hash: %s", pieces[1])
+			glog.Warningf("invalid container hash: %s", pieces[1])
 		}
 	}
 }

@@ -220,7 +220,7 @@ func makePortsAndBindings(container *api.Container) (map[docker.Port]struct{}, m
 		case "TCP":
 			protocol = "/tcp"
 		default:
-			glog.Infof("Unknown protocol '%s': defaulting to TCP", port.Protocol)
+			glog.Warningf("Unknown protocol '%s': defaulting to TCP", port.Protocol)
 			protocol = "/tcp"
 		}
 		dockerPort := docker.Port(strconv.Itoa(interiorPort) + protocol)
@@ -345,7 +345,7 @@ func (kl *Kubelet) killContainer(dockerContainer *docker.APIContainers) error {
 }

 func (kl *Kubelet) killContainerByID(ID, name string) error {
-	glog.Infof("Killing: %s", ID)
+	glog.V(2).Infof("Killing: %s", ID)
 	err := kl.dockerClient.StopContainer(ID, 10)
 	if len(name) == 0 {
 		return err
@@ -425,7 +425,7 @@ func (kl *Kubelet) syncPod(pod *Pod, dockerContainers dockertools.DockerContaine
 	if networkDockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
 		netID = dockertools.DockerID(networkDockerContainer.ID)
 	} else {
-		glog.Infof("Network container doesn't exist, creating")
+		glog.V(3).Infof("Network container doesn't exist, creating")
 		count, err := kl.deleteAllContainers(pod, podFullName, dockerContainers)
 		if err != nil {
 			return err
@@ -468,7 +468,7 @@ func (kl *Kubelet) syncPod(pod *Pod, dockerContainers dockertools.DockerContaine
 		expectedHash := dockertools.HashContainer(&container)
 		if dockerContainer, found, hash := dockerContainers.FindPodContainer(podFullName, uuid, container.Name); found {
 			containerID := dockertools.DockerID(dockerContainer.ID)
-			glog.V(1).Infof("pod %s container %s exists as %v", podFullName, container.Name, containerID)
+			glog.V(3).Infof("pod %s container %s exists as %v", podFullName, container.Name, containerID)

 			// look for changes in the container.
 			if hash == 0 || hash == expectedHash {
@@ -485,7 +485,7 @@
 				}
 				glog.V(1).Infof("pod %s container %s is unhealthy.", podFullName, container.Name, healthy)
 			} else {
-				glog.V(1).Infof("container hash changed %d vs %d.", hash, expectedHash)
+				glog.V(3).Infof("container hash changed %d vs %d.", hash, expectedHash)
 			}
 			if err := kl.killContainer(dockerContainer); err != nil {
 				glog.V(1).Infof("Failed to kill container %s: %v", dockerContainer.ID, err)
@@ -503,21 +503,21 @@
 		if len(recentContainers) > 0 && pod.Manifest.RestartPolicy.Always == nil {
 			if pod.Manifest.RestartPolicy.Never != nil {
-				glog.Infof("Already ran container with name %s--%s--%s, do nothing",
+				glog.V(3).Infof("Already ran container with name %s--%s--%s, do nothing",
 					podFullName, uuid, container.Name)
 				continue
 			}
 			if pod.Manifest.RestartPolicy.OnFailure != nil {
 				// Check the exit code of last run
 				if recentContainers[0].State.ExitCode == 0 {
-					glog.Infof("Already successfully ran container with name %s--%s--%s, do nothing",
+					glog.V(3).Infof("Already successfully ran container with name %s--%s--%s, do nothing",
 						podFullName, uuid, container.Name)
 					continue
 				}
 			}
 		}

-		glog.Infof("Container with name %s--%s--%s doesn't exist, creating %#v", podFullName, uuid, container.Name, container)
+		glog.V(3).Infof("Container with name %s--%s--%s doesn't exist, creating %#v", podFullName, uuid, container.Name, container)
 		if err := kl.dockerPuller.Pull(container.Image); err != nil {
 			glog.Errorf("Failed to pull image %s: %v skipping pod %s container %s.", container.Image, err, podFullName, container.Name)
 			continue
@@ -579,11 +579,11 @@ func (kl *Kubelet) reconcileVolumes(pods []Pod) error {
 		if _, ok := desiredVolumes[name]; !ok {
 			//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
 			//to be deleted and volumes that are leftover after a crash.
-			glog.Infof("Orphaned volume %s found, tearing down volume", name)
+			glog.Warningf("Orphaned volume %s found, tearing down volume", name)
 			//TODO (jonesdl) This should not block other kubelet synchronization procedures
 			err := vol.TearDown()
 			if err != nil {
-				glog.Infof("Could not tear down volume %s (%s)", name, err)
+				glog.Errorf("Could not tear down volume %s (%s)", name, err)
 			}
 		}
 	}
@@ -592,7 +592,7 @@
 // SyncPods synchronizes the configured list of pods (desired state) with the host current state.
 func (kl *Kubelet) SyncPods(pods []Pod) error {
-	glog.Infof("Desired [%s]: %+v", kl.hostname, pods)
+	glog.V(4).Infof("Desired [%s]: %+v", kl.hostname, pods)
 	var err error
 	desiredContainers := make(map[podContainer]empty)
@@ -675,13 +675,13 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
 		case u := <-updates:
 			switch u.Op {
 			case SET:
-				glog.Infof("Containers changed [%s]", kl.hostname)
+				glog.V(3).Infof("Containers changed [%s]", kl.hostname)
 				pods = u.Pods
 				pods = filterHostPortConflicts(pods)
 			case UPDATE:
 				//TODO: implement updates of containers
-				glog.Infof("Containers updated, not implemented [%s]", kl.hostname)
+				glog.Warningf("Containers updated, not implemented [%s]", kl.hostname)
 				continue
 			default:

@@ -48,7 +48,7 @@ type Server struct {
 // ListenAndServeKubeletServer initializes a server to respond to HTTP network requests on the Kubelet.
 func ListenAndServeKubeletServer(host HostInterface, updates chan<- interface{}, address string, port uint) {
-	glog.Infof("Starting to listen on %s:%d", address, port)
+	glog.V(1).Infof("Starting to listen on %s:%d", address, port)
 	handler := NewServer(host, updates)
 	s := &http.Server{
 		Addr: net.JoinHostPort(address, strconv.FormatUint(uint64(port), 10)),

@@ -101,7 +101,7 @@ func handleServicesWatch(resourceVersion *uint64, ch <-chan watch.Event, updates
 	select {
 	case event, ok := <-ch:
 		if !ok {
-			glog.V(2).Infof("WatchServices channel closed")
+			glog.V(4).Infof("WatchServices channel closed")
 			return
 		}
@@ -150,7 +150,7 @@ func handleEndpointsWatch(resourceVersion *uint64, ch <-chan watch.Event, update
 	select {
 	case event, ok := <-ch:
 		if !ok {
-			glog.V(2).Infof("WatchEndpoints channel closed")
+			glog.V(4).Infof("WatchEndpoints channel closed")
 			return
 		}

@@ -125,24 +125,24 @@ func (s *endpointsStore) Merge(source string, change interface{}) error {
 	update := change.(EndpointsUpdate)
 	switch update.Op {
 	case ADD:
-		glog.Infof("Adding new endpoint from source %s : %v", source, update.Endpoints)
+		glog.V(4).Infof("Adding new endpoint from source %s : %v", source, update.Endpoints)
 		for _, value := range update.Endpoints {
 			endpoints[value.ID] = value
 		}
 	case REMOVE:
-		glog.Infof("Removing an endpoint %v", update)
+		glog.V(4).Infof("Removing an endpoint %v", update)
 		for _, value := range update.Endpoints {
 			delete(endpoints, value.ID)
 		}
 	case SET:
-		glog.Infof("Setting endpoints %v", update)
+		glog.V(4).Infof("Setting endpoints %v", update)
 		// Clear the old map entries by just creating a new map
 		endpoints = make(map[string]api.Endpoints)
 		for _, value := range update.Endpoints {
 			endpoints[value.ID] = value
 		}
 	default:
-		glog.Infof("Received invalid update type: %v", update)
+		glog.V(4).Infof("Received invalid update type: %v", update)
 	}
 	s.endpoints[source] = endpoints
 	s.endpointLock.Unlock()
@@ -220,24 +220,24 @@ func (s *serviceStore) Merge(source string, change interface{}) error {
 	update := change.(ServiceUpdate)
 	switch update.Op {
 	case ADD:
-		glog.Infof("Adding new service from source %s : %v", source, update.Services)
+		glog.V(4).Infof("Adding new service from source %s : %v", source, update.Services)
 		for _, value := range update.Services {
 			services[value.ID] = value
 		}
 	case REMOVE:
-		glog.Infof("Removing a service %v", update)
+		glog.V(4).Infof("Removing a service %v", update)
 		for _, value := range update.Services {
 			delete(services, value.ID)
 		}
 	case SET:
-		glog.Infof("Setting services %v", update)
+		glog.V(4).Infof("Setting services %v", update)
 		// Clear the old map entries by just creating a new map
 		services = make(map[string]api.Service)
 		for _, value := range update.Services {
 			services[value.ID] = value
 		}
 	default:
-		glog.Infof("Received invalid update type: %v", update)
+		glog.V(4).Infof("Received invalid update type: %v", update)
 	}
 	s.services[source] = services
 	s.serviceLock.Unlock()

@@ -121,7 +121,7 @@ func (s ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error)
 	response, err := s.client.Get(registryRoot+"/specs", true, false)
 	if err != nil {
 		if tools.IsEtcdNotFound(err) {
-			glog.V(1).Infof("Failed to get the key %s: %v", registryRoot, err)
+			glog.V(4).Infof("Failed to get the key %s: %v", registryRoot, err)
 		} else {
 			glog.Errorf("Failed to contact etcd for key %s: %v", registryRoot, err)
 		}
@@ -144,12 +144,12 @@ func (s ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error)
 		endpoints, err := s.GetEndpoints(svc.ID)
 		if err != nil {
 			if tools.IsEtcdNotFound(err) {
-				glog.V(1).Infof("Unable to get endpoints for %s : %v", svc.ID, err)
+				glog.V(4).Infof("Unable to get endpoints for %s : %v", svc.ID, err)
 			}
 			glog.Errorf("Couldn't get endpoints for %s : %v skipping", svc.ID, err)
 			endpoints = api.Endpoints{}
 		} else {
-			glog.Infof("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
+			glog.V(3).Infof("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
 		}
 		retEndpoints[i] = endpoints
 	}
@@ -186,7 +186,7 @@ func etcdResponseToService(response *etcd.Response) (*api.Service, error) {
 }

 func (s ConfigSourceEtcd) WatchForChanges() {
-	glog.Info("Setting up a watch for new services")
+	glog.V(4).Info("Setting up a watch for new services")
 	watchChannel := make(chan *etcd.Response)
 	go s.client.Watch("/registry/services/", 0, true, watchChannel, nil)
 	for {
@@ -199,7 +199,7 @@
 }

 func (s ConfigSourceEtcd) ProcessChange(response *etcd.Response) {
-	glog.Infof("Processing a change in service configuration... %s", *response)
+	glog.V(4).Infof("Processing a change in service configuration... %s", *response)

 	// If it's a new service being added (signified by a localport being added)
 	// then process it as such
@@ -212,7 +212,7 @@
 			return
 		}
-		glog.Infof("New service added/updated: %#v", service)
+		glog.V(4).Infof("New service added/updated: %#v", service)
 		serviceUpdate := ServiceUpdate{Op: ADD, Services: []api.Service{*service}}
 		s.serviceChannel <- serviceUpdate
 		return
@@ -220,17 +220,17 @@
 	if response.Action == "delete" {
 		parts := strings.Split(response.Node.Key[1:], "/")
 		if len(parts) == 4 {
-			glog.Infof("Deleting service: %s", parts[3])
+			glog.V(4).Infof("Deleting service: %s", parts[3])
 			serviceUpdate := ServiceUpdate{Op: REMOVE, Services: []api.Service{{JSONBase: api.JSONBase{ID: parts[3]}}}}
 			s.serviceChannel <- serviceUpdate
 			return
 		}
-		glog.Infof("Unknown service delete: %#v", parts)
+		glog.Warningf("Unknown service delete: %#v", parts)
 	}
 }

 func (s ConfigSourceEtcd) ProcessEndpointResponse(response *etcd.Response) {
-	glog.Infof("Processing a change in endpoint configuration... %s", *response)
+	glog.V(4).Infof("Processing a change in endpoint configuration... %s", *response)
 	var endpoints api.Endpoints
 	err := latest.Codec.DecodeInto([]byte(response.Node.Value), &endpoints)
 	if err != nil {

@@ -72,7 +72,7 @@ func NewConfigSourceFile(filename string, serviceChannel chan ServiceUpdate, end
 // Run begins watching the config file.
 func (s ConfigSourceFile) Run() {
-	glog.Infof("Watching file %s", s.filename)
+	glog.V(1).Infof("Watching file %s", s.filename)
 	var lastData []byte
 	var lastServices []api.Service
 	var lastEndpoints []api.Endpoints

@@ -90,14 +90,14 @@ func (tcp *tcpProxySocket) ProxyLoop(service string, proxier *Proxier) {
 			glog.Errorf("Accept failed: %v", err)
 			continue
 		}
-		glog.Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
+		glog.V(2).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
 		endpoint, err := proxier.loadBalancer.NextEndpoint(service, inConn.RemoteAddr())
 		if err != nil {
 			glog.Errorf("Couldn't find an endpoint for %s %v", service, err)
 			inConn.Close()
 			continue
 		}
-		glog.Infof("Mapped service %s to endpoint %s", service, endpoint)
+		glog.V(3).Infof("Mapped service %s to endpoint %s", service, endpoint)
 		// TODO: This could spin up a new goroutine to make the outbound connection,
 		// and keep accepting inbound traffic.
 		outConn, err := net.DialTimeout("tcp", endpoint, endpointDialTimeout)
@@ -116,7 +116,7 @@
 func proxyTCP(in, out *net.TCPConn) {
 	var wg sync.WaitGroup
 	wg.Add(2)
-	glog.Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
+	glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
 		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
 	go copyBytes(in, out, &wg)
 	go copyBytes(out, in, &wg)
@@ -127,7 +127,7 @@ func proxyTCP(in, out *net.TCPConn) {
 func copyBytes(in, out *net.TCPConn, wg *sync.WaitGroup) {
 	defer wg.Done()
-	glog.Infof("Copying from %v <-> %v <-> %v <-> %v",
+	glog.V(4).Infof("Copying from %v <-> %v <-> %v <-> %v",
 		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
 	if _, err := io.Copy(in, out); err != nil {
 		glog.Errorf("I/O error: %v", err)
@@ -176,7 +176,7 @@ func (udp *udpProxySocket) ProxyLoop(service string, proxier *Proxier) {
 		if err != nil {
 			if e, ok := err.(net.Error); ok {
 				if e.Temporary() {
-					glog.Infof("ReadFrom had a temporary failure: %v", err)
+					glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
 					continue
 				}
 			}
@@ -214,13 +214,13 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr ne
 	if !found {
 		// TODO: This could spin up a new goroutine to make the outbound connection,
 		// and keep accepting inbound traffic.
-		glog.Infof("New UDP connection from %s", cliAddr)
+		glog.V(2).Infof("New UDP connection from %s", cliAddr)
 		endpoint, err := proxier.loadBalancer.NextEndpoint(service, cliAddr)
 		if err != nil {
 			glog.Errorf("Couldn't find an endpoint for %s %v", service, err)
 			return nil, err
 		}
-		glog.Infof("Mapped service %s to endpoint %s", service, endpoint)
+		glog.V(4).Infof("Mapped service %s to endpoint %s", service, endpoint)
 		svrConn, err = net.DialTimeout("udp", endpoint, endpointDialTimeout)
 		if err != nil {
 			// TODO: Try another endpoint?
@@ -269,7 +269,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
 func logTimeout(err error) bool {
 	if e, ok := err.(net.Error); ok {
 		if e.Timeout() {
-			glog.Infof("connection to endpoint closed due to inactivity")
+			glog.V(1).Infof("connection to endpoint closed due to inactivity")
 			return true
 		}
 	}
@@ -329,7 +329,7 @@ func (proxier *Proxier) stopProxyInternal(service string, info *serviceInfo) err
 	if !info.setActive(false) {
 		return nil
 	}
-	glog.Infof("Removing service: %s", service)
+	glog.V(3).Infof("Removing service: %s", service)
 	delete(proxier.serviceMap, service)
 	return info.socket.Close()
 }
@@ -375,7 +375,7 @@
 }

 func (proxier *Proxier) startAccepting(service string, sock proxySocket) {
-	glog.Infof("Listening for %s on %s:%s", service, sock.Addr().Network(), sock.Addr().String())
+	glog.V(1).Infof("Listening for %s on %s:%s", service, sock.Addr().Network(), sock.Addr().String())
 	go func(service string, proxier *Proxier) {
 		defer util.HandleCrash()
 		sock.ProxyLoop(service, proxier)
@@ -389,7 +389,7 @@ const udpIdleTimeout = 1 * time.Minute
 // Active service proxies are reinitialized if found in the update set or
 // shutdown if missing from the update set.
 func (proxier *Proxier) OnUpdate(services []api.Service) {
-	glog.Infof("Received update notice: %+v", services)
+	glog.V(4).Infof("Received update notice: %+v", services)
 	activeServices := util.StringSet{}
 	for _, service := range services {
 		activeServices.Insert(service.ID)
@@ -404,7 +404,7 @@
 			glog.Errorf("error stopping %s: %v", service.ID, err)
 		}
 	}
-	glog.Infof("Adding a new service %s on %s port %d", service.ID, service.Protocol, service.Port)
+	glog.V(3).Infof("Adding a new service %s on %s port %d", service.ID, service.Protocol, service.Port)
 	sock, err := newProxySocket(service.Protocol, proxier.address, service.Port)
 	if err != nil {
 		glog.Errorf("Failed to get a socket for %s: %+v", service.ID, err)

@@ -101,7 +101,7 @@ func (lb *LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
 		existingEndpoints, exists := lb.endpointsMap[endpoint.ID]
 		validEndpoints := filterValidEndpoints(endpoint.Endpoints)
 		if !exists || !reflect.DeepEqual(existingEndpoints, validEndpoints) {
-			glog.Infof("LoadBalancerRR: Setting endpoints for %s to %+v", endpoint.ID, endpoint.Endpoints)
+			glog.V(3).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", endpoint.ID, endpoint.Endpoints)
 			lb.endpointsMap[endpoint.ID] = validEndpoints
 			// Reset the round-robin index.
 			lb.rrIndex[endpoint.ID] = 0
@@ -111,7 +111,7 @@
 	// Remove endpoints missing from the update.
 	for k, v := range lb.endpointsMap {
 		if _, exists := registeredEndpoints[k]; !exists {
-			glog.Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", k, v)
+			glog.V(3).Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", k, v)
 			delete(lb.endpointsMap, k)
 		}
 	}

@@ -218,7 +218,7 @@ func (r *Registry) DeletePod(podID string) error {
 		// This really shouldn't happen, it indicates something is broken, and likely
 		// there is a lost pod somewhere.
 		// However it is "deleted" so log it and move on
-		glog.Infof("Couldn't find: %s in %#v", podID, manifests)
+		glog.Warningf("Couldn't find: %s in %#v", podID, manifests)
 	}
 	manifests.Items = newManifests
 	return manifests, nil

@@ -71,9 +71,7 @@ func (factory *ConfigFactory) Create() *scheduler.Config {
 		Binder: &binder{factory.Client},
 		NextPod: func() *api.Pod {
 			pod := podQueue.Pop().(*api.Pod)
-			// TODO: Remove or reduce verbosity by sep 6th, 2014. Leave until then to
-			// make it easy to find scheduling problems.
-			glog.Infof("About to try and schedule pod %v\n"+
+			glog.V(2).Infof("About to try and schedule pod %v\n"+
 				"\tknown minions: %v\n"+
 				"\tknown scheduled pods: %v\n",
 				pod.ID, minionCache.Contains(), podCache.Contains())
@@ -229,8 +227,6 @@ type binder struct {
 // Bind just does a POST binding RPC.
 func (b *binder) Bind(binding *api.Binding) error {
-	// TODO: Remove or reduce verbosity by sep 6th, 2014. Leave until then to
-	// make it easy to find scheduling problems.
-	glog.Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
+	glog.V(2).Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
 	return b.Post().Path("bindings").Body(binding).Do().Error()
 }