diff --git a/contrib/mesos/pkg/scheduler/components/framework/framework.go b/contrib/mesos/pkg/scheduler/components/framework/framework.go
index 38c161abd25..36c8e118370 100644
--- a/contrib/mesos/pkg/scheduler/components/framework/framework.go
+++ b/contrib/mesos/pkg/scheduler/components/framework/framework.go
@@ -548,7 +548,7 @@ func (k *framework) reconcileTerminalTask(driver bindings.SchedulerDriver, taskS
 		//is unrecognized by the master at this point, so KillTask is not guaranteed
 		//to do anything. The underlying driver transport may be able to send a
 		//FrameworkMessage directly to the slave to terminate the task.
-		log.V(2).Info("forwarding TASK_LOST message to executor %v on slave %v", taskStatus.ExecutorId, taskStatus.SlaveId)
+		log.V(2).Infof("forwarding TASK_LOST message to executor %v on slave %v", taskStatus.ExecutorId, taskStatus.SlaveId)
 		data := fmt.Sprintf("%s:%s", messages.TaskLost, task.ID) //TODO(jdef) use a real message type
 		if _, err := driver.SendFrameworkMessage(taskStatus.ExecutorId, taskStatus.SlaveId, data); err != nil {
 			log.Error(err.Error())
diff --git a/contrib/mesos/pkg/scheduler/components/podreconciler/podreconciler.go b/contrib/mesos/pkg/scheduler/components/podreconciler/podreconciler.go
index 72bd78a183e..a01417fe734 100644
--- a/contrib/mesos/pkg/scheduler/components/podreconciler/podreconciler.go
+++ b/contrib/mesos/pkg/scheduler/components/podreconciler/podreconciler.go
@@ -76,7 +76,7 @@ func (s *podReconciler) Reconcile(t *podtask.T) {
 		} else {
 			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
 			//For now, drop the pod on the floor
-			log.Warning("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
+			log.Warningf("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
 		}
 		return
 	}
diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go
index ce43de1f0e8..be4d63f9418 100644
--- a/pkg/cloudprovider/providers/aws/aws.go
+++ b/pkg/cloudprovider/providers/aws/aws.go
@@ -1413,7 +1413,7 @@ func (c *Cloud) DetachDisk(diskName string, instanceName string) (string, error)
 	}
 
 	if !alreadyAttached {
-		glog.Warning("DetachDisk called on non-attached disk: %s", diskName)
+		glog.Warningf("DetachDisk called on non-attached disk: %s", diskName)
 		// TODO: Continue? Tolerate non-attached error in DetachVolume?
 	}
 
diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go
index c647e049e9a..562e1ae5e4e 100644
--- a/pkg/cloudprovider/providers/vsphere/vsphere.go
+++ b/pkg/cloudprovider/providers/vsphere/vsphere.go
@@ -434,9 +434,9 @@ func (i *Instances) InstanceID(name string) (string, error) {
 	}
 
 	if mvm.Summary.Config.Template == false {
-		glog.Warning("VM %s, is not in %s state", name, ActivePowerState)
+		glog.Warningf("VM %s, is not in %s state", name, ActivePowerState)
 	} else {
-		glog.Warning("VM %s, is a template", name)
+		glog.Warningf("VM %s, is a template", name)
 	}
 
 	return "", cloudprovider.InstanceNotFound
diff --git a/pkg/dns/dns.go b/pkg/dns/dns.go
index 8d530ce262b..f9aebd2e65e 100644
--- a/pkg/dns/dns.go
+++ b/pkg/dns/dns.go
@@ -247,7 +247,7 @@ func (kd *KubeDNS) newService(obj interface{}) {
 			return
 		}
 		if len(service.Spec.Ports) == 0 {
-			glog.Warning("Unexpected service with no ports, this should not have happend: %v", service)
+			glog.Warningf("Unexpected service with no ports, this should not have happend: %v", service)
 		}
 		kd.newPortalService(service)
 	}
diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go
index 97d5d6ea722..d5264223a53 100644
--- a/plugin/pkg/scheduler/factory/factory.go
+++ b/plugin/pkg/scheduler/factory/factory.go
@@ -539,7 +539,7 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue
 					break
 				}
 				if errors.IsNotFound(err) {
-					glog.Warning("A pod %v no longer exists", podID)
+					glog.Warningf("A pod %v no longer exists", podID)
 					return
 				}
 				glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err)
diff --git a/test/integration/persistentvolumes/persistent_volumes_test.go b/test/integration/persistentvolumes/persistent_volumes_test.go
index 5e0d048c3a4..17d1f07cd37 100644
--- a/test/integration/persistentvolumes/persistent_volumes_test.go
+++ b/test/integration/persistentvolumes/persistent_volumes_test.go
@@ -246,7 +246,7 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 		newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
 		claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
 		if err != nil {
-			t.Fatal("Error creating newPvc: %v", err)
+			t.Fatalf("Error creating newPvc: %v", err)
 		}
 		claims = append(claims, claim)
 	}
diff --git a/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go b/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go
index 130172a05e6..29737434bfd 100644
--- a/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go
+++ b/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go
@@ -80,7 +80,7 @@ func parseThinLsOutput(output []byte) map[string]uint64 {
 		deviceID := fields[0]
 		usage, err := strconv.ParseUint(fields[1], 10, 64)
 		if err != nil {
-			glog.Warning("unexpected error parsing thin_ls output: %v", err)
+			glog.Warningf("unexpected error parsing thin_ls output: %v", err)
 			continue
 		}
 
diff --git a/vendor/github.com/mesos/mesos-go/executor/executor.go b/vendor/github.com/mesos/mesos-go/executor/executor.go
index 06c0c9317eb..6a2f2945fe5 100644
--- a/vendor/github.com/mesos/mesos-go/executor/executor.go
+++ b/vendor/github.com/mesos/mesos-go/executor/executor.go
@@ -292,7 +292,7 @@ func (driver *MesosExecutorDriver) recoveryTimedOut(connection string) {
 	}
 	// ensure that connection ID's match otherwise we've been re-registered
 	if connection == driver.connection.String() {
-		log.Info("recovery timeout of %v exceeded; shutting down", driver.recoveryTimeout)
+		log.Infof("recovery timeout of %v exceeded; shutting down", driver.recoveryTimeout)
 		driver.shutdown(driver.context(), nil, nil)
 	}
 }
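
Every hunk above fixes the same class of bug: a Print-style logging call (glog.Warning, log.Info, t.Fatal) is handed a format string plus arguments, but Print-style functions use fmt.Sprint semantics and never expand the %v/%s verbs, so the directive is logged literally and the arguments are simply concatenated after it. The following minimal sketch illustrates the difference using fmt.Sprint/fmt.Sprintf as stand-ins for the Print/Printf behavior of those loggers (the disk name is a hypothetical value chosen for illustration, not taken from the patch):

package main

import "fmt"

func main() {
	diskName := "vol-0123abcd" // hypothetical example value

	// Print-style (what glog.Warning, log.Info, t.Fatal do):
	// operands are concatenated, so the %s verb is emitted verbatim.
	fmt.Println(fmt.Sprint("DetachDisk called on non-attached disk: %s", diskName))
	// prints: DetachDisk called on non-attached disk: %svol-0123abcd

	// Printf-style (glog.Warningf, log.Infof, t.Fatalf):
	// the format verb is substituted as intended.
	fmt.Println(fmt.Sprintf("DetachDisk called on non-attached disk: %s", diskName))
	// prints: DetachDisk called on non-attached disk: vol-0123abcd
}

In most setups, go vet's printf check will flag Print-style calls that contain formatting directives, which is how mismatches like the ones patched here are usually caught before they reach the logs.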