diff --git a/cluster/addons/dns/kube2sky/kube2sky.go b/cluster/addons/dns/kube2sky/kube2sky.go
index 9c0c7a27218..d37b327ab46 100644
--- a/cluster/addons/dns/kube2sky/kube2sky.go
+++ b/cluster/addons/dns/kube2sky/kube2sky.go
@@ -336,7 +336,7 @@ func (ks *kube2sky) generateSRVRecord(subdomain, portSegment, recordName, cName
 
 func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
 	if len(service.Spec.Ports) == 0 {
-		glog.Fatalf("unexpected service with no ports: %v", service)
+		glog.Fatalf("Unexpected service with no ports: %v", service)
 	}
 	// if ClusterIP is not set, a DNS entry should not be created
 	if !kapi.IsServiceIPSet(service) {
diff --git a/contrib/mesos/pkg/election/etcd_master.go b/contrib/mesos/pkg/election/etcd_master.go
index b740c2f81ac..8eb4fdb69b3 100644
--- a/contrib/mesos/pkg/election/etcd_master.go
+++ b/contrib/mesos/pkg/election/etcd_master.go
@@ -71,7 +71,7 @@ func (e *etcdMasterElector) run(path, id string) {
 				Object: Master(m),
 			}
 		case e := <-errors:
-			glog.Errorf("error in election: %v", e)
+			glog.Errorf("Error in election: %v", e)
 		}
 	}
 }
diff --git a/contrib/mesos/pkg/executor/suicide_test.go b/contrib/mesos/pkg/executor/suicide_test.go
index cdb99f584d2..706ad2876e2 100644
--- a/contrib/mesos/pkg/executor/suicide_test.go
+++ b/contrib/mesos/pkg/executor/suicide_test.go
@@ -57,7 +57,7 @@ func (t *suicideTracker) Next(d time.Duration, driver bindings.ExecutorDriver, f
 
 func (t *suicideTracker) makeJumper(_ jumper) jumper {
 	return jumper(func(driver bindings.ExecutorDriver, cancel <-chan struct{}) {
-		glog.Warningln("jumping?!")
+		glog.Warningln("Jumping?!")
 		if t.jumps != nil {
 			atomic.AddUint32(t.jumps, 1)
 		}
@@ -103,7 +103,7 @@ func TestSuicide_WithTasks(t *testing.T) {
 	k.tasks["foo"] = &kuberTask{} // prevent suicide attempts from succeeding
 
 	// call reset with a nil timer
-	glog.Infoln("resetting suicide watch with 1 task")
+	glog.Infoln("Resetting suicide watch with 1 task")
 	select {
 	case <-k.resetSuicideWatch(nil):
 		tracker = k.suicideWatch.(*suicideTracker)
@@ -125,7 +125,7 @@ func TestSuicide_WithTasks(t *testing.T) {
 	suicideStart := time.Now()
 
 	// reset the suicide watch, which should actually start a timer now
-	glog.Infoln("resetting suicide watch with 0 tasks")
+	glog.Infoln("Resetting suicide watch with 0 tasks")
 	select {
 	case <-k.resetSuicideWatch(nil):
 		tracker = k.suicideWatch.(*suicideTracker)
@@ -147,7 +147,7 @@ func TestSuicide_WithTasks(t *testing.T) {
 	k.lock.Unlock()
 
 	// reset the suicide watch, which should stop the existing timer
-	glog.Infoln("resetting suicide watch with 1 task")
+	glog.Infoln("Resetting suicide watch with 1 task")
 	select {
 	case <-k.resetSuicideWatch(nil):
 		tracker = k.suicideWatch.(*suicideTracker)
@@ -169,7 +169,7 @@ func TestSuicide_WithTasks(t *testing.T) {
 	k.lock.Unlock()
 
 	// reset the suicide watch, which should reset a stopped timer
-	glog.Infoln("resetting suicide watch with 0 tasks")
+	glog.Infoln("Resetting suicide watch with 0 tasks")
 	select {
 	case <-k.resetSuicideWatch(nil):
 		tracker = k.suicideWatch.(*suicideTracker)
@@ -192,6 +192,6 @@ func TestSuicide_WithTasks(t *testing.T) {
 	if j := atomic.LoadUint32(&jumps); j != 1 {
 		t.Fatalf("expected 1 jumps instead of %d since stop was called", j)
 	} else {
-		glog.Infoln("jumps verified") // glog so we get a timestamp
+		glog.Infoln("Jumps verified") // glog so we get a timestamp
 	}
 }
diff --git a/contrib/mesos/pkg/scheduler/service/publish.go b/contrib/mesos/pkg/scheduler/service/publish.go
index a9597f3de0f..adfabfdde42 100644
--- a/contrib/mesos/pkg/scheduler/service/publish.go
+++ b/contrib/mesos/pkg/scheduler/service/publish.go
@@ -112,7 +112,7 @@ func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int)
 	}
 	if !reflect.DeepEqual(e.Subsets, want) {
 		e.Subsets = want
-		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
+		glog.Infof("Setting endpoints for master service %q to %#v", serviceName, e)
 		_, err = createOrUpdate(e)
 		return err
 	}
diff --git a/contrib/mesos/pkg/service/endpoints_controller.go b/contrib/mesos/pkg/service/endpoints_controller.go
index 575b5e68db6..95ebf3f1089 100644
--- a/contrib/mesos/pkg/service/endpoints_controller.go
+++ b/contrib/mesos/pkg/service/endpoints_controller.go
@@ -350,7 +350,7 @@ func (e *endpointController) syncService(key string) {
 		}
 	}
 	if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) {
-		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
+		glog.V(5).Infof("Endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
 		return
 	}
 	newEndpoints := currentEndpoints
diff --git a/pkg/client/unversioned/clientcmd/merged_client_builder.go b/pkg/client/unversioned/clientcmd/merged_client_builder.go
index 42c8d1df551..2888981f9f7 100644
--- a/pkg/client/unversioned/clientcmd/merged_client_builder.go
+++ b/pkg/client/unversioned/clientcmd/merged_client_builder.go
@@ -86,7 +86,7 @@ func (config DeferredLoadingClientConfig) ClientConfig() (*client.Config, error)
 	icc := inClusterClientConfig{}
 	defaultConfig, err := DefaultClientConfig.ClientConfig()
 	if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) {
-		glog.V(2).Info("no kubeconfig could be created, falling back to service account.")
+		glog.V(2).Info("No kubeconfig could be created, falling back to service account.")
 		return icc.ClientConfig()
 	}
 
diff --git a/pkg/client/unversioned/helper.go b/pkg/client/unversioned/helper.go
index a4d114787b6..aa518191b27 100644
--- a/pkg/client/unversioned/helper.go
+++ b/pkg/client/unversioned/helper.go
@@ -259,7 +259,7 @@ func InClusterConfig() (*Config, error) {
 	tlsClientConfig := TLSClientConfig{}
 	rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey
 	if _, err := util.CertPoolFromFile(rootCAFile); err != nil {
-		glog.Errorf("expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+		glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
 	} else {
 		tlsClientConfig.CAFile = rootCAFile
 	}
diff --git a/pkg/client/unversioned/request.go b/pkg/client/unversioned/request.go
index ed9ff4acd9d..afe491dcbf4 100644
--- a/pkg/client/unversioned/request.go
+++ b/pkg/client/unversioned/request.go
@@ -309,13 +309,13 @@ type versionToResourceToFieldMapping map[string]resourceTypeToFieldMapping
 func (v versionToResourceToFieldMapping) filterField(apiVersion, resourceType, field, value string) (newField, newValue string, err error) {
 	rMapping, ok := v[apiVersion]
 	if !ok {
-		glog.Warningf("field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
+		glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
 		return field, value, nil
 	}
 	newField, newValue, err = rMapping.filterField(resourceType, field, value)
 	if err != nil {
 		// This is only a warning until we find and fix all of the client's usages.
-		glog.Warningf("field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
+		glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", apiVersion, resourceType, field, value)
 		return field, value, nil
 	}
 	return newField, newValue, nil
diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go
index 943346c40ba..3eb2a1d3d35 100644
--- a/pkg/cloudprovider/providers/aws/aws.go
+++ b/pkg/cloudprovider/providers/aws/aws.go
@@ -1514,7 +1514,7 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string, vpcID st
 			}
 		}
 		if !ignore {
-			glog.Error("error creating security group: ", err)
+			glog.Error("Error creating security group: ", err)
 			return "", err
 		}
 		time.Sleep(1 * time.Second)
@@ -1617,7 +1617,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
 
 	subnets, err := s.ec2.DescribeSubnets(request)
 	if err != nil {
-		glog.Error("error describing subnets: ", err)
+		glog.Error("Error describing subnets: ", err)
 		return nil, err
 	}
 
@@ -1625,7 +1625,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
 	for _, subnet := range subnets {
 		subnetIDs = append(subnetIDs, orEmpty(subnet.SubnetId))
 		if !strings.HasPrefix(orEmpty(subnet.AvailabilityZone), region) {
-			glog.Error("found AZ that did not match region", orEmpty(subnet.AvailabilityZone), " vs ", region)
+			glog.Error("Found AZ that did not match region", orEmpty(subnet.AvailabilityZone), " vs ", region)
 			return nil, fmt.Errorf("invalid AZ for region")
 		}
 		// zones = append(zones, subnet.AvailabilityZone)
@@ -1639,7 +1639,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, p
 	sgDescription := "Security group for Kubernetes ELB " + name
 	securityGroupID, err = s.ensureSecurityGroup(sgName, sgDescription, orEmpty(vpc.VpcId))
 	if err != nil {
-		glog.Error("error creating load balancer security group: ", err)
+		glog.Error("Error creating load balancer security group: ", err)
 		return nil, err
 	}
 
@@ -1814,7 +1814,7 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan
 	for _, instance := range allInstances {
 		securityGroupId := findSecurityGroupForInstance(instance)
 		if isNilOrEmpty(securityGroupId) {
-			glog.Warning("ignoring instance without security group: ", orEmpty(instance.InstanceId))
+			glog.Warning("Ignoring instance without security group: ", orEmpty(instance.InstanceId))
 			continue
 		}
 
@@ -1824,7 +1824,7 @@ func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalan
 		// Compare to actual groups
 		for _, actualGroup := range actualGroups {
 			if isNilOrEmpty(actualGroup.GroupId) {
-				glog.Warning("ignoring group without ID: ", actualGroup)
+				glog.Warning("Ignoring group without ID: ", actualGroup)
 				continue
 			}
 
@@ -1899,7 +1899,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancerDeleted(name, region string) error {
 		// De-authorize the load balancer security group from the instances security group
 		err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
 		if err != nil {
-			glog.Error("error deregistering load balancer from instance security groups: ", err)
+			glog.Error("Error deregistering load balancer from instance security groups: ", err)
 			return err
 		}
 	}
@@ -1912,7 +1912,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancerDeleted(name, region string) error {
 		_, err = s.elb.DeleteLoadBalancer(request)
 		if err != nil {
 			// TODO: Check if error was because load balancer was concurrently deleted
-			glog.Error("error deleting load balancer: ", err)
+			glog.Error("Error deleting load balancer: ", err)
 			return err
 		}
 	}
diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go
index d88255e6807..d5f287e28e3 100644
--- a/pkg/cloudprovider/providers/gce/gce.go
+++ b/pkg/cloudprovider/providers/gce/gce.go
@@ -340,7 +340,7 @@ func translateAffinityType(affinityType api.ServiceAffinity) GCEAffinityType {
 	case api.ServiceAffinityNone:
 		return GCEAffinityTypeNone
 	default:
-		glog.Errorf("unexpected affinity type: %v", affinityType)
+		glog.Errorf("Unexpected affinity type: %v", affinityType)
 		return GCEAffinityTypeNone
 	}
 }
diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go
index a9a08524d2e..be3bf184eaf 100644
--- a/plugin/pkg/scheduler/factory/factory.go
+++ b/plugin/pkg/scheduler/factory/factory.go
@@ -123,7 +123,7 @@ func (f *ConfigFactory) Create() (*scheduler.Config, error) {
 
 // Creates a scheduler from the name of a registered algorithm provider.
 func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {
-	glog.V(2).Infof("creating scheduler from algorithm provider '%v'", providerName)
+	glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
 	provider, err := GetAlgorithmProvider(providerName)
 	if err != nil {
 		return nil, err
@@ -134,7 +134,7 @@ func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Conf
 
 // Creates a scheduler from the configuration file
 func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
-	glog.V(2).Infof("creating scheduler from configuration: %v", policy)
+	glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
 
 	// validate the policy configuration
 	if err := validation.ValidatePolicy(policy); err != nil {
diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go
index 2df1193b346..9ec7001e5f7 100644
--- a/plugin/pkg/scheduler/factory/plugins.go
+++ b/plugin/pkg/scheduler/factory/plugins.go
@@ -271,7 +271,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")
 
 func validateAlgorithmNameOrDie(name string) {
 	if !validName.MatchString(name) {
-		glog.Fatalf("algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
+		glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
 	}
 }
 
diff --git a/plugin/pkg/scheduler/generic_scheduler.go b/plugin/pkg/scheduler/generic_scheduler.go
index 7ce4775f864..649d89484a9 100644
--- a/plugin/pkg/scheduler/generic_scheduler.go
+++ b/plugin/pkg/scheduler/generic_scheduler.go
@@ -44,7 +44,7 @@ func (f *FitError) Error() string {
 	var reason string
 	// We iterate over all nodes for logging purposes, even though we only return one reason from one node
 	for node, predicateList := range f.FailedPredicates {
-		glog.Infof("failed to find fit for pod %v on node %s: %s", f.Pod.Name, node, strings.Join(predicateList.List(), ","))
+		glog.Infof("Failed to find fit for pod %v on node %s: %s", f.Pod.Name, node, strings.Join(predicateList.List(), ","))
 		if len(reason) == 0 {
 			reason, _ = predicateList.PopAny()
 		}
@@ -195,7 +195,7 @@ func getBestHosts(list algorithm.HostPriorityList) []string {
 
 func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
 	nodes, err := nodeLister.List()
 	if err != nil {
-		glog.Errorf("failed to list nodes: %v", err)
+		glog.Errorf("Failed to list nodes: %v", err)
 		return []algorithm.HostPriority{}, err
 	}
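The change above is purely mechanical: the first word of each glog message is capitalized, while verbosity levels, format verbs, and arguments stay untouched. For anyone applying the same convention elsewhere, here is a minimal, self-contained sketch (not part of the diff; describeSubnets is a hypothetical stand-in for a fallible call such as s.ec2.DescribeSubnets) showing the distinction the diff preserves: Go error strings stay lower-case because callers wrap and concatenate them, whereas human-facing log lines start with a capital letter.

package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

// describeSubnets is a hypothetical helper that can fail.
func describeSubnets() error {
	// Error strings stay lower-case and unpunctuated (Go convention),
	// since callers may wrap or concatenate them.
	return fmt.Errorf("error describing subnets")
}

func main() {
	_ = flag.Set("logtostderr", "true") // glog writes to files by default
	flag.Parse()                        // glog reads its flags here
	defer glog.Flush()

	if err := describeSubnets(); err != nil {
		// Log messages begin with a capital letter, per this change.
		glog.Errorf("Error describing subnets: %v", err)
	}
}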