diff --git a/cluster/gce/gci/mounter/mounter.go b/cluster/gce/gci/mounter/mounter.go
index 8f674ecdc8f..88dc459ea81 100644
--- a/cluster/gce/gci/mounter/mounter.go
+++ b/cluster/gce/gci/mounter/mounter.go
@@ -79,14 +79,14 @@ func mountInChroot(rootfsPath string, args []string) error {
 	// Mount failed because it is NFS V3 and we need to run rpcBind
 	output, err = exec.Command(chrootCmd, rootfsPath, rpcBindCmd, "-w").CombinedOutput()
 	if err != nil {
-		return fmt.Errorf("Mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
+		return fmt.Errorf("mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
 	}
 
 	// Rpcbind is running, try mounting again
 	output, err = exec.Command(chrootCmd, args...).CombinedOutput()
 	if err != nil {
-		return fmt.Errorf("Mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
+		return fmt.Errorf("mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
 	}
 
 	return nil
diff --git a/cluster/images/etcd/migrate/migrate_client.go b/cluster/images/etcd/migrate/migrate_client.go
index b9c9cfb62bb..cfb019f7cde 100644
--- a/cluster/images/etcd/migrate/migrate_client.go
+++ b/cluster/images/etcd/migrate/migrate_client.go
@@ -195,12 +195,12 @@ func (e *CombinedEtcdClient) AttachLease(leaseDuration time.Duration) error {
 	defer v3client.Close()
 	objectsResp, err := v3client.KV.Get(ctx, ttlKeysPrefix, clientv3.WithPrefix())
 	if err != nil {
-		return fmt.Errorf("Error while getting objects to attach to the lease")
+		return fmt.Errorf("error while getting objects to attach to the lease")
 	}
 
 	lease, err := v3client.Lease.Grant(ctx, int64(leaseDuration/time.Second))
 	if err != nil {
-		return fmt.Errorf("Error while creating lease: %v", err)
+		return fmt.Errorf("error while creating lease: %v", err)
 	}
 	klog.Infof("Lease with TTL: %v created", lease.TTL)
diff --git a/cluster/images/etcd/migrate/migrate_server.go b/cluster/images/etcd/migrate/migrate_server.go
index ea630ff8b4a..54f375ceace 100644
--- a/cluster/images/etcd/migrate/migrate_server.go
+++ b/cluster/images/etcd/migrate/migrate_server.go
@@ -87,7 +87,7 @@ func (r *EtcdMigrateServer) Start(version *EtcdVersion) error {
 			if err != nil {
 				return fmt.Errorf("error killing etcd: %v", err)
 			}
-			return fmt.Errorf("Timed out waiting for etcd on port %d", r.cfg.port)
+			return fmt.Errorf("timed out waiting for etcd on port %d", r.cfg.port)
 		}
 	}
 }
diff --git a/cluster/images/etcd/migrate/versions.go b/cluster/images/etcd/migrate/versions.go
index 5243330ae21..3b429c92d53 100644
--- a/cluster/images/etcd/migrate/versions.go
+++ b/cluster/images/etcd/migrate/versions.go
@@ -111,7 +111,7 @@ type EtcdVersionPair struct {
 func ParseEtcdVersionPair(s string) (*EtcdVersionPair, error) {
 	parts := strings.Split(s, "/")
 	if len(parts) != 2 {
-		return nil, fmt.Errorf("Malformed version file, expected ../ but got %s", s)
+		return nil, fmt.Errorf("malformed version file, expected ../ but got %s", s)
 	}
 	version, err := ParseEtcdVersion(parts[0])
 	if err != nil {
diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go
index 78688db5d6d..fec5f90a2e9 100644
--- a/cmd/kube-proxy/app/server_test.go
+++ b/cmd/kube-proxy/app/server_test.go
@@ -393,7 +393,7 @@ func TestConfigChange(t *testing.T) {
 	setUp := func() (*os.File, string, error) {
 		tempDir, err := ioutil.TempDir("", "kubeproxy-config-change")
 		if err != nil {
-			return nil, "", fmt.Errorf("Unable to create temporary directory: %v", err)
+			return nil, "", fmt.Errorf("unable to create temporary directory: %v", err)
 		}
 		fullPath := filepath.Join(tempDir, "kube-proxy-config")
 		file, err := os.Create(fullPath)
diff --git a/cmd/kubeadm/app/cmd/alpha/kubelet.go b/cmd/kubeadm/app/cmd/alpha/kubelet.go
index 121adb95450..827c0c899ef 100644
--- a/cmd/kubeadm/app/cmd/alpha/kubelet.go
+++ b/cmd/kubeadm/app/cmd/alpha/kubelet.go
@@ -138,10 +138,10 @@ func newCmdKubeletConfigEnableDynamic() *cobra.Command {
 		Example: kubeletConfigEnableDynamicExample,
 		Run: func(cmd *cobra.Command, args []string) {
 			if len(nodeName) == 0 {
-				kubeadmutil.CheckErr(errors.New("The --node-name argument is required"))
+				kubeadmutil.CheckErr(errors.New("the --node-name argument is required"))
 			}
 			if len(kubeletVersionStr) == 0 {
-				kubeadmutil.CheckErr(errors.New("The --kubelet-version argument is required"))
+				kubeadmutil.CheckErr(errors.New("the --kubelet-version argument is required"))
 			}
 
 			kubeletVersion, err := version.ParseSemantic(kubeletVersionStr)
diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go
index e37a91dc815..8c09702b1fa 100644
--- a/cmd/kubeadm/app/cmd/config.go
+++ b/cmd/kubeadm/app/cmd/config.go
@@ -244,7 +244,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command {
 		`), kubeadmapiv1beta2.SchemeGroupVersion, kubeadmapiv1beta2.SchemeGroupVersion),
 		Run: func(cmd *cobra.Command, args []string) {
 			if len(oldCfgPath) == 0 {
-				kubeadmutil.CheckErr(errors.New("The --old-config flag is mandatory"))
+				kubeadmutil.CheckErr(errors.New("the --old-config flag is mandatory"))
 			}
 
 			oldCfgBytes, err := ioutil.ReadFile(oldCfgPath)
@@ -321,7 +321,7 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co
 		`), metav1.NamespaceSystem, constants.KubeadmConfigConfigMap),
 		Run: func(cmd *cobra.Command, args []string) {
 			if len(cfgPath) == 0 {
-				kubeadmutil.CheckErr(errors.New("The --config flag is mandatory"))
+				kubeadmutil.CheckErr(errors.New("the --config flag is mandatory"))
 			}
 
 			klog.V(1).Infoln("[config] retrieving ClientSet from file")
diff --git a/cmd/kubeadm/app/cmd/phases/reset/preflight.go b/cmd/kubeadm/app/cmd/phases/reset/preflight.go
index 2473265afdc..e68985ea76b 100644
--- a/cmd/kubeadm/app/cmd/phases/reset/preflight.go
+++ b/cmd/kubeadm/app/cmd/phases/reset/preflight.go
@@ -58,7 +58,7 @@ func runPreflight(c workflow.RunData) error {
 			return err
 		}
 		if strings.ToLower(s.Text()) != "y" {
-			return errors.New("Aborted reset operation")
+			return errors.New("aborted reset operation")
 		}
 	}
diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go
index bfc02075f1a..86d195ad2de 100644
--- a/cmd/kubeadm/app/constants/constants.go
+++ b/cmd/kubeadm/app/constants/constants.go
@@ -428,7 +428,7 @@ func EtcdSupportedVersion(versionString string) (*version.Version, error) {
 		}
 		return etcdVersion, nil
 	}
-	return nil, errors.Errorf("Unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
+	return nil, errors.Errorf("unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
 }
 
 // GetStaticPodDirectory returns the location on the disk where the Static Pod should be present
diff --git a/cmd/kubeadm/app/constants/constants_test.go b/cmd/kubeadm/app/constants/constants_test.go
index 95377623f4a..f8590962221 100644
--- a/cmd/kubeadm/app/constants/constants_test.go
+++ b/cmd/kubeadm/app/constants/constants_test.go
@@ -157,7 +157,7 @@ func TestEtcdSupportedVersion(t *testing.T) {
 		{
 			kubernetesVersion: "1.99.0",
 			expectedVersion:   nil,
-			expectedError:     errors.New("Unsupported or unknown Kubernetes version(1.99.0)"),
+			expectedError:     errors.New("unsupported or unknown Kubernetes version(1.99.0)"),
 		},
 		{
 			kubernetesVersion: MinimumControlPlaneVersion.WithPatch(1).String(),
diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go
index 03c4d0f500e..ad17e8f6da2 100644
--- a/cmd/kubeadm/app/phases/upgrade/prepull.go
+++ b/cmd/kubeadm/app/phases/upgrade/prepull.go
@@ -132,7 +132,7 @@ func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string,
 	for {
 		select {
 		case <-timeoutChan:
-			return errors.New("The prepull operation timed out")
+			return errors.New("the prepull operation timed out")
 		case result := <-stringChan:
 			i++
 			// If the cleanup function errors; error here as well
diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go
index 22db993b695..8b2d421a7b1 100644
--- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go
+++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go
@@ -158,7 +158,7 @@ func TestMutateConfigMapWithConflict(t *testing.T) {
 		update := action.(core.UpdateAction)
 		if conflict > 0 {
 			conflict--
-			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("Conflict"))
+			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("conflict"))
 		}
 		return false, update.GetObject(), nil
 	})
diff --git a/cmd/kubelet/app/auth.go b/cmd/kubelet/app/auth.go
index 14cb0da3e33..64109da3f97 100644
--- a/cmd/kubelet/app/auth.go
+++ b/cmd/kubelet/app/auth.go
@@ -98,10 +98,10 @@ func BuildAuthz(client authorizationclient.SubjectAccessReviewInterface, authz k
 		return authorizerConfig.New()
 
 	case "":
-		return nil, fmt.Errorf("No authorization mode specified")
+		return nil, fmt.Errorf("no authorization mode specified")
 
 	default:
-		return nil, fmt.Errorf("Unknown authorization mode %s", authz.Mode)
+		return nil, fmt.Errorf("unknown authorization mode %s", authz.Mode)
 
 	}
 }
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 1f357a4ad90..207893d8be8 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -418,7 +418,7 @@ func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
 
 func checkPermissions() error {
 	if uid := os.Getuid(); uid != 0 {
-		return fmt.Errorf("Kubelet needs to run as uid `0`. It is being run as %d", uid)
+		return fmt.Errorf("kubelet needs to run as uid `0`. It is being run as %d", uid)
 	}
 	// TODO: Check if kubelet is running in the `initial` user namespace.
 	// http://man7.org/linux/man-pages/man7/user_namespaces.7.html
diff --git a/hack/e2e.go b/hack/e2e.go
index 6352f42f65e..6dc9b9e8fa4 100644
--- a/hack/e2e.go
+++ b/hack/e2e.go
@@ -153,7 +153,7 @@ func (t tester) getKubetest(get bool, old time.Duration) (string, error) {
 		log.Printf("The kubetest binary is older than %s.", old)
 	}
 	if t.goPath == "" {
-		return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set")
+		return "", fmt.Errorf("cannot install kubetest until $GOPATH is set")
 	}
 	log.Print("Updating kubetest binary...")
 	cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"}
diff --git a/hack/e2e_test.go b/hack/e2e_test.go
index cbede240968..ebd9bb88b81 100644
--- a/hack/e2e_test.go
+++ b/hack/e2e_test.go
@@ -138,10 +138,10 @@ func TestParse(t *testing.T) {
 }
 
 func TestLook(t *testing.T) {
-	lpf := errors.New("LookPath failed")
-	sf := errors.New("Stat failed")
-	lpnc := errors.New("LookPath should not be called")
-	snc := errors.New("Stat should not be called")
+	lpf := errors.New("lookPath failed")
+	sf := errors.New("stat failed")
+	lpnc := errors.New("lookPath should not be called")
+	snc := errors.New("stat should not be called")
 	cases := []struct {
 		stat     error
 		lookPath error
@@ -324,7 +324,7 @@ func TestGetKubetest(t *testing.T) {
 			stat: func(p string) (os.FileInfo, error) {
 				// stat
 				if p != c.stat {
-					return nil, fmt.Errorf("Failed to find %s", p)
+					return nil, fmt.Errorf("failed to find %s", p)
 				}
 				return FileInfo{time.Now().Add(c.age * -1)}, nil
 			},
@@ -332,7 +332,7 @@
 				if c.path {
 					return filepath.Join(p, name), nil
 				}
-				return "", fmt.Errorf("Not on path: %s", name)
+				return "", fmt.Errorf("not on path: %s", name)
 			},
 			goPath: c.goPath,
 			wait: func(cmd string, args ...string) error {
diff --git a/pkg/api/testing/compat/compatibility_tester.go b/pkg/api/testing/compat/compatibility_tester.go
index 941b139c768..2dceeb3c1aa 100644
--- a/pkg/api/testing/compat/compatibility_tester.go
+++ b/pkg/api/testing/compat/compatibility_tester.go
@@ -114,17 +114,17 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo
 	// Look up the value
 	value, ok := data[key]
 	if !ok {
-		return nil, false, fmt.Errorf("No key %s found", key)
+		return nil, false, fmt.Errorf("no key %s found", key)
 	}
 
 	// Get the indexed value if an index is specified
 	if index >= 0 {
 		valueSlice, ok := value.([]interface{})
 		if !ok {
-			return nil, false, fmt.Errorf("Key %s did not hold a slice", key)
+			return nil, false, fmt.Errorf("key %s did not hold a slice", key)
 		}
 		if index >= len(valueSlice) {
-			return nil, false, fmt.Errorf("Index %d out of bounds for slice at key: %v", index, key)
+			return nil, false, fmt.Errorf("index %d out of bounds for slice at key: %v", index, key)
 		}
 		value = valueSlice[index]
 	}
@@ -135,7 +135,7 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo
 
 	childData, ok := value.(map[string]interface{})
 	if !ok {
-		return nil, false, fmt.Errorf("Key %s did not hold a map", keys[0])
+		return nil, false, fmt.Errorf("key %s did not hold a map", keys[0])
 	}
 	return getJSONValue(childData, keys[1:]...)
 }
diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go
index a3cce12f8d4..b983d924843 100644
--- a/pkg/api/v1/resource/helpers.go
+++ b/pkg/api/v1/resource/helpers.go
@@ -148,7 +148,7 @@ func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.C
 		return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor)
 	}
 
-	return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
+	return "", fmt.Errorf("unsupported container resource : %v", fs.Resource)
 }
 
 // convertResourceCPUToString converts cpu value to the format of divisor and returns
diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go
index 228a26ef362..83b41b14db9 100644
--- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go
+++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go
@@ -1519,7 +1519,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName
 	if lbaas.opts.ManageSecurityGroups {
 		err := lbaas.EnsureSecurityGroupDeleted(clusterName, service)
 		if err != nil {
-			return fmt.Errorf("Failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
+			return fmt.Errorf("failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
 		}
 	}
 
@@ -1536,7 +1536,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
 			// It is OK when the security group has been deleted by others.
 			return nil
 		}
-		return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
+		return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
 	}
 
 	lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
@@ -1567,7 +1567,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
 		for _, rule := range secGroupRules {
 			res := rules.Delete(lbaas.network, rule.ID)
 			if res.Err != nil && !isNotFound(res.Err) {
-				return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
+				return fmt.Errorf("error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
 			}
 		}
 	}
diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go
index a0cde41e7a4..1d4377781c8 100644
--- a/pkg/controller/certificates/rootcacertpublisher/publisher.go
+++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go
@@ -211,11 +211,11 @@ func convertToCM(obj interface{}) (*v1.ConfigMap, error) {
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			return nil, fmt.Errorf("Couldn't get object from tombstone %#v", obj)
+			return nil, fmt.Errorf("couldn't get object from tombstone %#v", obj)
 		}
 		cm, ok = tombstone.Obj.(*v1.ConfigMap)
 		if !ok {
-			return nil, fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj)
+			return nil, fmt.Errorf("tombstone contained object that is not a ConfigMap %#v", obj)
 		}
 	}
 	return cm, nil
diff --git a/pkg/controller/certificates/signer/cfssl_signer.go b/pkg/controller/certificates/signer/cfssl_signer.go
index 3c704f86d92..3ea3ca22e51 100644
--- a/pkg/controller/certificates/signer/cfssl_signer.go
+++ b/pkg/controller/certificates/signer/cfssl_signer.go
@@ -87,7 +87,7 @@ func newCFSSLSigner(caFile, caKeyFile string, client clientset.Interface, certif
 	priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
 	if err != nil {
-		return nil, fmt.Errorf("Malformed private key %v", err)
+		return nil, fmt.Errorf("malformed private key %v", err)
 	}
 	return &cfsslSigner{
 		priv: priv,
diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go
index 545fdd12b72..84876c6d334 100644
--- a/pkg/controller/cronjob/utils.go
+++ b/pkg/controller/cronjob/utils.go
@@ -94,7 +94,7 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time
 	starts := []time.Time{}
 	sched, err := cron.ParseStandard(sj.Spec.Schedule)
 	if err != nil {
-		return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
+		return starts, fmt.Errorf("unparseable schedule: %s : %s", sj.Spec.Schedule, err)
 	}
 
 	var earliestTime time.Time
diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go
index f45472ee959..3172f23ff60 100644
--- a/pkg/controller/daemon/update.go
+++ b/pkg/controller/daemon/update.go
@@ -49,7 +49,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v
 	_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
 	maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
 	if err != nil {
-		return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
+		return fmt.Errorf("couldn't get unavailable numbers: %v", err)
 	}
 	oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
 
@@ -416,7 +416,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeL
 	}
 	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
 	if err != nil {
-		return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
+		return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
 	}
 	klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
 	return maxUnavailable, numUnavailable, nil
diff --git a/pkg/controller/garbagecollector/garbagecollector_test.go b/pkg/controller/garbagecollector/garbagecollector_test.go
index aa11e0b7495..766bd5423f6 100644
--- a/pkg/controller/garbagecollector/garbagecollector_test.go
+++ b/pkg/controller/garbagecollector/garbagecollector_test.go
@@ -857,7 +857,7 @@ func TestGarbageCollectorSync(t *testing.T) {
 
 	// Simulate the discovery client returning an error
 	fakeDiscoveryClient.setPreferredResources(nil)
-	fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
+	fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
 
 	// Wait until sync discovers the change
 	time.Sleep(1 * time.Second)
diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index bc5a493cefb..a29ffabfd4b 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -199,7 +199,7 @@ func TestControllerSyncJob(t *testing.T) {
 		},
 		"too few active pods, with controller error": {
 			2, 5, 6, false, 0,
-			fmt.Errorf("Fake error"), true, 0, 1, 1, 0,
+			fmt.Errorf("fake error"), true, 0, 1, 1, 0,
 			1, 0, 1, 1, 0, nil, "",
 		},
 		"too many active pods": {
@@ -209,17 +209,17 @@ func TestControllerSyncJob(t *testing.T) {
 		},
 		"too many active pods, with controller error": {
 			2, 5, 6, false, 0,
-			fmt.Errorf("Fake error"), true, 0, 3, 0, 0,
+			fmt.Errorf("fake error"), true, 0, 3, 0, 0,
 			0, 1, 3, 0, 0, nil, "",
 		},
 		"failed + succeed pods: reset backoff delay": {
 			2, 5, 6, false, 0,
-			fmt.Errorf("Fake error"), true, 0, 1, 1, 1,
+			fmt.Errorf("fake error"), true, 0, 1, 1, 1,
 			1, 0, 1, 1, 1, nil, "",
 		},
 		"only new failed pod": {
 			2, 5, 6, false, 0,
-			fmt.Errorf("Fake error"), false, 0, 1, 0, 1,
+			fmt.Errorf("fake error"), false, 0, 1, 0, 1,
 			1, 0, 1, 0, 1, nil, "",
 		},
 		"job finish": {
@@ -575,7 +575,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
-	updateError := fmt.Errorf("Update error")
+	updateError := fmt.Errorf("update error")
 	manager.updateHandler = func(job *batch.Job) error {
 		manager.queue.AddRateLimited(testutil.GetKey(job, t))
 		return updateError
diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go
index 3e7b8214eaa..96079a1688f 100644
--- a/pkg/controller/nodeipam/ipam/cidr_allocator.go
+++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go
@@ -101,7 +101,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
 	case CloudAllocatorType:
 		return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
 	default:
-		return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType)
+		return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
 	}
 }
 
@@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
 		}
 		return true, nil
 	}); pollErr != nil {
-		return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map",
+		return nil, fmt.Errorf("failed to list all nodes in %v, cannot proceed without updating CIDR map",
 			apiserverStartupGracePeriod)
 	}
 	return nodeList, nil
diff --git a/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go b/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go
index 38a0521d2dd..939e581f82d 100644
--- a/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go
+++ b/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go
@@ -162,7 +162,7 @@ func (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err
 	var ipSize int
 
 	if cidr == nil {
-		return -1, -1, fmt.Errorf("Error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
+		return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
 	}
 
 	if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {
diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go
index bfe80ca2440..09c33892f59 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator.go
@@ -276,12 +276,12 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
 	for idx, cidr := range node.Spec.PodCIDRs {
 		_, podCIDR, err := net.ParseCIDR(cidr)
 		if err != nil {
-			return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
+			return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
 		}
 
 		klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name)
 		if err = r.cidrSets[idx].Release(podCIDR); err != nil {
-			return fmt.Errorf("Error when releasing CIDR %v: %v", cidr, err)
+			return fmt.Errorf("error when releasing CIDR %v: %v", cidr, err)
 		}
 	}
 	return nil
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index 1f40d3f5c2d..b3426b6ae71 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -278,7 +278,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
 	// If all metrics are invalid return error and set condition on hpa based on first invalid metric.
 	if invalidMetricsCount >= len(metricSpecs) {
 		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
-		return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
+		return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
 	}
 	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
 	return replicas, metric, statuses, timestamp, nil
diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go
index 4c98ef4d88b..99847bc7a92 100644
--- a/pkg/controller/replicaset/replica_set_test.go
+++ b/pkg/controller/replicaset/replica_set_test.go
@@ -326,7 +326,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	rsSpec.Status.ReadyReplicas = 1
 	rsSpec.Status.AvailableReplicas = 1
 	fakePodControl.Clear()
-	fakePodControl.Err = fmt.Errorf("Fake Error")
+	fakePodControl.Err = fmt.Errorf("fake Error")
 
 	manager.syncReplicaSet(GetKey(rsSpec, t))
 	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
@@ -670,7 +670,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
 	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
-		return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
+		return true, &apps.ReplicaSet{}, fmt.Errorf("fake error")
 	})
 	fakeRSClient := fakeClient.AppsV1().ReplicaSets("default")
 	numReplicas := int32(10)
@@ -1136,11 +1136,11 @@ func TestPatchPodFails(t *testing.T) {
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
 	// let both patches fail. The rs controller will assume it fails to take
 	// control of the pods and requeue to try again.
-	fakePodControl.Err = fmt.Errorf("Fake Error")
+	fakePodControl.Err = fmt.Errorf("fake Error")
 	rsKey := GetKey(rs, t)
 	err := processSync(manager, rsKey)
-	if err == nil || !strings.Contains(err.Error(), "Fake Error") {
-		t.Errorf("expected Fake Error, got %+v", err)
+	if err == nil || !strings.Contains(err.Error(), "fake Error") {
+		t.Errorf("expected fake Error, got %+v", err)
 	}
 	// 2 patches to take control of pod1 and pod2 (both fail).
 	validateSyncReplicaSet(t, fakePodControl, 0, 0, 2)
diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go
index 031fe47ebc0..be09292777f 100644
--- a/pkg/controller/resourcequota/resource_quota_controller_test.go
+++ b/pkg/controller/resourcequota/resource_quota_controller_test.go
@@ -1086,7 +1086,7 @@ func TestDiscoverySync(t *testing.T) {
 
 	// Simulate the discovery client returning an error
 	fakeDiscoveryClient.setPreferredResources(nil)
-	fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
+	fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
 
 	// Wait until sync discovers the change
 	time.Sleep(1 * time.Second)
diff --git a/pkg/controller/service/service_controller_test.go b/pkg/controller/service/service_controller_test.go
index b2e988825aa..e7515954ef1 100644
--- a/pkg/controller/service/service_controller_test.go
+++ b/pkg/controller/service/service_controller_test.go
@@ -621,7 +621,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
 // with various kubernetes errors.
 func TestProcessServiceCreateOrUpdateK8sError(t *testing.T) {
 	svcName := "svc-k8s-err"
-	conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("Object conflict"))
+	conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("object conflict"))
 	notFoundErr := apierrors.NewNotFound(schema.GroupResource{}, svcName)
 
 	testCases := []struct {
@@ -710,7 +710,7 @@ func TestSyncService(t *testing.T) {
 				srv := controller.cache.getOrCreate("external-balancer")
 				srv.state = defaultExternalService()
 			},
-			expectedErr: fmt.Errorf("Service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
+			expectedErr: fmt.Errorf("service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
 		},
 		*/
 
@@ -780,12 +780,12 @@ func TestProcessServiceDeletion(t *testing.T) {
 			svc := controller.cache.getOrCreate(svcKey)
 			svc.state = defaultExternalService()
-			cloud.Err = fmt.Errorf("Error Deleting the Loadbalancer")
+			cloud.Err = fmt.Errorf("error Deleting the Loadbalancer")
 
 		},
 		expectedFn: func(svcErr error) error {
-			expectedError := "Error Deleting the Loadbalancer"
+			expectedError := "error Deleting the Loadbalancer"
 
 			if svcErr == nil || svcErr.Error() != expectedError {
 				return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr)
@@ -1110,7 +1110,7 @@ func TestServiceCache(t *testing.T) {
 				return fmt.Errorf("is Available Expected=true Obtained=%v", bool)
 			}
 			if Cs == nil {
-				return fmt.Errorf("CachedService expected:non-nil Obtained=nil")
+				return fmt.Errorf("cachedService expected:non-nil Obtained=nil")
 			}
 			return nil
 		},
@@ -1125,7 +1125,7 @@ func TestServiceCache(t *testing.T) {
 			//It should have two elements
 			keys := sc.ListKeys()
 			if len(keys) != 2 {
-				return fmt.Errorf("Elementes Expected=2 Obtained=%v", len(keys))
+				return fmt.Errorf("elements Expected=2 Obtained=%v", len(keys))
 			}
 			return nil
 		},
diff --git a/pkg/controller/serviceaccount/tokens_controller_test.go b/pkg/controller/serviceaccount/tokens_controller_test.go
index e7b54cc0099..3a7a6aab4bf 100644
--- a/pkg/controller/serviceaccount/tokens_controller_test.go
+++ b/pkg/controller/serviceaccount/tokens_controller_test.go
@@ -247,7 +247,7 @@ func TestTokenCreation(t *testing.T) {
 				return func(core.Action) (bool, runtime.Object, error) {
 					i++
 					if i < 3 {
-						return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
+						return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
 					}
 					return false, nil, nil
 				}
@@ -278,7 +278,7 @@ func TestTokenCreation(t *testing.T) {
 				resource: "secrets",
 				reactor: func(t *testing.T) core.ReactionFunc {
 					return func(core.Action) (bool, runtime.Object, error) {
-						return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
+						return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
 					}
 				},
 			}},
diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go
index 50433dbdbe4..403007ed36f 100644
--- a/pkg/controller/statefulset/stateful_pod_control.go
+++ b/pkg/controller/statefulset/stateful_pod_control.go
@@ -184,13 +184,13 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
 		case apierrors.IsNotFound(err):
 			_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
 			if err != nil {
-				errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err))
+				errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
 			}
 			if err == nil || !apierrors.IsAlreadyExists(err) {
 				spc.recordClaimEvent("create", set, pod, &claim, err)
 			}
 		case err != nil:
-			errs = append(errs, fmt.Errorf("Failed to retrieve PVC %s: %s", claim.Name, err))
+			errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
 			spc.recordClaimEvent("create", set, pod, &claim, err)
 		}
 		// TODO: Check resource requirements and accessmodes, update if necessary
diff --git a/pkg/controller/statefulset/stateful_pod_control_test.go b/pkg/controller/statefulset/stateful_pod_control_test.go
index e2428d64852..8c924e8c0a1 100644
--- a/pkg/controller/statefulset/stateful_pod_control_test.go
+++ b/pkg/controller/statefulset/stateful_pod_control_test.go
@@ -205,7 +205,7 @@ func TestStatefulPodControlNoOpUpdate(t *testing.T) {
 	control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
 	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
 		t.Error("no-op update should not make any client invocation")
-		return true, nil, apierrors.NewInternalError(errors.New("If we are here we have a problem"))
+		return true, nil, apierrors.NewInternalError(errors.New("if we are here we have a problem"))
 	})
 	if err := control.UpdateStatefulPod(set, pod); err != nil {
 		t.Errorf("Error returned on no-op update error: %s", err)
diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go
index 6af6630c378..7e5cfecdf36 100644
--- a/pkg/controller/statefulset/stateful_set_control_test.go
+++ b/pkg/controller/statefulset/stateful_set_control_test.go
@@ -1774,7 +1774,7 @@ func assertMonotonicInvariants(set *apps.StatefulSet, spc *fakeStatefulPodContro
 	sort.Sort(ascendingOrdinal(pods))
 	for ord := 0; ord < len(pods); ord++ {
 		if ord > 0 && isRunningAndReady(pods[ord]) && !isRunningAndReady(pods[ord-1]) {
-			return fmt.Errorf("Successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
+			return fmt.Errorf("successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
 		}
 
 		if getOrdinal(pods[ord]) != ord {
diff --git a/pkg/controller/statefulset/stateful_set_status_updater_test.go b/pkg/controller/statefulset/stateful_set_status_updater_test.go
index 8cbb25da6f5..74d67812da2 100644
--- a/pkg/controller/statefulset/stateful_set_status_updater_test.go
+++ b/pkg/controller/statefulset/stateful_set_status_updater_test.go
@@ -95,7 +95,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflict(t *testing.T) {
 		update := action.(core.UpdateAction)
 		if !conflict {
 			conflict = true
-			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
+			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
 		}
 		return true, update.GetObject(), nil
 
@@ -118,7 +118,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure(t *testing.T) {
 	updater := NewRealStatefulSetStatusUpdater(fakeClient, setLister)
 	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
 		update := action.(core.UpdateAction)
-		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
+		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
 	})
 	if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
 		t.Error("UpdateStatefulSetStatus failed to return an error on get failure")
diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go
index dd8f1182890..d94576ed495 100644
--- a/pkg/controller/testutil/test_utils.go
+++ b/pkg/controller/testutil/test_utils.go
@@ -247,7 +247,7 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
 	}
 
 	if !found {
-		return nil, fmt.Errorf("Not found node %v", node)
+		return nil, fmt.Errorf("not found node %v", node)
 	}
 
 	origNodeCopy.Status = node.Status
diff --git a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go
index eb5ea26fe8f..df8bc016144 100644
--- a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go
+++ b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go
@@ -267,7 +267,7 @@ func needsCleanup(j *batch.Job) bool {
 
 func getFinishAndExpireTime(j *batch.Job) (*time.Time, *time.Time, error) {
 	if !needsCleanup(j) {
-		return nil, nil, fmt.Errorf("Job %s/%s should not be cleaned up", j.Namespace, j.Name)
+		return nil, nil, fmt.Errorf("job %s/%s should not be cleaned up", j.Namespace, j.Name)
 	}
 	finishAt, err := jobFinishTime(j)
 	if err != nil {
diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
index 4dc7fd3f402..039fab7e6b8 100644
--- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
+++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
@@ -357,7 +357,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
 
 	volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
 	if err != nil {
-		return fmt.Errorf("Failed to SetVolumeMountedByNode with error: %v", err)
+		return fmt.Errorf("failed to SetVolumeMountedByNode with error: %v", err)
 	}
 
 	nodeObj.mountedByNode = mounted
@@ -390,7 +390,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
 
 	volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
 	if err != nil {
-		return 0, fmt.Errorf("Failed to set detach request time with error: %v", err)
+		return 0, fmt.Errorf("failed to set detach request time with error: %v", err)
 	}
 	// If there is no previous detach request, set it to the current time
 	if nodeObj.detachRequestedTime.IsZero() {