Merge pull request #80318 from davidxia/fix-err-caps

cleanup: fix some log and error capitalizations
Authored by Kubernetes Prow Robot on 2019-07-25 10:41:28 -07:00; committed by GitHub
commit bf2dd03083
40 changed files with 76 additions and 76 deletions
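
Every hunk below makes the same mechanical change: the first word of an error or log format string is lowered. This follows the Go convention that error strings should not be capitalized (and should not end with punctuation), because callers routinely wrap them into longer messages; golint and staticcheck's ST1005 check flag the capitalized form. The sketch below is a minimal illustration of the before/after style, not code from this PR; the names (mountNFS, runRPCBind, errRPCBindUnavailable) are made up.

// A small, self-contained sketch of the convention this PR enforces.
// Nothing here comes from the Kubernetes tree; all names are illustrative.
package main

import (
	"errors"
	"fmt"
)

// errRPCBindUnavailable is a hypothetical sentinel error returned by the stub below.
var errRPCBindUnavailable = errors.New("rpcbind is not running")

// runRPCBind stands in for an external call that can fail.
func runRPCBind() error { return errRPCBindUnavailable }

// mountNFS shows the post-cleanup style: lower-case first word, no trailing period.
func mountNFS(target string) error {
	if err := runRPCBind(); err != nil {
		// Before the cleanup: fmt.Errorf("Mount failed for NFS V3 ...: %v", err)
		// After the cleanup: the first word is lower-case so the message composes cleanly.
		return fmt.Errorf("mount failed for NFS V3 on %s: %v", target, err)
	}
	return nil
}

func main() {
	if err := mountNFS("/mnt/data"); err != nil {
		// Prints: setup failed: mount failed for NFS V3 on /mnt/data: rpcbind is not running
		fmt.Println("setup failed:", err)
	}
}

Only the leading word of each message changes; proper nouns and identifiers inside the strings (NFS, PVC, ConfigMap, MaxUnavailable) keep their capitalization, which the convention allows.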

@@ -79,14 +79,14 @@ func mountInChroot(rootfsPath string, args []string) error {
// Mount failed because it is NFS V3 and we need to run rpcBind
output, err = exec.Command(chrootCmd, rootfsPath, rpcBindCmd, "-w").CombinedOutput()
if err != nil {
return fmt.Errorf("Mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
return fmt.Errorf("mount issued for NFS V3 but unable to run rpcbind:\n Output: %s\n Error: %v", string(output), err)
}
// Rpcbind is running, try mounting again
output, err = exec.Command(chrootCmd, args...).CombinedOutput()
if err != nil {
return fmt.Errorf("Mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
return fmt.Errorf("mount failed for NFS V3 even after running rpcBind %s, %v", string(output), err)
}
return nil

@@ -195,12 +195,12 @@ func (e *CombinedEtcdClient) AttachLease(leaseDuration time.Duration) error {
defer v3client.Close()
objectsResp, err := v3client.KV.Get(ctx, ttlKeysPrefix, clientv3.WithPrefix())
if err != nil {
return fmt.Errorf("Error while getting objects to attach to the lease")
return fmt.Errorf("error while getting objects to attach to the lease")
}
lease, err := v3client.Lease.Grant(ctx, int64(leaseDuration/time.Second))
if err != nil {
return fmt.Errorf("Error while creating lease: %v", err)
return fmt.Errorf("error while creating lease: %v", err)
}
klog.Infof("Lease with TTL: %v created", lease.TTL)

@@ -87,7 +87,7 @@ func (r *EtcdMigrateServer) Start(version *EtcdVersion) error {
if err != nil {
return fmt.Errorf("error killing etcd: %v", err)
}
return fmt.Errorf("Timed out waiting for etcd on port %d", r.cfg.port)
return fmt.Errorf("timed out waiting for etcd on port %d", r.cfg.port)
}
}
}

@@ -111,7 +111,7 @@ type EtcdVersionPair struct {
func ParseEtcdVersionPair(s string) (*EtcdVersionPair, error) {
parts := strings.Split(s, "/")
if len(parts) != 2 {
return nil, fmt.Errorf("Malformed version file, expected <major>.<minor>.<patch>/<storage> but got %s", s)
return nil, fmt.Errorf("malformed version file, expected <major>.<minor>.<patch>/<storage> but got %s", s)
}
version, err := ParseEtcdVersion(parts[0])
if err != nil {

@@ -393,7 +393,7 @@ func TestConfigChange(t *testing.T) {
setUp := func() (*os.File, string, error) {
tempDir, err := ioutil.TempDir("", "kubeproxy-config-change")
if err != nil {
return nil, "", fmt.Errorf("Unable to create temporary directory: %v", err)
return nil, "", fmt.Errorf("unable to create temporary directory: %v", err)
}
fullPath := filepath.Join(tempDir, "kube-proxy-config")
file, err := os.Create(fullPath)

@@ -138,10 +138,10 @@ func newCmdKubeletConfigEnableDynamic() *cobra.Command {
Example: kubeletConfigEnableDynamicExample,
Run: func(cmd *cobra.Command, args []string) {
if len(nodeName) == 0 {
kubeadmutil.CheckErr(errors.New("The --node-name argument is required"))
kubeadmutil.CheckErr(errors.New("the --node-name argument is required"))
}
if len(kubeletVersionStr) == 0 {
kubeadmutil.CheckErr(errors.New("The --kubelet-version argument is required"))
kubeadmutil.CheckErr(errors.New("the --kubelet-version argument is required"))
}
kubeletVersion, err := version.ParseSemantic(kubeletVersionStr)

@@ -244,7 +244,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command {
`), kubeadmapiv1beta2.SchemeGroupVersion, kubeadmapiv1beta2.SchemeGroupVersion),
Run: func(cmd *cobra.Command, args []string) {
if len(oldCfgPath) == 0 {
kubeadmutil.CheckErr(errors.New("The --old-config flag is mandatory"))
kubeadmutil.CheckErr(errors.New("the --old-config flag is mandatory"))
}
oldCfgBytes, err := ioutil.ReadFile(oldCfgPath)
@@ -321,7 +321,7 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co
`), metav1.NamespaceSystem, constants.KubeadmConfigConfigMap),
Run: func(cmd *cobra.Command, args []string) {
if len(cfgPath) == 0 {
kubeadmutil.CheckErr(errors.New("The --config flag is mandatory"))
kubeadmutil.CheckErr(errors.New("the --config flag is mandatory"))
}
klog.V(1).Infoln("[config] retrieving ClientSet from file")

@@ -58,7 +58,7 @@ func runPreflight(c workflow.RunData) error {
return err
}
if strings.ToLower(s.Text()) != "y" {
return errors.New("Aborted reset operation")
return errors.New("aborted reset operation")
}
}

@@ -428,7 +428,7 @@ func EtcdSupportedVersion(versionString string) (*version.Version, error) {
}
return etcdVersion, nil
}
return nil, errors.Errorf("Unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
return nil, errors.Errorf("unsupported or unknown Kubernetes version(%v)", kubernetesVersion)
}
// GetStaticPodDirectory returns the location on the disk where the Static Pod should be present

@@ -157,7 +157,7 @@ func TestEtcdSupportedVersion(t *testing.T) {
{
kubernetesVersion: "1.99.0",
expectedVersion: nil,
expectedError: errors.New("Unsupported or unknown Kubernetes version(1.99.0)"),
expectedError: errors.New("unsupported or unknown Kubernetes version(1.99.0)"),
},
{
kubernetesVersion: MinimumControlPlaneVersion.WithPatch(1).String(),

@@ -132,7 +132,7 @@ func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string,
for {
select {
case <-timeoutChan:
return errors.New("The prepull operation timed out")
return errors.New("the prepull operation timed out")
case result := <-stringChan:
i++
// If the cleanup function errors; error here as well

@@ -158,7 +158,7 @@ func TestMutateConfigMapWithConflict(t *testing.T) {
update := action.(core.UpdateAction)
if conflict > 0 {
conflict--
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("Conflict"))
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), configMapName, errors.New("conflict"))
}
return false, update.GetObject(), nil
})

@@ -98,10 +98,10 @@ func BuildAuthz(client authorizationclient.SubjectAccessReviewInterface, authz k
return authorizerConfig.New()
case "":
return nil, fmt.Errorf("No authorization mode specified")
return nil, fmt.Errorf("no authorization mode specified")
default:
return nil, fmt.Errorf("Unknown authorization mode %s", authz.Mode)
return nil, fmt.Errorf("unknown authorization mode %s", authz.Mode)
}
}

@@ -418,7 +418,7 @@ func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
func checkPermissions() error {
if uid := os.Getuid(); uid != 0 {
return fmt.Errorf("Kubelet needs to run as uid `0`. It is being run as %d", uid)
return fmt.Errorf("kubelet needs to run as uid `0`. It is being run as %d", uid)
}
// TODO: Check if kubelet is running in the `initial` user namespace.
// http://man7.org/linux/man-pages/man7/user_namespaces.7.html

@@ -153,7 +153,7 @@ func (t tester) getKubetest(get bool, old time.Duration) (string, error) {
log.Printf("The kubetest binary is older than %s.", old)
}
if t.goPath == "" {
return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set")
return "", fmt.Errorf("cannot install kubetest until $GOPATH is set")
}
log.Print("Updating kubetest binary...")
cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"}

@@ -138,10 +138,10 @@ func TestParse(t *testing.T) {
}
func TestLook(t *testing.T) {
lpf := errors.New("LookPath failed")
sf := errors.New("Stat failed")
lpnc := errors.New("LookPath should not be called")
snc := errors.New("Stat should not be called")
lpf := errors.New("lookPath failed")
sf := errors.New("stat failed")
lpnc := errors.New("lookPath should not be called")
snc := errors.New("stat should not be called")
cases := []struct {
stat error
lookPath error
@@ -324,7 +324,7 @@ func TestGetKubetest(t *testing.T) {
stat: func(p string) (os.FileInfo, error) {
// stat
if p != c.stat {
return nil, fmt.Errorf("Failed to find %s", p)
return nil, fmt.Errorf("failed to find %s", p)
}
return FileInfo{time.Now().Add(c.age * -1)}, nil
},
@@ -332,7 +332,7 @@ func TestGetKubetest(t *testing.T) {
if c.path {
return filepath.Join(p, name), nil
}
return "", fmt.Errorf("Not on path: %s", name)
return "", fmt.Errorf("not on path: %s", name)
},
goPath: c.goPath,
wait: func(cmd string, args ...string) error {

@@ -114,17 +114,17 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo
// Look up the value
value, ok := data[key]
if !ok {
return nil, false, fmt.Errorf("No key %s found", key)
return nil, false, fmt.Errorf("no key %s found", key)
}
// Get the indexed value if an index is specified
if index >= 0 {
valueSlice, ok := value.([]interface{})
if !ok {
return nil, false, fmt.Errorf("Key %s did not hold a slice", key)
return nil, false, fmt.Errorf("key %s did not hold a slice", key)
}
if index >= len(valueSlice) {
return nil, false, fmt.Errorf("Index %d out of bounds for slice at key: %v", index, key)
return nil, false, fmt.Errorf("index %d out of bounds for slice at key: %v", index, key)
}
value = valueSlice[index]
}
@@ -135,7 +135,7 @@ func getJSONValue(data map[string]interface{}, keys ...string) (interface{}, boo
childData, ok := value.(map[string]interface{})
if !ok {
return nil, false, fmt.Errorf("Key %s did not hold a map", keys[0])
return nil, false, fmt.Errorf("key %s did not hold a map", keys[0])
}
return getJSONValue(childData, keys[1:]...)
}

@@ -148,7 +148,7 @@ func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.C
return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor)
}
return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource)
return "", fmt.Errorf("unsupported container resource : %v", fs.Resource)
}
// convertResourceCPUToString converts cpu value to the format of divisor and returns

@@ -1519,7 +1519,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName
if lbaas.opts.ManageSecurityGroups {
err := lbaas.EnsureSecurityGroupDeleted(clusterName, service)
if err != nil {
return fmt.Errorf("Failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
return fmt.Errorf("failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err)
}
}
@@ -1536,7 +1536,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
// It is OK when the security group has been deleted by others.
return nil
}
return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err)
return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
}
lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
@@ -1567,7 +1567,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1
for _, rule := range secGroupRules {
res := rules.Delete(lbaas.network, rule.ID)
if res.Err != nil && !isNotFound(res.Err) {
return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
return fmt.Errorf("error occurred deleting security group rule: %s: %v", rule.ID, res.Err)
}
}
}

@@ -211,11 +211,11 @@ func convertToCM(obj interface{}) (*v1.ConfigMap, error) {
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
return nil, fmt.Errorf("Couldn't get object from tombstone %#v", obj)
return nil, fmt.Errorf("couldn't get object from tombstone %#v", obj)
}
cm, ok = tombstone.Obj.(*v1.ConfigMap)
if !ok {
return nil, fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj)
return nil, fmt.Errorf("tombstone contained object that is not a ConfigMap %#v", obj)
}
}
return cm, nil

@@ -87,7 +87,7 @@ func newCFSSLSigner(caFile, caKeyFile string, client clientset.Interface, certif
priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
if err != nil {
return nil, fmt.Errorf("Malformed private key %v", err)
return nil, fmt.Errorf("malformed private key %v", err)
}
return &cfsslSigner{
priv: priv,

@@ -94,7 +94,7 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time
starts := []time.Time{}
sched, err := cron.ParseStandard(sj.Spec.Schedule)
if err != nil {
return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
return starts, fmt.Errorf("unparseable schedule: %s : %s", sj.Spec.Schedule, err)
}
var earliestTime time.Time

@@ -49,7 +49,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
if err != nil {
return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
return fmt.Errorf("couldn't get unavailable numbers: %v", err)
}
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
@@ -416,7 +416,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeL
}
maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
if err != nil {
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
}
klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
return maxUnavailable, numUnavailable, nil

@@ -857,7 +857,7 @@ func TestGarbageCollectorSync(t *testing.T) {
// Simulate the discovery client returning an error
fakeDiscoveryClient.setPreferredResources(nil)
fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
// Wait until sync discovers the change
time.Sleep(1 * time.Second)

@@ -199,7 +199,7 @@ func TestControllerSyncJob(t *testing.T) {
},
"too few active pods, with controller error": {
2, 5, 6, false, 0,
fmt.Errorf("Fake error"), true, 0, 1, 1, 0,
fmt.Errorf("fake error"), true, 0, 1, 1, 0,
1, 0, 1, 1, 0, nil, "",
},
"too many active pods": {
@@ -209,17 +209,17 @@
},
"too many active pods, with controller error": {
2, 5, 6, false, 0,
fmt.Errorf("Fake error"), true, 0, 3, 0, 0,
fmt.Errorf("fake error"), true, 0, 3, 0, 0,
0, 1, 3, 0, 0, nil, "",
},
"failed + succeed pods: reset backoff delay": {
2, 5, 6, false, 0,
fmt.Errorf("Fake error"), true, 0, 1, 1, 1,
fmt.Errorf("fake error"), true, 0, 1, 1, 1,
1, 0, 1, 1, 1, nil, "",
},
"only new failed pod": {
2, 5, 6, false, 0,
fmt.Errorf("Fake error"), false, 0, 1, 0, 1,
fmt.Errorf("fake error"), false, 0, 1, 0, 1,
1, 0, 1, 0, 1, nil, "",
},
"job finish": {
@@ -575,7 +575,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
updateError := fmt.Errorf("Update error")
updateError := fmt.Errorf("update error")
manager.updateHandler = func(job *batch.Job) error {
manager.queue.AddRateLimited(testutil.GetKey(job, t))
return updateError

@@ -101,7 +101,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
case CloudAllocatorType:
return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
default:
return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType)
return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
}
}
@@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
}
return true, nil
}); pollErr != nil {
return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map",
return nil, fmt.Errorf("failed to list all nodes in %v, cannot proceed without updating CIDR map",
apiserverStartupGracePeriod)
}
return nodeList, nil

@@ -162,7 +162,7 @@ func (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err
var ipSize int
if cidr == nil {
return -1, -1, fmt.Errorf("Error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.clusterCIDR)
}
if !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {

@@ -276,12 +276,12 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
}
klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name)
if err = r.cidrSets[idx].Release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", cidr, err)
return fmt.Errorf("error when releasing CIDR %v: %v", cidr, err)
}
}
return nil

@@ -278,7 +278,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
// If all metrics are invalid return error and set condition on hpa based on first invalid metric.
if invalidMetricsCount >= len(metricSpecs) {
setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
}
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
return replicas, metric, statuses, timestamp, nil

@@ -326,7 +326,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
rsSpec.Status.ReadyReplicas = 1
rsSpec.Status.AvailableReplicas = 1
fakePodControl.Clear()
fakePodControl.Err = fmt.Errorf("Fake Error")
fakePodControl.Err = fmt.Errorf("fake Error")
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
@@ -670,7 +670,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
return true, &apps.ReplicaSet{}, fmt.Errorf("fake error")
})
fakeRSClient := fakeClient.AppsV1().ReplicaSets("default")
numReplicas := int32(10)
@@ -1136,11 +1136,11 @@ func TestPatchPodFails(t *testing.T) {
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
// let both patches fail. The rs controller will assume it fails to take
// control of the pods and requeue to try again.
fakePodControl.Err = fmt.Errorf("Fake Error")
fakePodControl.Err = fmt.Errorf("fake Error")
rsKey := GetKey(rs, t)
err := processSync(manager, rsKey)
if err == nil || !strings.Contains(err.Error(), "Fake Error") {
t.Errorf("expected Fake Error, got %+v", err)
if err == nil || !strings.Contains(err.Error(), "fake Error") {
t.Errorf("expected fake Error, got %+v", err)
}
// 2 patches to take control of pod1 and pod2 (both fail).
validateSyncReplicaSet(t, fakePodControl, 0, 0, 2)

@@ -1086,7 +1086,7 @@ func TestDiscoverySync(t *testing.T) {
// Simulate the discovery client returning an error
fakeDiscoveryClient.setPreferredResources(nil)
fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
// Wait until sync discovers the change
time.Sleep(1 * time.Second)

@@ -621,7 +621,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
// with various kubernetes errors.
func TestProcessServiceCreateOrUpdateK8sError(t *testing.T) {
svcName := "svc-k8s-err"
conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("Object conflict"))
conflictErr := apierrors.NewConflict(schema.GroupResource{}, svcName, errors.New("object conflict"))
notFoundErr := apierrors.NewNotFound(schema.GroupResource{}, svcName)
testCases := []struct {
@@ -710,7 +710,7 @@ func TestSyncService(t *testing.T) {
srv := controller.cache.getOrCreate("external-balancer")
srv.state = defaultExternalService()
},
expectedErr: fmt.Errorf("Service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
expectedErr: fmt.Errorf("service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
},
*/
@@ -780,12 +780,12 @@ func TestProcessServiceDeletion(t *testing.T) {
svc := controller.cache.getOrCreate(svcKey)
svc.state = defaultExternalService()
cloud.Err = fmt.Errorf("Error Deleting the Loadbalancer")
cloud.Err = fmt.Errorf("error Deleting the Loadbalancer")
},
expectedFn: func(svcErr error) error {
expectedError := "Error Deleting the Loadbalancer"
expectedError := "error Deleting the Loadbalancer"
if svcErr == nil || svcErr.Error() != expectedError {
return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr)
@@ -1110,7 +1110,7 @@ func TestServiceCache(t *testing.T) {
return fmt.Errorf("is Available Expected=true Obtained=%v", bool)
}
if Cs == nil {
return fmt.Errorf("CachedService expected:non-nil Obtained=nil")
return fmt.Errorf("cachedService expected:non-nil Obtained=nil")
}
return nil
},
@@ -1125,7 +1125,7 @@ func TestServiceCache(t *testing.T) {
//It should have two elements
keys := sc.ListKeys()
if len(keys) != 2 {
return fmt.Errorf("Elementes Expected=2 Obtained=%v", len(keys))
return fmt.Errorf("elements Expected=2 Obtained=%v", len(keys))
}
return nil
},

@@ -247,7 +247,7 @@ func TestTokenCreation(t *testing.T) {
return func(core.Action) (bool, runtime.Object, error) {
i++
if i < 3 {
return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
}
return false, nil, nil
}
@@ -278,7 +278,7 @@ func TestTokenCreation(t *testing.T) {
resource: "secrets",
reactor: func(t *testing.T) core.ReactionFunc {
return func(core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("No can do"))
return true, nil, apierrors.NewForbidden(api.Resource("secrets"), "foo", errors.New("no can do"))
}
},
}},

@@ -184,13 +184,13 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
case apierrors.IsNotFound(err):
_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
if err != nil {
errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err))
errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
}
if err == nil || !apierrors.IsAlreadyExists(err) {
spc.recordClaimEvent("create", set, pod, &claim, err)
}
case err != nil:
errs = append(errs, fmt.Errorf("Failed to retrieve PVC %s: %s", claim.Name, err))
errs = append(errs, fmt.Errorf("failed to retrieve PVC %s: %s", claim.Name, err))
spc.recordClaimEvent("create", set, pod, &claim, err)
}
// TODO: Check resource requirements and accessmodes, update if necessary

@@ -205,7 +205,7 @@ func TestStatefulPodControlNoOpUpdate(t *testing.T) {
control := NewRealStatefulPodControl(fakeClient, nil, nil, nil, recorder)
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
t.Error("no-op update should not make any client invocation")
return true, nil, apierrors.NewInternalError(errors.New("If we are here we have a problem"))
return true, nil, apierrors.NewInternalError(errors.New("if we are here we have a problem"))
})
if err := control.UpdateStatefulPod(set, pod); err != nil {
t.Errorf("Error returned on no-op update error: %s", err)

@@ -1774,7 +1774,7 @@ func assertMonotonicInvariants(set *apps.StatefulSet, spc *fakeStatefulPodContro
sort.Sort(ascendingOrdinal(pods))
for ord := 0; ord < len(pods); ord++ {
if ord > 0 && isRunningAndReady(pods[ord]) && !isRunningAndReady(pods[ord-1]) {
return fmt.Errorf("Successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
return fmt.Errorf("successor %s is Running and Ready while %s is not", pods[ord].Name, pods[ord-1].Name)
}
if getOrdinal(pods[ord]) != ord {

@@ -95,7 +95,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflict(t *testing.T) {
update := action.(core.UpdateAction)
if !conflict {
conflict = true
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
}
return true, update.GetObject(), nil
@@ -118,7 +118,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure(t *testing.T) {
updater := NewRealStatefulSetStatusUpdater(fakeClient, setLister)
fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("Object already exists"))
return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
})
if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
t.Error("UpdateStatefulSetStatus failed to return an error on get failure")

@@ -247,7 +247,7 @@ func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
}
if !found {
return nil, fmt.Errorf("Not found node %v", node)
return nil, fmt.Errorf("not found node %v", node)
}
origNodeCopy.Status = node.Status

@@ -267,7 +267,7 @@ func needsCleanup(j *batch.Job) bool {
func getFinishAndExpireTime(j *batch.Job) (*time.Time, *time.Time, error) {
if !needsCleanup(j) {
return nil, nil, fmt.Errorf("Job %s/%s should not be cleaned up", j.Namespace, j.Name)
return nil, nil, fmt.Errorf("job %s/%s should not be cleaned up", j.Namespace, j.Name)
}
finishAt, err := jobFinishTime(j)
if err != nil {

@@ -357,7 +357,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return fmt.Errorf("Failed to SetVolumeMountedByNode with error: %v", err)
return fmt.Errorf("failed to SetVolumeMountedByNode with error: %v", err)
}
nodeObj.mountedByNode = mounted
@@ -390,7 +390,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return 0, fmt.Errorf("Failed to set detach request time with error: %v", err)
return 0, fmt.Errorf("failed to set detach request time with error: %v", err)
}
// If there is no previous detach request, set it to the current time
if nodeObj.detachRequestedTime.IsZero() {