From 79680b5d9bfb8aea37d959d62689c72f9dd7bae5 Mon Sep 17 00:00:00 2001
From: taesun_lee
Date: Mon, 24 Feb 2020 12:57:53 +0900
Subject: [PATCH] Fix pkg/controller typos in some error messages, comments etc

- applied review results by LuisSanchez
- Co-Authored-By: Luis Sanchez

genernal -> general
iniital -> initial
initalObjects -> initialObjects
intentionaly -> intentionally
inforer -> informer
anotother -> another
triger -> trigger
mutli -> multi
Verifyies -> Verifies
valume -> volume
unexpect -> unexpected
unfulfiled -> unfulfilled
implenets -> implements
assignement -> assignment
expectataions -> expectations
nexpected -> unexpected
boundSatsified -> boundSatisfied
externel -> external
calcuates -> calculates
workes -> workers
unitialized -> uninitialized
afater -> after
Espected -> Expected
nodeMontiorGracePeriod -> NodeMonitorGracePeriod
estimateGrracefulTermination -> estimateGracefulTermination
secondrary -> secondary
ShouldRunDaemonPodOnUnscheduableNode -> ShouldRunDaemonPodOnUnschedulableNode
rrror -> error
expectatitons -> expectations
foud -> found
epackage -> package
succesfulJobs -> successfulJobs
namesapce -> namespace
ConfigMapResynce -> ConfigMapResync
---
 pkg/controller/bootstrap/bootstrapsigner.go | 2 +-
 .../rootcacertpublisher/publisher_test.go | 2 +-
 pkg/controller/cronjob/cronjob_controller.go | 6 +++---
 pkg/controller/cronjob/cronjob_controller_test.go | 2 +-
 pkg/controller/daemon/config/v1alpha1/conversion.go | 2 +-
 pkg/controller/daemon/daemon_controller_test.go | 8 ++++----
 pkg/controller/endpoint/endpoints_controller_test.go | 2 +-
 pkg/controller/job/job_controller_test.go | 2 +-
 .../deletion/namespaced_resources_deleter.go | 2 +-
 pkg/controller/nodeipam/ipam/range_allocator.go | 2 +-
 pkg/controller/nodelifecycle/config/types.go | 2 +-
 .../nodelifecycle/node_lifecycle_controller.go | 2 +-
 .../nodelifecycle/node_lifecycle_controller_test.go | 2 +-
 .../nodelifecycle/scheduler/timed_workers.go | 2 +-
 .../nodelifecycle/scheduler/timed_workers_test.go | 8 ++++----
 .../podautoscaler/metrics/rest_metrics_client.go | 2 +-
 pkg/controller/podautoscaler/metrics/utilization.go | 2 +-
 pkg/controller/replicaset/replica_set_test.go | 4 ++--
 pkg/controller/service/controller_test.go | 4 ++--
 pkg/controller/statefulset/stateful_set_test.go | 4 ++--
 .../attachdetach/attach_detach_controller_test.go | 2 +-
 .../attachdetach/cache/actual_state_of_world.go | 2 +-
 .../attachdetach/cache/actual_state_of_world_test.go | 6 +++---
 .../attachdetach/reconciler/reconciler_test.go | 10 +++++-----
 .../volume/persistentvolume/framework_test.go | 4 ++--
 .../pvprotection/pv_protection_controller_test.go | 4 ++--
 .../volume/scheduling/scheduler_assume_cache.go | 2 +-
 .../volume/scheduling/scheduler_assume_cache_test.go | 4 ++--
 .../volume/scheduling/scheduler_binder_test.go | 12 ++++++------
 29 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go
index 59e9ae7ea9a..0a6093adc6c 100644
--- a/pkg/controller/bootstrap/bootstrapsigner.go
+++ b/pkg/controller/bootstrap/bootstrapsigner.go
@@ -53,7 +53,7 @@ type SignerOptions struct {
 	// TokenSecretNamespace string is the namespace for token Secrets.
 	TokenSecretNamespace string

-	// ConfigMapResynce is the time.Duration at which to fully re-list configmaps.
+	// ConfigMapResync is the time.Duration at which to fully re-list configmaps.
 	// If zero, re-list will be delayed as long as possible
 	ConfigMapResync time.Duration
diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher_test.go b/pkg/controller/certificates/rootcacertpublisher/publisher_test.go
index ed8f16e3130..594a18d15dd 100644
--- a/pkg/controller/certificates/rootcacertpublisher/publisher_test.go
+++ b/pkg/controller/certificates/rootcacertpublisher/publisher_test.go
@@ -83,7 +83,7 @@ func TestConfigMapCreation(t *testing.T) {
 		UpdatedConfigMap *v1.ConfigMap
 		ExpectActions []action
 	}{
-		"create new namesapce": {
+		"create new namespace": {
 			AddedNamespace: newNs,
 			ExpectActions: []action{{verb: "create", name: RootCACertConfigMapName}},
 		},
diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go
index 58a90224dc5..59974333a11 100644
--- a/pkg/controller/cronjob/cronjob_controller.go
+++ b/pkg/controller/cronjob/cronjob_controller.go
@@ -158,12 +158,12 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
 	}

 	failedJobs := []batchv1.Job{}
-	succesfulJobs := []batchv1.Job{}
+	successfulJobs := []batchv1.Job{}

 	for _, job := range js {
 		isFinished, finishedStatus := getFinishedStatus(&job)
 		if isFinished && finishedStatus == batchv1.JobComplete {
-			succesfulJobs = append(succesfulJobs, job)
+			successfulJobs = append(successfulJobs, job)
 		} else if isFinished && finishedStatus == batchv1.JobFailed {
 			failedJobs = append(failedJobs, job)
 		}
@@ -171,7 +171,7 @@
 	if sj.Spec.SuccessfulJobsHistoryLimit != nil {
 		removeOldestJobs(sj,
-			succesfulJobs,
+			successfulJobs,
 			jc,
 			*sj.Spec.SuccessfulJobsHistoryLimit,
 			recorder)
diff --git a/pkg/controller/cronjob/cronjob_controller_test.go b/pkg/controller/cronjob/cronjob_controller_test.go
index 5c0a0b008e8..45df7ef6c23 100644
--- a/pkg/controller/cronjob/cronjob_controller_test.go
+++ b/pkg/controller/cronjob/cronjob_controller_test.go
@@ -268,7 +268,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
 			sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
 			job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
 			if err != nil {
-				t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
+				t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
 			}
 			job.UID = "1234"
 			job.Namespace = ""
diff --git a/pkg/controller/daemon/config/v1alpha1/conversion.go b/pkg/controller/daemon/config/v1alpha1/conversion.go
index dcd9171c621..05f0087b99c 100644
--- a/pkg/controller/daemon/config/v1alpha1/conversion.go
+++ b/pkg/controller/daemon/config/v1alpha1/conversion.go
@@ -22,7 +22,7 @@ import (
 	daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
 )

-// Important! The public back-and-forth conversion functions for the types in this epackage
+// Important! The public back-and-forth conversion functions for the types in this package
 // with DaemonSetControllerConfiguration types need to be manually exposed like this in order for
 // other packages that reference this package to be able to call these conversion functions
 // in an autogenerated manner.
diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go
index 95936f62e6f..8c98bb64466 100644
--- a/pkg/controller/daemon/daemon_controller_test.go
+++ b/pkg/controller/daemon/daemon_controller_test.go
@@ -537,7 +537,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 	}

 	if len(nodeMap) != 0 {
-		t.Fatalf("did not foud pods on nodes %+v", nodeMap)
+		t.Fatalf("did not find pods on nodes %+v", nodeMap)
 	}
 }

@@ -587,7 +587,7 @@ func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
 		}

 		if !manager.expectations.SatisfiedExpectations(dsKey) {
-			t.Errorf("Unsatisfied pod creation expectatitons. Expected %d", creationExpectations)
+			t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
 		}
 	}
 }
@@ -1146,7 +1146,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 	manager, podControl, _, err := newTestController(daemon)
 	if err != nil {
-		t.Fatalf("rrror creating DaemonSetsController: %v", err)
+		t.Fatalf("error creating DaemonSetsController: %v", err)
 	}
 	addNodes(manager.nodeStore, 0, 4, nil)
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
@@ -1786,7 +1786,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			shouldContinueRunning: true,
 		},
 		{
-			predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
+			predicateName: "ShouldRunDaemonPodOnUnschedulableNode",
 			ds: &apps.DaemonSet{
 				Spec: apps.DaemonSetSpec{
 					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go
index 80270b1f604..2bb59562275 100644
--- a/pkg/controller/endpoint/endpoints_controller_test.go
+++ b/pkg/controller/endpoint/endpoints_controller_test.go
@@ -1307,7 +1307,7 @@ func TestPodChanged(t *testing.T) {
 	/* dual stack tests */
 	// primary changes, because changing IPs is done by changing sandbox
-	// case 1: add new secondrary IP
+	// case 1: add new secondary IP
 	newPod.Status.PodIP = "1.1.3.1"
 	newPod.Status.PodIPs = []v1.PodIP{
 		{
diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index a29ffabfd4b..313ba5f426e 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -1147,7 +1147,7 @@ func TestSyncJobExpectations(t *testing.T) {
 	manager.expectations = FakeJobExpectations{
 		controller.NewControllerExpectations(), true, func() {
-			// If we check active pods before checking expectataions, the job
+			// If we check active pods before checking expectations, the job
 			// will create a new replica because it doesn't see this pod, but
 			// has fulfilled its expectations.
 			podIndexer.Add(&pods[1])
diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go
index 94cfc757f4e..e4222087aca 100644
--- a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go
+++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go
@@ -561,7 +561,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
 	return estimate, utilerrors.NewAggregate(errs)
 }

-// estimateGrracefulTermination will estimate the graceful termination required for the specific entity in the namespace
+// estimateGracefulTermination will estimate the graceful termination required for the specific entity in the namespace
 func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) {
 	groupResource := gvr.GroupResource()
 	klog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)
diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go
index 49568020183..ab626b504bf 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator.go
@@ -273,7 +273,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 		allocated.allocatedCIDRs[idx] = podCIDR
 	}

-	//queue the assignement
+	//queue the assignment
 	klog.V(4).Infof("Putting node %s with CIDR %v into the work queue", node.Name, allocated.allocatedCIDRs)
 	r.nodeCIDRUpdateChannel <- allocated
 	return nil
diff --git a/pkg/controller/nodelifecycle/config/types.go b/pkg/controller/nodelifecycle/config/types.go
index 80173e6f2da..b6c856f2332 100644
--- a/pkg/controller/nodelifecycle/config/types.go
+++ b/pkg/controller/nodelifecycle/config/types.go
@@ -32,7 +32,7 @@ type NodeLifecycleControllerConfiguration struct {
 	// nodeStartupGracePeriod is the amount of time which we allow starting a node to
 	// be unresponsive before marking it unhealthy.
 	NodeStartupGracePeriod metav1.Duration
-	// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
+	// NodeMonitorGracePeriod is the amount of time which we allow a running node to be
 	// unresponsive before marking it unhealthy. Must be N times more than kubelet's
 	// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
 	// to post node status.
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
index 4f135963c74..058547a37bc 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -129,7 +129,7 @@ const (
 	retrySleepTime = 20 * time.Millisecond
 	nodeNameKeyIndex = "spec.nodeName"
 	// podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
-	// Pod update workes will only handle lagging cache pods. 4 workes should be enough.
+	// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
 	podUpdateWorkerSize = 4
 )
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
index 0bb57f34e24..e9b7162eab6 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -734,7 +734,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 				return true, 0
 			})
 		} else {
-			t.Fatalf("Zone %v was unitialized!", zone)
+			t.Fatalf("Zone %v was uninitialized!", zone)
 		}
 	}
diff --git a/pkg/controller/nodelifecycle/scheduler/timed_workers.go b/pkg/controller/nodelifecycle/scheduler/timed_workers.go
index d995fb22a36..36bb985130e 100644
--- a/pkg/controller/nodelifecycle/scheduler/timed_workers.go
+++ b/pkg/controller/nodelifecycle/scheduler/timed_workers.go
@@ -137,7 +137,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool {
 }

 // GetWorkerUnsafe returns a TimedWorker corresponding to the given key.
-// Unsafe method - workers have attached goroutines which can fire afater this function is called.
+// Unsafe method - workers have attached goroutines which can fire after this function is called.
 func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker {
 	q.Lock()
 	defer q.Unlock()
diff --git a/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go b/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go
index 9489fd18437..0de8a9be5e6 100644
--- a/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go
+++ b/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go
@@ -47,7 +47,7 @@ func TestExecute(t *testing.T) {
 	wg.Wait()
 	lastVal := atomic.LoadInt32(&testVal)
 	if lastVal != 5 {
-		t.Errorf("Espected testVal = 5, got %v", lastVal)
+		t.Errorf("Expected testVal = 5, got %v", lastVal)
 	}
 }

@@ -75,7 +75,7 @@ func TestExecuteDelayed(t *testing.T) {
 	wg.Wait()
 	lastVal := atomic.LoadInt32(&testVal)
 	if lastVal != 5 {
-		t.Errorf("Espected testVal = 5, got %v", lastVal)
+		t.Errorf("Expected testVal = 5, got %v", lastVal)
 	}
 }

@@ -105,7 +105,7 @@ func TestCancel(t *testing.T) {
 	wg.Wait()
 	lastVal := atomic.LoadInt32(&testVal)
 	if lastVal != 3 {
-		t.Errorf("Espected testVal = 3, got %v", lastVal)
+		t.Errorf("Expected testVal = 3, got %v", lastVal)
 	}
 }

@@ -136,6 +136,6 @@ func TestCancelAndReadd(t *testing.T) {
 	wg.Wait()
 	lastVal := atomic.LoadInt32(&testVal)
 	if lastVal != 4 {
-		t.Errorf("Espected testVal = 4, got %v", lastVal)
+		t.Errorf("Expected testVal = 4, got %v", lastVal)
 	}
 }
diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go
index 8d17ac3c989..65dbe7f6f2a 100644
--- a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go
+++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go
@@ -162,7 +162,7 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
 	return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil
 }

-// externalMetricsClient implenets the external metrics related parts of MetricsClient,
+// externalMetricsClient implements the external metrics related parts of MetricsClient,
 // using data from the external metrics API.
 type externalMetricsClient struct {
 	client externalclient.ExternalMetricsClient
diff --git a/pkg/controller/podautoscaler/metrics/utilization.go b/pkg/controller/podautoscaler/metrics/utilization.go
index 8ee53a6c47a..26b9a142271 100644
--- a/pkg/controller/podautoscaler/metrics/utilization.go
+++ b/pkg/controller/podautoscaler/metrics/utilization.go
@@ -52,7 +52,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
 }

 // GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
-// and calcuates the ratio of desired to actual utilization
+// and calculates the ratio of desired to actual utilization
 // (returning that and the actual utilization)
 func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
 	metricsTotal := int64(0)
diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go
index df24f4870ac..ceded6f7710 100644
--- a/pkg/controller/replicaset/replica_set_test.go
+++ b/pkg/controller/replicaset/replica_set_test.go
@@ -1196,7 +1196,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 	}
 	if rsExp.Fulfilled() {
-		t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+		t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 	}

 	if manager.queue.Len() != 0 {
@@ -1275,7 +1275,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 	}
 	if rsExp.Fulfilled() {
-		t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+		t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 	}

 	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
diff --git a/pkg/controller/service/controller_test.go b/pkg/controller/service/controller_test.go
index bc6222961ea..ab0da4aaa30 100644
--- a/pkg/controller/service/controller_test.go
+++ b/pkg/controller/service/controller_test.go
@@ -952,7 +952,7 @@ func TestNeedsUpdate(t *testing.T) {
 			expectedNeedsUpdate: true,
 		},
 		{
-			testName: "If externel ip counts are different",
+			testName: "If external ip counts are different",
 			updateFn: func() {
 				oldSvc = defaultExternalService()
 				newSvc = defaultExternalService()
@@ -962,7 +962,7 @@ func TestNeedsUpdate(t *testing.T) {
 			expectedNeedsUpdate: true,
 		},
 		{
-			testName: "If externel ips are different",
+			testName: "If external ips are different",
 			updateFn: func() {
 				oldSvc = defaultExternalService()
 				newSvc = defaultExternalService()
diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go
index f78340766c7..4848e6c7917 100644
--- a/pkg/controller/statefulset/stateful_set_test.go
+++ b/pkg/controller/statefulset/stateful_set_test.go
@@ -251,7 +251,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 	// It should not adopt pods.
 	for _, pod := range pods {
 		if len(pod.OwnerReferences) > 0 {
-			t.Errorf("unexpect pod owner references: %v", pod.OwnerReferences)
+			t.Errorf("unexpected pod owner references: %v", pod.OwnerReferences)
 		}
 	}

@@ -265,7 +265,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 	}
 	for _, revision := range revisions {
 		if len(revision.OwnerReferences) > 0 {
-			t.Errorf("unexpect revision owner references: %v", revision.OwnerReferences)
+			t.Errorf("unexpected revision owner references: %v", revision.OwnerReferences)
 		}
 	}
 }
diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go
index d7c59fccf5f..60b965353e8 100644
--- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go
+++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go
@@ -199,7 +199,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 		t.Fatalf("Error waiting for the informer caches to sync")
 	}

-	// Make sure the nodes and pods are in the inforer cache
+	// Make sure the nodes and pods are in the informer cache
 	i = 0
 	nodeList, err := informerFactory.Core().V1().Nodes().Lister().List(labels.Everything())
 	for len(nodeList) < nodesNum {
diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
index b8f50157f78..714b2e67553 100644
--- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
+++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
@@ -120,7 +120,7 @@ type ActualStateOfWorld interface {
 	GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume

 	// GetNodesForAttachedVolume returns the nodes on which the volume is attached.
-	// This function is used by reconciler for mutli-attach check.
+	// This function is used by reconciler for multi-attach check.
 	GetNodesForAttachedVolume(volumeName v1.UniqueVolumeName) []types.NodeName

 	// GetVolumesToReportAttached returns a map containing the set of nodes for
diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go
index 20882c61b60..4a1a3e0b283 100644
--- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go
+++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go
@@ -990,7 +990,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
-// Verifyies there is no valume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 	// Arrange
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1023,7 +1023,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
 // Calls AddVolumeToReportAsAttached to add volume back as attached
-// Verifyies there is one volume as reported as attached
+// Verifies there is one volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(t *testing.T) {
 	// Arrange
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1066,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
 // Calls RemoveVolumeFromReportAsAttached
 // Calls DeleteVolumeNode
 // Calls AddVolumeNode
-// Verifyies there is no volume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
 	// Arrange
 	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go
index bafd2dd6cee..90023eb0ad2 100644
--- a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go
+++ b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go
@@ -263,7 +263,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
 			nodeName)
 	}

-	// Assert -- Timer will triger detach
+	// Assert -- Timer will trigger detach
 	waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
@@ -415,7 +415,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 			nodeName1)
 	}

-	// Assert -- Timer will triger detach
+	// Assert -- Timer will trigger detach
 	waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 	waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -433,7 +433,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 			nodeName2)
 	}

-	// Assert -- Timer will triger detach
+	// Assert -- Timer will trigger detach
 	waitForNewDetacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
 	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 	waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -692,7 +692,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 			[]string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by pod(s) pod2"},
 		},
 		{
-			"pods in anotother namespace use the volume",
+			"pods in another namespace use the volume",
 			[]nodeWithPods{
 				{"node1", []string{"ns1/pod1"}},
 				{"node2", []string{"ns2/pod2"}},
 			},
 			[]string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by 1 pod(s) in different namespaces"},
 		},
 		{
-			"pods both in the same and anotother namespace use the volume",
+			"pods both in the same and another namespace use the volume",
 			[]nodeWithPods{
 				{"node1", []string{"ns1/pod1"}},
 				{"node2", []string{"ns2/pod2"}},
diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go
index 90891841439..ce2f41fc87a 100644
--- a/pkg/controller/volume/persistentvolume/framework_test.go
+++ b/pkg/controller/volume/persistentvolume/framework_test.go
@@ -769,7 +769,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 			if err != nil {
 				if err == pvtesting.ErrVersionConflict {
 					// Ignore version errors
-					klog.V(4).Infof("test intentionaly ignores version error.")
+					klog.V(4).Infof("test intentionally ignores version error.")
 				} else {
 					t.Errorf("Error calling syncClaim: %v", err)
 					// Finish the loop on the first error
@@ -786,7 +786,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 			if err != nil {
 				if err == pvtesting.ErrVersionConflict {
 					// Ignore version errors
-					klog.V(4).Infof("test intentionaly ignores version error.")
+					klog.V(4).Infof("test intentionally ignores version error.")
 				} else {
 					t.Errorf("Error calling syncVolume: %v", err)
 					// Finish the loop on the first error
diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
index db4ac859377..408f69550ae 100644
--- a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
+++ b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go
@@ -107,7 +107,7 @@ func TestPVProtectionController(t *testing.T) {
 		// Optional client reactors.
 		reactors []reaction
 		// PV event to simulate. This PV will be automatically added to
-		// initalObjects.
+		// initialObjects.
 		updatedPV *v1.PersistentVolume
 		// List of expected kubeclient actions that should happen during the
 		// test.
@@ -220,7 +220,7 @@ func TestPVProtectionController(t *testing.T) {
 			case *v1.PersistentVolume:
 				pvInformer.Informer().GetStore().Add(obj)
 			default:
-				t.Fatalf("Unknown initalObject type: %+v", obj)
+				t.Fatalf("Unknown initialObject type: %+v", obj)
 			}
 		}
diff --git a/pkg/controller/volume/scheduling/scheduler_assume_cache.go b/pkg/controller/volume/scheduling/scheduler_assume_cache.go
index 832bee91bfb..b79e9cb11a2 100644
--- a/pkg/controller/volume/scheduling/scheduler_assume_cache.go
+++ b/pkg/controller/volume/scheduling/scheduler_assume_cache.go
@@ -127,7 +127,7 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
 	return c.indexFunc(objInfo.latestObj)
 }

-// NewAssumeCache creates an assume cache for genernal objects.
+// NewAssumeCache creates an assume cache for general objects.
 func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache {
 	c := &assumeCache{
 		description: description,
diff --git a/pkg/controller/volume/scheduling/scheduler_assume_cache_test.go b/pkg/controller/volume/scheduling/scheduler_assume_cache_test.go
index 2fb8b0f8ca3..9d83ad9bd22 100644
--- a/pkg/controller/volume/scheduling/scheduler_assume_cache_test.go
+++ b/pkg/controller/volume/scheduling/scheduler_assume_cache_test.go
@@ -163,7 +163,7 @@ func TestRestorePV(t *testing.T) {
 	// Restore PV
 	cache.Restore(oldPV.Name)
 	if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
-		t.Fatalf("Failed to GetPV() after iniital restore: %v", err)
+		t.Fatalf("Failed to GetPV() after initial restore: %v", err)
 	}

 	// Assume newPV
@@ -420,7 +420,7 @@ func TestRestorePVC(t *testing.T) {
 	// Restore PVC
 	cache.Restore(getPVCName(oldPVC))
 	if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
-		t.Fatalf("Failed to GetPVC() after iniital restore: %v", err)
+		t.Fatalf("Failed to GetPVC() after initial restore: %v", err)
 	}

 	// Assume newPVC
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go
index 000d149bb12..777fae8e333 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_test.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go
@@ -912,10 +912,10 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 			t.Error("returned success but expected error")
 		}
 		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 		}
 		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 		}
 		testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
 	}
@@ -1037,10 +1037,10 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 			t.Error("returned success but expected error")
 		}
 		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 		}
 		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 		}
 		testEnv.validatePodCache(t,
 			testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
 	}
@@ -1150,10 +1150,10 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 			t.Error("returned success but expected error")
 		}
 		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 		}
 		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 		}
 	}