Fix typos in pkg/controller error messages, comments, etc.

- applied review feedback from LuisSanchez
- Co-Authored-By: Luis Sanchez <sanchezl@redhat.com>

genernal -> general
iniital -> initial
initalObjects -> initialObjects
intentionaly -> intentionally
inforer -> informer
anotother -> another
triger -> trigger
mutli -> multi
Verifyies -> Verifies
valume -> volume
unexpect -> unexpected
unfulfiled -> unfulfilled
implenets -> implements
assignement -> assignment
expectataions -> expectations
nexpected -> unexpected
boundSatsified -> boundSatisfied
externel -> external
calcuates -> calculates
workes -> workers
unitialized -> uninitialized
afater -> after
Espected -> Expected
nodeMontiorGracePeriod -> NodeMonitorGracePeriod
estimateGrracefulTermination -> estimateGracefulTermination
secondrary -> secondary
ShouldRunDaemonPodOnUnscheduableNode -> ShouldRunDaemonPodOnUnschedulableNode
rrror -> error
expectatitons -> expectations
foud -> found
epackage -> package
succesfulJobs -> successfulJobs
namesapce -> namespace
ConfigMapResynce -> ConfigMapResync
taesun_lee 2020-02-24 12:57:53 +09:00 committed by Taesun Lee
parent ac25069a05
commit 79680b5d9b
29 changed files with 54 additions and 54 deletions

@@ -53,7 +53,7 @@ type SignerOptions struct {
 // TokenSecretNamespace string is the namespace for token Secrets.
 TokenSecretNamespace string
-// ConfigMapResynce is the time.Duration at which to fully re-list configmaps.
+// ConfigMapResync is the time.Duration at which to fully re-list configmaps.
 // If zero, re-list will be delayed as long as possible
 ConfigMapResync time.Duration

@@ -83,7 +83,7 @@ func TestConfigMapCreation(t *testing.T) {
 UpdatedConfigMap *v1.ConfigMap
 ExpectActions []action
 }{
-"create new namesapce": {
+"create new namespace": {
 AddedNamespace: newNs,
 ExpectActions: []action{{verb: "create", name: RootCACertConfigMapName}},
 },

@@ -158,12 +158,12 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
 }
 failedJobs := []batchv1.Job{}
-succesfulJobs := []batchv1.Job{}
+successfulJobs := []batchv1.Job{}
 for _, job := range js {
 isFinished, finishedStatus := getFinishedStatus(&job)
 if isFinished && finishedStatus == batchv1.JobComplete {
-succesfulJobs = append(succesfulJobs, job)
+successfulJobs = append(successfulJobs, job)
 } else if isFinished && finishedStatus == batchv1.JobFailed {
 failedJobs = append(failedJobs, job)
 }
@@ -171,7 +171,7 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
 if sj.Spec.SuccessfulJobsHistoryLimit != nil {
 removeOldestJobs(sj,
-succesfulJobs,
+successfulJobs,
 jc,
 *sj.Spec.SuccessfulJobsHistoryLimit,
 recorder)

@@ -268,7 +268,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
 sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
 job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
 if err != nil {
-t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
+t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
 }
 job.UID = "1234"
 job.Namespace = ""

@@ -22,7 +22,7 @@ import (
 daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
 )
-// Important! The public back-and-forth conversion functions for the types in this epackage
+// Important! The public back-and-forth conversion functions for the types in this package
 // with DaemonSetControllerConfiguration types need to be manually exposed like this in order for
 // other packages that reference this package to be able to call these conversion functions
 // in an autogenerated manner.

@@ -537,7 +537,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 }
 if len(nodeMap) != 0 {
-t.Fatalf("did not foud pods on nodes %+v", nodeMap)
+t.Fatalf("did not find pods on nodes %+v", nodeMap)
 }
 }
@@ -587,7 +587,7 @@ func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
 }
 if !manager.expectations.SatisfiedExpectations(dsKey) {
-t.Errorf("Unsatisfied pod creation expectatitons. Expected %d", creationExpectations)
+t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
 }
 }
 }
@@ -1146,7 +1146,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 manager, podControl, _, err := newTestController(daemon)
 if err != nil {
-t.Fatalf("rrror creating DaemonSetsController: %v", err)
+t.Fatalf("error creating DaemonSetsController: %v", err)
 }
 addNodes(manager.nodeStore, 0, 4, nil)
 addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
@@ -1786,7 +1786,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 shouldContinueRunning: true,
 },
 {
-predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
+predicateName: "ShouldRunDaemonPodOnUnschedulableNode",
 ds: &apps.DaemonSet{
 Spec: apps.DaemonSetSpec{
 Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},

@@ -1307,7 +1307,7 @@ func TestPodChanged(t *testing.T) {
 /* dual stack tests */
 // primary changes, because changing IPs is done by changing sandbox
-// case 1: add new secondrary IP
+// case 1: add new secondary IP
 newPod.Status.PodIP = "1.1.3.1"
 newPod.Status.PodIPs = []v1.PodIP{
 {

@@ -1147,7 +1147,7 @@ func TestSyncJobExpectations(t *testing.T) {
 manager.expectations = FakeJobExpectations{
 controller.NewControllerExpectations(), true, func() {
-// If we check active pods before checking expectataions, the job
+// If we check active pods before checking expectations, the job
 // will create a new replica because it doesn't see this pod, but
 // has fulfilled its expectations.
 podIndexer.Add(&pods[1])

@@ -561,7 +561,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
 return estimate, utilerrors.NewAggregate(errs)
 }
-// estimateGrracefulTermination will estimate the graceful termination required for the specific entity in the namespace
+// estimateGracefulTermination will estimate the graceful termination required for the specific entity in the namespace
 func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) {
 groupResource := gvr.GroupResource()
 klog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)

@@ -273,7 +273,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 allocated.allocatedCIDRs[idx] = podCIDR
 }
-//queue the assignement
+//queue the assignment
 klog.V(4).Infof("Putting node %s with CIDR %v into the work queue", node.Name, allocated.allocatedCIDRs)
 r.nodeCIDRUpdateChannel <- allocated
 return nil

@@ -32,7 +32,7 @@ type NodeLifecycleControllerConfiguration struct {
 // nodeStartupGracePeriod is the amount of time which we allow starting a node to
 // be unresponsive before marking it unhealthy.
 NodeStartupGracePeriod metav1.Duration
-// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
+// NodeMonitorGracePeriod is the amount of time which we allow a running node to be
 // unresponsive before marking it unhealthy. Must be N times more than kubelet's
 // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
 // to post node status.

@@ -129,7 +129,7 @@ const (
 retrySleepTime = 20 * time.Millisecond
 nodeNameKeyIndex = "spec.nodeName"
 // podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
-// Pod update workes will only handle lagging cache pods. 4 workes should be enough.
+// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
 podUpdateWorkerSize = 4
 )

@@ -734,7 +734,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 return true, 0
 })
 } else {
-t.Fatalf("Zone %v was unitialized!", zone)
+t.Fatalf("Zone %v was uninitialized!", zone)
 }
 }

@@ -137,7 +137,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool {
 }
 // GetWorkerUnsafe returns a TimedWorker corresponding to the given key.
-// Unsafe method - workers have attached goroutines which can fire afater this function is called.
+// Unsafe method - workers have attached goroutines which can fire after this function is called.
 func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker {
 q.Lock()
 defer q.Unlock()

@@ -47,7 +47,7 @@ func TestExecute(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
@@ -75,7 +75,7 @@ func TestExecuteDelayed(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
@@ -105,7 +105,7 @@ func TestCancel(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 3 {
-t.Errorf("Espected testVal = 3, got %v", lastVal)
+t.Errorf("Expected testVal = 3, got %v", lastVal)
 }
 }
@@ -136,6 +136,6 @@ func TestCancelAndReadd(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 4 {
-t.Errorf("Espected testVal = 4, got %v", lastVal)
+t.Errorf("Expected testVal = 4, got %v", lastVal)
 }
 }

@@ -162,7 +162,7 @@ func (c *customMetricsClient) GetObjectMetric(metricName string, namespace strin
 return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil
 }
-// externalMetricsClient implenets the external metrics related parts of MetricsClient,
+// externalMetricsClient implements the external metrics related parts of MetricsClient,
 // using data from the external metrics API.
 type externalMetricsClient struct {
 client externalclient.ExternalMetricsClient

@@ -52,7 +52,7 @@ func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int
 }
 // GetMetricUtilizationRatio takes in a set of metrics and a target utilization value,
-// and calcuates the ratio of desired to actual utilization
+// and calculates the ratio of desired to actual utilization
 // (returning that and the actual utilization)
 func GetMetricUtilizationRatio(metrics PodMetricsInfo, targetUtilization int64) (utilizationRatio float64, currentUtilization int64) {
 metricsTotal := int64(0)

@@ -1196,7 +1196,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 }
 if rsExp.Fulfilled() {
-t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 }
 if manager.queue.Len() != 0 {
@@ -1275,7 +1275,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
 }
 if rsExp.Fulfilled() {
-t.Errorf("There should be unfulfiled expectation for creating new pods for ReplicaSet %q", oldRSKey)
+t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
 }
 err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)

@@ -952,7 +952,7 @@ func TestNeedsUpdate(t *testing.T) {
 expectedNeedsUpdate: true,
 },
 {
-testName: "If externel ip counts are different",
+testName: "If external ip counts are different",
 updateFn: func() {
 oldSvc = defaultExternalService()
 newSvc = defaultExternalService()
@@ -962,7 +962,7 @@ func TestNeedsUpdate(t *testing.T) {
 expectedNeedsUpdate: true,
 },
 {
-testName: "If externel ips are different",
+testName: "If external ips are different",
 updateFn: func() {
 oldSvc = defaultExternalService()
 newSvc = defaultExternalService()

@@ -251,7 +251,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 // It should not adopt pods.
 for _, pod := range pods {
 if len(pod.OwnerReferences) > 0 {
-t.Errorf("unexpect pod owner references: %v", pod.OwnerReferences)
+t.Errorf("unexpected pod owner references: %v", pod.OwnerReferences)
 }
 }
@@ -265,7 +265,7 @@ func TestStatefulSetControllerDeletionTimestampRace(t *testing.T) {
 }
 for _, revision := range revisions {
 if len(revision.OwnerReferences) > 0 {
-t.Errorf("unexpect revision owner references: %v", revision.OwnerReferences)
+t.Errorf("unexpected revision owner references: %v", revision.OwnerReferences)
 }
 }
 }

@@ -199,7 +199,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 t.Fatalf("Error waiting for the informer caches to sync")
 }
-// Make sure the nodes and pods are in the inforer cache
+// Make sure the nodes and pods are in the informer cache
 i = 0
 nodeList, err := informerFactory.Core().V1().Nodes().Lister().List(labels.Everything())
 for len(nodeList) < nodesNum {

@@ -120,7 +120,7 @@ type ActualStateOfWorld interface {
 GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume
 // GetNodesForAttachedVolume returns the nodes on which the volume is attached.
-// This function is used by reconciler for mutli-attach check.
+// This function is used by reconciler for multi-attach check.
 GetNodesForAttachedVolume(volumeName v1.UniqueVolumeName) []types.NodeName
 // GetVolumesToReportAttached returns a map containing the set of nodes for

@@ -990,7 +990,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
-// Verifyies there is no valume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1023,7 +1023,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
 // Populates data struct with one volume/node entry.
 // Calls RemoveVolumeFromReportAsAttached
 // Calls AddVolumeToReportAsAttached to add volume back as attached
-// Verifyies there is one volume as reported as attached
+// Verifies there is one volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
@@ -1066,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
 // Calls RemoveVolumeFromReportAsAttached
 // Calls DeleteVolumeNode
 // Calls AddVolumeNode
-// Verifyies there is no volume as reported as attached
+// Verifies there is no volume as reported as attached
 func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
 // Arrange
 volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)

@@ -263,7 +263,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
 nodeName)
 }
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
@@ -415,7 +415,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 nodeName1)
 }
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -433,7 +433,7 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
 nodeName2)
 }
-// Assert -- Timer will triger detach
+// Assert -- Timer will trigger detach
 waitForNewDetacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
 verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
 waitForTotalAttachCallCount(t, 2 /* expectedAttachCallCount */, fakePlugin)
@@ -692,7 +692,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by pod(s) pod2"},
 },
 {
-"pods in anotother namespace use the volume",
+"pods in another namespace use the volume",
 []nodeWithPods{
 {"node1", []string{"ns1/pod1"}},
 {"node2", []string{"ns2/pod2"}},
@@ -700,7 +700,7 @@ func Test_ReportMultiAttachError(t *testing.T) {
 []string{"Warning FailedAttachVolume Multi-Attach error for volume \"volume-name\" Volume is already used by 1 pod(s) in different namespaces"},
 },
 {
-"pods both in the same and anotother namespace use the volume",
+"pods both in the same and another namespace use the volume",
 []nodeWithPods{
 {"node1", []string{"ns1/pod1"}},
 {"node2", []string{"ns2/pod2"}},

@@ -769,7 +769,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 if err != nil {
 if err == pvtesting.ErrVersionConflict {
 // Ignore version errors
-klog.V(4).Infof("test intentionaly ignores version error.")
+klog.V(4).Infof("test intentionally ignores version error.")
 } else {
 t.Errorf("Error calling syncClaim: %v", err)
 // Finish the loop on the first error
@@ -786,7 +786,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 if err != nil {
 if err == pvtesting.ErrVersionConflict {
 // Ignore version errors
-klog.V(4).Infof("test intentionaly ignores version error.")
+klog.V(4).Infof("test intentionally ignores version error.")
 } else {
 t.Errorf("Error calling syncVolume: %v", err)
 // Finish the loop on the first error

@@ -107,7 +107,7 @@ func TestPVProtectionController(t *testing.T) {
 // Optional client reactors.
 reactors []reaction
 // PV event to simulate. This PV will be automatically added to
-// initalObjects.
+// initialObjects.
 updatedPV *v1.PersistentVolume
 // List of expected kubeclient actions that should happen during the
 // test.
@@ -220,7 +220,7 @@ func TestPVProtectionController(t *testing.T) {
 case *v1.PersistentVolume:
 pvInformer.Informer().GetStore().Add(obj)
 default:
-t.Fatalf("Unknown initalObject type: %+v", obj)
+t.Fatalf("Unknown initialObject type: %+v", obj)
 }
 }

@@ -127,7 +127,7 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
 return c.indexFunc(objInfo.latestObj)
 }
-// NewAssumeCache creates an assume cache for genernal objects.
+// NewAssumeCache creates an assume cache for general objects.
 func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache {
 c := &assumeCache{
 description: description,

@@ -163,7 +163,7 @@ func TestRestorePV(t *testing.T) {
 // Restore PV
 cache.Restore(oldPV.Name)
 if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
-t.Fatalf("Failed to GetPV() after iniital restore: %v", err)
+t.Fatalf("Failed to GetPV() after initial restore: %v", err)
 }
 // Assume newPV
@@ -420,7 +420,7 @@ func TestRestorePVC(t *testing.T) {
 // Restore PVC
 cache.Restore(getPVCName(oldPVC))
 if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
-t.Fatalf("Failed to GetPVC() after iniital restore: %v", err)
+t.Fatalf("Failed to GetPVC() after initial restore: %v", err)
 }
 // Assume newPVC

@@ -912,10 +912,10 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
 }
@@ -1037,10 +1037,10 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
 }
@@ -1150,10 +1150,10 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 t.Error("returned success but expected error")
 }
 if boundSatisfied != scenario.expectedBound {
-t.Errorf("expected boundSatsified %v, got %v", scenario.expectedBound, boundSatisfied)
+t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
 }
 if unboundSatisfied != scenario.expectedUnbound {
-t.Errorf("expected unboundSatsified %v, got %v", scenario.expectedUnbound, unboundSatisfied)
+t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
 }
 }