Fix pkg/controller typos in some error messages, comments, etc.

- Applied review feedback from LuisSanchez
- Co-Authored-By: Luis Sanchez <sanchezl@redhat.com>

genernal -> general
iniital -> initial
initalObjects -> initialObjects
intentionaly -> intentionally
inforer -> informer
anotother -> another
triger -> trigger
mutli -> multi
Verifyies -> Verifies
valume -> volume
unexpect -> unexpected
unfulfiled -> unfulfilled
implenets -> implements
assignement -> assignment
expectataions -> expectations
nexpected -> unexpected
boundSatsified -> boundSatisfied
externel -> external
calcuates -> calculates
workes -> workers
unitialized -> uninitialized
afater -> after
Espected -> Expected
nodeMontiorGracePeriod -> NodeMonitorGracePeriod
estimateGrracefulTermination -> estimateGracefulTermination
secondrary -> secondary
ShouldRunDaemonPodOnUnscheduableNode -> ShouldRunDaemonPodOnUnschedulableNode
rrror -> error
expectatitons -> expectations
foud -> found
epackage -> package
succesfulJobs -> successfulJobs
namesapce -> namespace
ConfigMapResynce -> ConfigMapResync
Author: taesun_lee
Date: 2020-02-24 12:57:53 +09:00
Committed by: Taesun Lee
Parent: ac25069a05
Commit: 79680b5d9b
29 changed files with 54 additions and 54 deletions

@@ -32,7 +32,7 @@ type NodeLifecycleControllerConfiguration struct {
 // nodeStartupGracePeriod is the amount of time which we allow starting a node to
 // be unresponsive before marking it unhealthy.
 NodeStartupGracePeriod metav1.Duration
-// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
+// NodeMonitorGracePeriod is the amount of time which we allow a running node to be
 // unresponsive before marking it unhealthy. Must be N times more than kubelet's
 // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
 // to post node status.
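
For context, the constraint that comment describes can be illustrated with a small sketch (not part of this commit): it assumes the upstream defaults of a 10s kubelet nodeStatusUpdateFrequency and N = 4 allowed retries, which yield the familiar 40s grace period.

// Minimal sketch, not from this commit: the relationship between the kubelet's
// status update frequency and NodeMonitorGracePeriod, assuming upstream defaults.
package main

import (
	"fmt"
	"time"
)

func main() {
	nodeStatusUpdateFrequency := 10 * time.Second // how often the kubelet posts node status (assumed default)
	retries := 4                                  // N: missed status updates tolerated before marking the node unhealthy
	nodeMonitorGracePeriod := time.Duration(retries) * nodeStatusUpdateFrequency

	fmt.Printf("node monitor grace period: %v\n", nodeMonitorGracePeriod) // 40s
}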

@@ -129,7 +129,7 @@ const (
 retrySleepTime = 20 * time.Millisecond
 nodeNameKeyIndex = "spec.nodeName"
 // podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
-// Pod update workes will only handle lagging cache pods. 4 workes should be enough.
+// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
 podUpdateWorkerSize = 4
)
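
The sizing comment above can be pictured with a hypothetical, simplified worker pool (an illustration, not the controller's actual update loop): a fixed number of goroutines drain a shared queue of lagging pod updates.

// Hypothetical analogue of a fixed-size pod update worker pool; names and the
// channel-based queue are illustrative only.
package main

import (
	"fmt"
	"sync"
)

const podUpdateWorkerSize = 4

func main() {
	updates := make(chan string, 16)
	var wg sync.WaitGroup

	for i := 0; i < podUpdateWorkerSize; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for pod := range updates {
				fmt.Printf("worker %d handled lagging update for %s\n", id, pod)
			}
		}(i)
	}

	for _, pod := range []string{"pod-a", "pod-b", "pod-c"} {
		updates <- pod
	}
	close(updates)
	wg.Wait()
}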

@@ -734,7 +734,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 return true, 0
 })
 } else {
-t.Fatalf("Zone %v was unitialized!", zone)
+t.Fatalf("Zone %v was uninitialized!", zone)
 }
 }

@@ -137,7 +137,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool {
 }
 // GetWorkerUnsafe returns a TimedWorker corresponding to the given key.
-// Unsafe method - workers have attached goroutines which can fire afater this function is called.
+// Unsafe method - workers have attached goroutines which can fire after this function is called.
 func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker {
 q.Lock()
 defer q.Unlock()
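
To see why the method is documented as unsafe, consider a hypothetical, self-contained analogue (not the controller's code): the worker returned by the lookup is backed by a timer goroutine that may fire at any point after the lock is released.

// Illustrative sketch of the race the "Unsafe" comment warns about; the types
// here are stand-ins, not the real TimedWorkerQueue.
package main

import (
	"fmt"
	"sync"
	"time"
)

type timedWorker struct {
	timer *time.Timer
}

type timedWorkerQueue struct {
	sync.Mutex
	workers map[string]*timedWorker
}

func (q *timedWorkerQueue) getWorkerUnsafe(key string) *timedWorker {
	q.Lock()
	defer q.Unlock()
	return q.workers[key]
}

func main() {
	q := &timedWorkerQueue{workers: map[string]*timedWorker{}}
	q.workers["node-1"] = &timedWorker{
		timer: time.AfterFunc(time.Millisecond, func() { fmt.Println("work fired") }),
	}

	w := q.getWorkerUnsafe("node-1")
	// Between the lookup above and any use of w, the AfterFunc goroutine may
	// already have fired, so callers cannot assume the work is still pending.
	time.Sleep(10 * time.Millisecond)
	_ = w
}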

@@ -47,7 +47,7 @@ func TestExecute(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
@@ -75,7 +75,7 @@ func TestExecuteDelayed(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 5 {
-t.Errorf("Espected testVal = 5, got %v", lastVal)
+t.Errorf("Expected testVal = 5, got %v", lastVal)
 }
 }
@@ -105,7 +105,7 @@ func TestCancel(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 3 {
-t.Errorf("Espected testVal = 3, got %v", lastVal)
+t.Errorf("Expected testVal = 3, got %v", lastVal)
 }
 }
@@ -136,6 +136,6 @@ func TestCancelAndReadd(t *testing.T) {
 wg.Wait()
 lastVal := atomic.LoadInt32(&testVal)
 if lastVal != 4 {
-t.Errorf("Espected testVal = 4, got %v", lastVal)
+t.Errorf("Expected testVal = 4, got %v", lastVal)
 }
 }
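
The test hunks above all follow the same pattern; a self-contained sketch of it (assumed names, not the actual test file): each scheduled piece of work increments an atomic counter and signals a WaitGroup, and the test checks the final count once everything has fired.

// Sketch of the WaitGroup-plus-atomic-counter pattern used by these tests.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var testVal int32
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		time.AfterFunc(10*time.Millisecond, func() {
			atomic.AddInt32(&testVal, 1)
			wg.Done()
		})
	}

	wg.Wait()
	if lastVal := atomic.LoadInt32(&testVal); lastVal != 5 {
		fmt.Printf("Expected testVal = 5, got %v\n", lastVal)
	}
}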