From fca78228000038e8689c6aa16946caa5c1cbe61d Mon Sep 17 00:00:00 2001
From: Justin Santa Barbara
Date: Fri, 3 Jul 2015 16:29:14 -0400
Subject: [PATCH] Misc (non-code) spelling fixes

---
 cluster/gce/util.sh      |  4 ++--
 pkg/kubelet/kubelet.go   | 10 +++++-----
 test/e2e/resize_nodes.go |  2 +-
 test/e2e/util.go         |  2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index dcd4fa4e07d..efdd7b18f5a 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -769,7 +769,7 @@ function kube-down {
   echo "Bringing down cluster"
   set +e  # Do not stop on error
 
-  # The gcloud APIs don't return machine parsable error codes/retry information. Therefore the best we can
+  # The gcloud APIs don't return machine parseable error codes/retry information. Therefore the best we can
   # do is parse the output and special case particular responses we are interested in.
   if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
     deleteCmdOutput=$(gcloud preview managed-instance-groups --zone "${ZONE}" delete \
@@ -777,7 +777,7 @@ function kube-down {
       --quiet \
       "${NODE_INSTANCE_PREFIX}-group")
     if [[ "$deleteCmdOutput" != "" ]]; then
-      # Managed instance group deletion is done asyncronously, we must wait for it to complete, or subsequent steps fail
+      # Managed instance group deletion is done asynchronously, we must wait for it to complete, or subsequent steps fail
       deleteCmdOperationId=$(echo $deleteCmdOutput | grep "Operation:" | sed "s/.*Operation:[[:space:]]*\([^[:space:]]*\).*/\1/g")
       if [[ "$deleteCmdOperationId" != "" ]]; then
         deleteCmdStatus="PENDING"
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index fffaf8576c1..03680863168 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -90,7 +90,7 @@ var (
 
 type SyncHandler interface {
 	// Syncs current state to match the specified pods. SyncPodType specified what
-	// type of sync is occuring per pod. StartTime specifies the time at which
+	// type of sync is occurring per pod. StartTime specifies the time at which
 	// syncing began (for use in monitoring).
 	SyncPods(pods []*api.Pod, podSyncTypes map[types.UID]SyncPodType, mirrorPods map[string]*api.Pod,
 		startTime time.Time) error
@@ -417,7 +417,7 @@ type Kubelet struct {
 	serviceLister serviceLister
 	nodeLister    nodeLister
 
-	// Last timestamp when runtime responsed on ping.
+	// Last timestamp when runtime responded on ping.
 	// Mutex is used to protect this value.
 	runtimeMutex       sync.Mutex
 	runtimeUpThreshold time.Duration
@@ -479,7 +479,7 @@ type Kubelet struct {
 	//    will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
 	//    The constant must be less than podEvictionTimeout.
 	// 2. nodeStatusUpdateFrequency needs to be large enough for kubelet to generate node
-	//    status. Kubelet may fail to update node status reliablly if the value is too small,
+	//    status. Kubelet may fail to update node status reliably if the value is too small,
 	//    as it takes time to gather all necessary node information.
 	nodeStatusUpdateFrequency time.Duration
 
@@ -669,7 +669,7 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
 	return nil, fmt.Errorf("node %v not found", nodeName)
 }
 
-// Starts garbage collection theads.
+// Starts garbage collection threads.
 func (kl *Kubelet) StartGarbageCollection() {
 	go util.Forever(func() {
 		if err := kl.containerGC.GarbageCollect(); err != nil {
@@ -932,7 +932,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 	)
 
 	// Get all service resources from the master (via a cache),
-	// and populate them into service enviroment variables.
+	// and populate them into service environment variables.
 	if kl.serviceLister == nil {
 		// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
 		return m, nil
diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go
index 4962e214763..c0f019f0926 100644
--- a/test/e2e/resize_nodes.go
+++ b/test/e2e/resize_nodes.go
@@ -520,7 +520,7 @@ var _ = Describe("Nodes", func() {
 			Logf("Waiting for node %s to be ready", node.Name)
 			waitForNodeToBe(c, node.Name, true, 2*time.Minute)
 
-			By("verify wheter new pods can be created on the re-attached node")
+			By("verify whether new pods can be created on the re-attached node")
 			// increasing the RC size is not a valid way to test this
 			// since we have no guarantees the pod will be scheduled on our node.
 			additionalPod := "additionalpod"
diff --git a/test/e2e/util.go b/test/e2e/util.go
index 940a3a2e60f..d4ac486dc96 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -842,7 +842,7 @@ func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err e
 	if err != nil {
 		return
 	}
-	Logf("Asyncronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
+	Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
 	err = cmd.Start()
 	return
 }