From 3b207306201296a83949973e368b79dabfaa8821 Mon Sep 17 00:00:00 2001
From: aaronxu
Date: Wed, 14 Dec 2016 06:03:00 -0800
Subject: [PATCH] spell check for test/*

---
 hack/generate-docs.sh                               | 2 +-
 hack/local-up-discovery.sh                          | 2 +-
 hack/update-generated-protobuf-dockerized.sh        | 2 +-
 hack/verify-symbols.sh                              | 2 +-
 .../garbagecollector/garbage_collector_test.go      | 4 ++--
 test/integration/scheduler/scheduler_test.go        | 8 ++++----
 test/integration/scheduler_perf/scheduler_test.go   | 2 +-
 test/integration/thirdparty/thirdparty_test.go      | 2 +-
 test/integration/volume/persistent_volumes_test.go  | 6 +++---
 test/kubemark/run-e2e-tests.sh                      | 2 +-
 10 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/hack/generate-docs.sh b/hack/generate-docs.sh
index 4a5f9a4f7ca..e55defb9bcb 100755
--- a/hack/generate-docs.sh
+++ b/hack/generate-docs.sh
@@ -43,7 +43,7 @@ kube::util::gen-docs "${KUBE_TEMP}"
 # remove all of the old docs
 kube::util::remove-gen-docs
 
-# Copy fresh docs into the repo.
+# copy fresh docs into the repo.
 # the shopt is so that we get .generated_docs from the glob.
 shopt -s dotglob
 cp -af "${KUBE_TEMP}"/* "${KUBE_ROOT}"
diff --git a/hack/local-up-discovery.sh b/hack/local-up-discovery.sh
index 8615c0543a3..e527b36c7ac 100755
--- a/hack/local-up-discovery.sh
+++ b/hack/local-up-discovery.sh
@@ -91,7 +91,7 @@ function start_discovery {
   ${kubectl} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig" --certificate-authority="${CERT_DIR}/discovery-ca.crt" --embed-certs --server="https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}"
 
   # Wait for kubernetes-discovery to come up before launching the rest of the components.
-  # this should work since we're creating a node port service
+  # This should work since we're creating a node port service.
   echo "Waiting for kubernetes-discovery to come up: https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version"
   kube::util::wait_for_url "https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version" "kubernetes-discovery: " 1 60 || exit 1
 
diff --git a/hack/update-generated-protobuf-dockerized.sh b/hack/update-generated-protobuf-dockerized.sh
index 7f4f641c3ab..afa5cd4225b 100755
--- a/hack/update-generated-protobuf-dockerized.sh
+++ b/hack/update-generated-protobuf-dockerized.sh
@@ -49,4 +49,4 @@ PATH="${KUBE_ROOT}/_output/bin:${PATH}" \
   "${gotoprotobuf}" \
   --proto-import="${KUBE_ROOT}/vendor" \
   --proto-import="${KUBE_ROOT}/third_party/protobuf" \
-  $@
+  "$@"
diff --git a/hack/verify-symbols.sh b/hack/verify-symbols.sh
index 43d865cef87..aa5870a2960 100755
--- a/hack/verify-symbols.sh
+++ b/hack/verify-symbols.sh
@@ -25,7 +25,7 @@ kube::golang::setup_env
 
 make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube
 
-# add other BADSYMBOLS here.
+# Add other BADSYMBOLS here.
 BADSYMBOLS=(
   "httptest"
   "testify"
diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go
index 53bd9b12ed0..904923d4d90 100644
--- a/test/integration/garbagecollector/garbage_collector_test.go
+++ b/test/integration/garbagecollector/garbage_collector_test.go
@@ -428,7 +428,7 @@ func TestOrphaning(t *testing.T) {
 		t.Fatalf("Failed to create replication controller: %v", err)
 	}
 
-	// these pods should be ophaned.
+	// these pods should be orphaned.
 	var podUIDs []types.UID
 	podsNum := 3
 	for i := 0; i < podsNum; i++ {
@@ -478,7 +478,7 @@ func TestOrphaning(t *testing.T) {
 	}
 	for _, pod := range pods.Items {
 		if len(pod.ObjectMeta.OwnerReferences) != 0 {
-			t.Errorf("pod %s still has non-empty OwnerRefereces: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
+			t.Errorf("pod %s still has non-empty OwnerReferences: %v", pod.ObjectMeta.Name, pod.ObjectMeta.OwnerReferences)
 		}
 	}
 }
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 456013077f3..caf15da9cfb 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -174,7 +174,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
 				err = waitForReflection(t, s, nodeKey, func(node interface{}) bool {
 					// An unschedulable node should still be present in the store
 					// Nodes that are unschedulable or that are not ready or
-					// have their disk full (Node.Spec.Conditions) are exluded
+					// have their disk full (Node.Spec.Conditions) are excluded
 					// based on NodeConditionPredicate, a separate check
 					return node != nil && node.(*v1.Node).Spec.Unschedulable == true
 				})
@@ -319,7 +319,7 @@ func TestMultiScheduler(t *testing.T) {
 		8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
 			- note: these two pods belong to default scheduler which no longer exists
 		9. **check point-3**:
-			- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled
+			- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
 	*/
 	// 1. create and start default-scheduler
 	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
@@ -453,7 +453,7 @@ func TestMultiScheduler(t *testing.T) {
 	}
 
 	// 9. **check point-3**:
-	//		- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 shoule NOT be scheduled
+	//		- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
 	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
 	if err == nil {
 		t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
@@ -587,7 +587,7 @@ func TestAllocatable(t *testing.T) {
 	// 7. Test: this test pod should not be scheduled since it request more than Allocatable
 	err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name))
 	if err == nil {
-		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectly, %v", testAllocPod2.Name, err)
+		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
 	} else {
 		t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
 	}
diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go
index fff58483e58..73f820784a5 100644
--- a/test/integration/scheduler_perf/scheduler_test.go
+++ b/test/integration/scheduler_perf/scheduler_test.go
@@ -184,7 +184,7 @@ func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
 // This is used to learn the scheduling throughput on various
 // sizes of cluster and changes as more and more pods are scheduled.
 // It won't stop until all pods are scheduled.
-// It retruns the minimum of throughput over whole run.
+// It returns the minimum of throughput over whole run.
 func schedulePods(config *testConfig) int32 {
 	defer config.destroyFunc()
 	if err := config.nodePreparer.PrepareNodes(); err != nil {
diff --git a/test/integration/thirdparty/thirdparty_test.go b/test/integration/thirdparty/thirdparty_test.go
index 69d9c091863..8d17265b976 100644
--- a/test/integration/thirdparty/thirdparty_test.go
+++ b/test/integration/thirdparty/thirdparty_test.go
@@ -78,7 +78,7 @@ type FooList struct {
 	Items []Foo `json:"items"`
 }
 
-// installThirdParty installs a third party resoure and returns a defer func
+// installThirdParty installs a third party resource and returns a defer func
 func installThirdParty(t *testing.T, client clientset.Interface, clientConfig *restclient.Config, tpr *extensions.ThirdPartyResource, group, version, resource string) func() {
 	var err error
 	_, err = client.Extensions().ThirdPartyResources().Create(tpr)
diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go
index 7fe9b2c10b6..9c425570e27 100644
--- a/test/integration/volume/persistent_volumes_test.go
+++ b/test/integration/volume/persistent_volumes_test.go
@@ -372,7 +372,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	_, s := framework.RunAMaster(nil)
 	defer s.Close()
 
-	ns := framework.CreateTestingNamespace("pvc-match-expresssions", s, t)
+	ns := framework.CreateTestingNamespace("pvc-match-expressions", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 
 	testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
@@ -778,7 +778,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 		}
 		pvs[i] = newPV
 		// Drain watchPV with all events generated by the PV until it's bound
-		// We don't want to catch "PV craated with Status.Phase == Pending"
+		// We don't want to catch "PV created with Status.Phase == Pending"
 		// later in this test.
 		waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
 	}
@@ -1135,7 +1135,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
 	}
 	watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
 	if err != nil {
-		t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
+		t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
 	}
 
 	return testClient, ctrl, watchPV, watchPVC
diff --git a/test/kubemark/run-e2e-tests.sh b/test/kubemark/run-e2e-tests.sh
index feb67d973aa..3ae027087b8 100755
--- a/test/kubemark/run-e2e-tests.sh
+++ b/test/kubemark/run-e2e-tests.sh
@@ -40,5 +40,5 @@ else
 fi
 
 go run ./hack/e2e.go -v --check_version_skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS}"
-# Just make local testing easier...
+# Just make local test easier...
 # ${KUBE_ROOT}/hack/ginkgo-e2e.sh "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" $ARGS