+ every time health check fails, measure up to 5 restarts.
+ the containers in the pod. The two busybox image containers should share the
+ drops a file, and main container accesses the shared data.
Delete the job and ensure if the pods associated with the job have been removed */ ginkgo.It("should remove pods when job is deleted", func() { @@ -232,7 +232,7 @@ var _ = SIGDescribe("Job", func() { job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) - ginkgo.By("Ensure pods equal to paralellism count is attached to the job") + ginkgo.By("Ensure pods equal to parallelism count is attached to the job") err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) @@ -304,7 +304,7 @@ var _ = SIGDescribe("Job", func() { /* Release: v1.15 Testname: Jobs, active pods, graceful termination - Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully. + Description: Create a job. Ensure the active pods reflect parallelism in the namespace and delete the job. Job MUST be deleted successfully. 
*/ framework.ConformanceIt("should delete a job", func() { ginkgo.By("Creating a job") @@ -432,7 +432,7 @@ var _ = SIGDescribe("Job", func() { job.Spec.Template.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": testNodeName} } - framework.Logf("Creating job %q with a node hostname selector %q wth cpu request %q", job.Name, testNodeName, cpuRequest) + framework.Logf("Creating job %q with a node hostname selector %q with cpu request %q", job.Name, testNodeName, cpuRequest) job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) @@ -471,7 +471,7 @@ var _ = SIGDescribe("Job", func() { job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) - ginkgo.By("Ensure pods equal to paralellism count is attached to the job") + ginkgo.By("Ensure pods equal to parallelism count is attached to the job") err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism) framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index fc8c459407c..0a5d0adce9e 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -185,7 +185,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s replicas := int32(1) // Create a ReplicaSet for a service that serves its hostname. 
- // The source for the Docker containter kubernetes/serve_hostname is + // The source for the Docker container kubernetes/serve_hostname is // in contrib/for-demos/serve_hostname framework.Logf("Creating ReplicaSet %s", name) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 819bdf039d5..a67559b56aa 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -1838,7 +1838,7 @@ func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, ss *appsv1.S set := getStatefulSet(c, ss.Namespace, ss.Name) setUID := set.GetUID() if setUID == "" { - framework.Failf("Statefulset %s mising UID", ss.Name) + framework.Failf("Statefulset %s missing UID", ss.Name) } return wait.PollImmediate(e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, func() (bool, error) { pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()}) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 2a49e8409e3..55c56045490 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -501,7 +501,7 @@ type podBatch struct { // 1. Create replication controllers that eat up all the space that should be // empty after setup, making sure they end up on different nodes by specifying // conflicting host port -// 2. Create targer RC that will generate the load on the cluster +// 2. Create target RC that will generate the load on the cluster // 3. Remove the rcs created in 1. 
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch, podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error { diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index fe38d5bf643..eeed66f1dbb 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -31,7 +31,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -// TODO: Those tests should be splitted by SIG and moved to SIG-owned directories, +// TODO: Those tests should be split by SIG and moved to SIG-owned directories, // // however that involves also splitting the actual upgrade jobs too. // Figure out the eventual solution for it. diff --git a/test/e2e/common/node/container.go b/test/e2e/common/node/container.go index 642bf5b3d89..c6d6ed93e38 100644 --- a/test/e2e/common/node/container.go +++ b/test/e2e/common/node/container.go @@ -136,7 +136,7 @@ const ( ContainerStateUnknown ContainerState = "Unknown" ) -// GetContainerState returns current state the container represents among its lifecyle +// GetContainerState returns current state the container represents among its lifecycle func GetContainerState(state v1.ContainerState) ContainerState { if state.Waiting != nil { return ContainerStateWaiting diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go index b75bd4f6364..8443c4146ac 100644 --- a/test/e2e/common/node/container_probe.go +++ b/test/e2e/common/node/container_probe.go @@ -190,7 +190,7 @@ var _ = SIGDescribe("Probing container", func() { /* Release: v1.9 Testname: Pod liveness probe, using http endpoint, multiple restarts (slow) - Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. 
Restart counts MUST increment every time health check fails, measure up to 5 restarts.
+ Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containers in the pod. The two busybox image containers should share the volumes mounted to the pod. + The main container should wait until the sub container drops a file, and main container accesses the shared data.
//Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() { volumeMountPath := "/etc/projected-configmap-volumes" diff --git a/test/e2e/common/storage/secrets_volume.go b/test/e2e/common/storage/secrets_volume.go index b1cb1fba088..2eb7bf408fc 100644 --- a/test/e2e/common/storage/secrets_volume.go +++ b/test/e2e/common/storage/secrets_volume.go @@ -433,7 +433,7 @@ var _ = SIGDescribe("Secrets", func() { }) // The secret is in pending during volume creation until the secret objects are available - // or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timout exception unless it is marked optional. + // or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional. // Slow (~5 mins) ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() { volumeMountPath := "/etc/secret-volumes" diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 9c12d96ee65..915ef6686be 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -162,7 +162,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface) // BeforeEach gets a client and makes a namespace. func (f *Framework) BeforeEach() { - // DeferCleanup, in constrast to AfterEach, triggers execution in + // DeferCleanup, in contrast to AfterEach, triggers execution in // first-in-last-out order. This ensures that the framework instance // remains valid as long as possible. 
// diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 0ab8d5c18fd..f1a01b48ba3 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -796,7 +796,7 @@ func (j *TestJig) WaitForIngressToStable() { } return true, nil }); err != nil { - framework.Failf("error in waiting for ingress to stablize: %v", err) + framework.Failf("error in waiting for ingress to stabilize: %v", err) } } diff --git a/test/e2e/framework/internal/output/output.go b/test/e2e/framework/internal/output/output.go index 0fc847b0141..cc1d503dc19 100644 --- a/test/e2e/framework/internal/output/output.go +++ b/test/e2e/framework/internal/output/output.go @@ -70,7 +70,7 @@ type TestResult struct { Output string // Failure is SpecSummary.Failure.Message with varying parts stripped. Failure string - // Stack is a normalized version (just file names, function parametes stripped) of + // Stack is a normalized version (just file names, function parameters stripped) of // Ginkgo's FullStackTrace of a failure. Empty if no failure. Stack string // Called to normalize the actual output string before comparison if non-nil. diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index d51fefcf138..3dfd28435b7 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -436,7 +436,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP // success. If 0, then we return as soon as all endpoints succeed. // - There is no logical change to test results if faillures happen AFTER endpoints have succeeded, // hence over-padding minTries will NOT reverse a successful result and is thus not very useful yet -// (See the TODO about checking probability, which isnt implemented yet). +// (See the TODO about checking probability, which isn't implemented yet). 
// - maxTries is the maximum number of curl/echo attempts before an error is returned. The // smaller this number is, the less 'slack' there is for declaring success. // - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit. diff --git a/test/e2e/framework/perf/perf.go b/test/e2e/framework/perf/perf.go index 6208b43c32d..5853761fb62 100644 --- a/test/e2e/framework/perf/perf.go +++ b/test/e2e/framework/perf/perf.go @@ -24,7 +24,7 @@ import ( ) // CurrentKubeletPerfMetricsVersion is the current kubelet performance metrics -// version. This is used by mutiple perf related data structures. We should +// version. This is used by multiple perf related data structures. We should // bump up the version each time we make an incompatible change to the metrics. const CurrentKubeletPerfMetricsVersion = "v2" diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index da490106f0c..be649522a2d 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -688,7 +688,7 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe // handleWaitingAPIErrror handles an error from an API request in the context of a Wait function. // If the error is retryable, sleep the recommended delay and ignore the error. -// If the erorr is terminal, return it. +// If the error is terminal, return it. func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) { taskDescription := fmt.Sprintf(taskFormat, taskArgs...) 
if retryNotFound && apierrors.IsNotFound(err) { diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go index be7895bd65c..fa8d15df553 100644 --- a/test/e2e/framework/providers/gce/ingress.go +++ b/test/e2e/framework/providers/gce/ingress.go @@ -788,7 +788,7 @@ func (cont *IngressController) CreateStaticIP(name string) string { return ip.Address } -// deleteStaticIPs delets all static-ips allocated through calls to +// deleteStaticIPs deletes all static-ips allocated through calls to // CreateStaticIP. func (cont *IngressController) deleteStaticIPs() error { if cont.staticIPName != "" { diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index 8e87e2a6c37..f1da3839d83 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -417,7 +417,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { if es, ok := obj.(*discoveryv1.EndpointSlice); ok { - // TODO: currently we only consider addreses in 1 slice, but services with + // TODO: currently we only consider addresses in 1 slice, but services with // a large number of endpoints (>1000) may have multiple slices. Some slices // with only a few addresses. We should check the addresses in all slices. if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 { @@ -427,7 +427,7 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error { }, UpdateFunc: func(old, cur interface{}) { if es, ok := cur.(*discoveryv1.EndpointSlice); ok { - // TODO: currently we only consider addreses in 1 slice, but services with + // TODO: currently we only consider addresses in 1 slice, but services with // a large number of endpoints (>1000) may have multiple slices. Some slices // with only a few addresses. We should check the addresses in all slices. 
if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 { @@ -854,12 +854,12 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v // If the node's internal address points to localhost, then we are not // able to test the service reachability via that address if isInvalidOrLocalhostAddress(internalAddr) { - framework.Logf("skipping testEndpointReachability() for internal adddress %s", internalAddr) + framework.Logf("skipping testEndpointReachability() for internal address %s", internalAddr) continue } // Check service reachability on the node internalIP which is same family as clusterIP if isClusterIPV4 != netutils.IsIPv4String(internalAddr) { - framework.Logf("skipping testEndpointReachability() for internal adddress %s as it does not match clusterIP (%s) family", internalAddr, clusterIP) + framework.Logf("skipping testEndpointReachability() for internal address %s as it does not match clusterIP (%s) family", internalAddr, clusterIP) continue } @@ -872,7 +872,7 @@ func testReachabilityOverNodePorts(nodes *v1.NodeList, sp v1.ServicePort, pod *v externalAddrs := e2enode.CollectAddresses(nodes, v1.NodeExternalIP) for _, externalAddr := range externalAddrs { if isClusterIPV4 != netutils.IsIPv4String(externalAddr) { - framework.Logf("skipping testEndpointReachability() for external adddress %s as it does not match clusterIP (%s) family", externalAddr, clusterIP) + framework.Logf("skipping testEndpointReachability() for external address %s as it does not match clusterIP (%s) family", externalAddr, clusterIP) continue } err := testEndpointReachability(externalAddr, sp.NodePort, sp.Protocol, pod) diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index d6356db7d4b..65aaac0cb66 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -245,7 +245,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin return nil } -// udpate 
updates a statefulset, and it is only used within rest.go +// update updates a statefulset, and it is only used within rest.go func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet { for i := 0; i < 3; i++ { ss, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 53c4d1a409b..4940e771d0e 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -1367,7 +1367,7 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool { // WatchEventSequenceVerifier ... // manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure // -// testContext cancelation signal across API boundries, e.g: context.TODO() +// testContext cancellation signal across API boundaries, e.g: context.TODO() // dc sets up a client to the API // resourceType specify the type of resource // namespace select a namespace diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 4c102f6bee2..137d6d99942 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -199,7 +199,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { framework.ExpectNoError(err) }() - ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags") + ginkgo.By("Accessing service through the external ip and examine got no response from the node without tags") err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15) framework.ExpectNoError(err) }) @@ -244,12 +244,12 @@ func assertNotReachableHTTPTimeout(ip, path string, port int, timeout time.Durat } } -// testHitNodesFromOutside checkes HTTP connectivity from outside. +// testHitNodesFromOutside checks HTTP connectivity from outside. 
func testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error { return testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1) } -// testHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count. +// testHitNodesFromOutsideWithCount checks HTTP connectivity from outside with count. func testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String, countToSucceed int) error { framework.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed) diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go index 2b0b02d9b52..521df2d45ba 100644 --- a/test/e2e/network/loadbalancer.go +++ b/test/e2e/network/loadbalancer.go @@ -512,7 +512,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response _, err := jig.Run(nil) framework.ExpectNoError(err) - // Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons. + // Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons. acceptPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), acceptPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) @@ -542,7 +542,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, acceptPod.Name, svcIP) checkReachabilityFromPod(false, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP) - // Make sure dropPod is running. There are certain chances that the pod might be teminated due to unexpected reasons. + // Make sure dropPod is running. 
There are certain chances that the pod might be terminated due to unexpected reasons. dropPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), dropPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 7dd16bea8d6..d2b3cc6764e 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -152,7 +152,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { if f.ScaleTestDeploy != nil { f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil { - errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) + errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) } } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 4fadf6a120f..7a596eb14ab 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -3754,7 +3754,7 @@ var _ = common.SIGDescribe("Services", func() { }) // execAffinityTestForSessionAffinityTimeout is a helper function that wrap the logic of -// affinity test for non-load-balancer services. Session afinity will be +// affinity test for non-load-balancer services. Session affinity will be // enabled when the service is created and a short timeout will be configured so // session affinity must change after the timeout expirese. 
func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs clientset.Interface, svc *v1.Service) { @@ -3853,7 +3853,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf } // execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wrap the logic of -// affinity test for non-load-balancer services. Session afinity will be +// affinity test for non-load-balancer services. Session affinity will be // enabled when the service is created. If parameter isTransitionTest is true, // session affinity will be switched off/on and test if the service converges // to a stable affinity state. diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index 43bead1554c..e8d76318969 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -404,7 +404,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { if podName == podGroup+"1" { framework.Logf("Noticed Pod %q gets evicted.", podName) } else if podName == podGroup+"2" { - framework.Failf("Unexepected Pod %q gets evicted.", podName) + framework.Failf("Unexpected Pod %q gets evicted.", podName) return } } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 72ba241df79..dc0ac629709 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -394,7 +394,7 @@ func (c *MockCSICalls) Get() []MockCSICall { return c.calls[:] } -// Add appens one new call at the end. +// Add appends one new call at the end. 
func (c *MockCSICalls) Add(call MockCSICall) { c.mutex.Lock() defer c.mutex.Unlock() diff --git a/test/e2e/storage/drivers/csi_objects.go b/test/e2e/storage/drivers/csi_objects.go index 4fce259b3ef..28a7c4397b7 100644 --- a/test/e2e/storage/drivers/csi_objects.go +++ b/test/e2e/storage/drivers/csi_objects.go @@ -49,7 +49,7 @@ func shredFile(filePath string) { framework.Logf("File %v successfully shredded", filePath) return } - // Shred failed Try to remove the file for good meausure + // Shred failed Try to remove the file for good measure err = os.Remove(filePath) framework.ExpectNoError(err, "Failed to remove service account file %s", filePath) diff --git a/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/test/e2e/storage/testsuites/fsgroupchangepolicy.go index 184f62d5f90..12f0f8a8cf4 100644 --- a/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -141,8 +141,8 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD changedRootDirFileOwnership int // Change the ownership of the file in the root directory (/mnt/volume1/file1), as part of the initial pod changedSubDirFileOwnership int // Change the ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the initial pod secondPodFsGroup int // FsGroup of the second pod - finalExpectedRootDirFileOwnership int // Final expcted ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod - finalExpectedSubDirFileOwnership int // Final expcted ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod + finalExpectedRootDirFileOwnership int // Final expected ownership of the file in the root directory (/mnt/volume1/file1), as part of the second pod + finalExpectedSubDirFileOwnership int // Final expected ownership of the file in the sub directory (/mnt/volume1/subdir/file2), as part of the second pod // Whether the test can run for 
drivers that support volumeMountGroup capability. // For CSI drivers that support volumeMountGroup: // * OnRootMismatch policy is not supported. diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 1491880d67d..93b4696b235 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -670,7 +670,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1. return pv, err } -// checkProvisioning verifies that the claim is bound and has the correct properities +// checkProvisioning verifies that the claim is bound and has the correct properties func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) framework.ExpectNoError(err) diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index a67741f7a30..4f035f9ef9d 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -225,7 +225,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. 
Error: %v", podIndex, pod, err) } - // TODO: write data per pod and validate it everytime + // TODO: write data per pod and validate it every time err = e2epod.DeletePodWithWait(f.ClientSet, pod) if err != nil { diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index d5a09ba2c0d..75a7b98eac4 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -283,7 +283,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision) // Events are unreliable, don't depend on the event. It's used only to speed up the test. if err != nil { - framework.Logf("Warning: did not get event about provisioing failed") + framework.Logf("Warning: did not get event about provisioning failed") } // Check the pvc is still pending diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index e23a191d4b0..d8424cb628d 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -316,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace, case *rbacv1.RoleRef: // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles // which contains all role names that are defined cluster-wide before the test starts? - // All those names are excempt from renaming. That list could be populated by querying + // All those names are exempt from renaming. That list could be populated by querying // and get extended by tests. 
if item.Name != "e2e-test-privileged-psp" { PatchName(f, &item.Name) diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index ce5b7c4a59b..8c7c28230e7 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -306,7 +306,7 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri return pod } -// func to get pod spec with given volume paths, node selector lables and container commands +// func to get pod spec with given volume paths, node selector labels and container commands func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod { var volumeMounts []v1.VolumeMount var volumes []v1.Volume @@ -613,7 +613,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) { return vmxPath } -// verify ready node count. Try upto 3 minutes. Return true if count is expected count +// verify ready node count. Try up to 3 minutes. 
Return true if count is expected count func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool { numNodes := 0 for i := 0; i < 36; i++ { diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 3d1836a5177..45718102b9b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -159,7 +159,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st } func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { - ginkgo.By("Verifing disk format") + ginkgo.By("Verifying disk format") eagerlyScrub := false thinProvisioned := false diskFound := false diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml index be74afaff21..69994a1f41e 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml @@ -184,7 +184,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io --- -# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding() +# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding() kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/README.md b/test/e2e/testing-manifests/storage-csi/hostpath/README.md index 4890f7d302d..399316db2b8 100644 --- a/test/e2e/testing-manifests/storage-csi/hostpath/README.md +++ b/test/e2e/testing-manifests/storage-csi/hostpath/README.md @@ -1,4 +1,4 @@ -The files in this directory are exact copys of "kubernetes-latest" in +The files in this directory are exact copies of "kubernetes-latest" in 
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/ Do not edit manually. Run ./update-hostpath.sh to refresh the content. diff --git a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml index 21e84f90714..c0fa86c0389 100644 --- a/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml +++ b/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml @@ -1,4 +1,4 @@ -# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding() +# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding() kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml index 833a8ff934c..f85d72e7043 100644 --- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml @@ -46,7 +46,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io --- -# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding() +# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding() kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/test/e2e/testing-manifests/storage-csi/update-hostpath.sh b/test/e2e/testing-manifests/storage-csi/update-hostpath.sh index af94b9b7025..ce60b39bc36 100755 --- a/test/e2e/testing-manifests/storage-csi/update-hostpath.sh +++ b/test/e2e/testing-manifests/storage-csi/update-hostpath.sh @@ -47,14 +47,14 @@ trap "rm -rf csi-driver-host-path" EXIT # Main YAML files. mkdir hostpath cat >hostpath/README.md <hostpath/hostpath/e2e-test-rbac.yaml <