From 2b427f7d59e37ef5cb638380ffc344419118bb52 Mon Sep 17 00:00:00 2001
From: Ivan Shvedunov
Date: Fri, 14 Oct 2016 00:29:50 +0300
Subject: [PATCH] Fix typos

---
 docs/proposals/resource-metrics-api.md              | 8 ++++----
 examples/javaweb-tomcat-sidecar/README.md           | 2 +-
 pkg/cloudprovider/providers/azure/azure_test.go     | 2 +-
 plugin/pkg/admission/antiaffinity/admission_test.go | 2 +-
 test/e2e/kubelet_perf.go                            | 2 +-
 test/e2e/security_context.go                        | 2 +-
 6 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/proposals/resource-metrics-api.md b/docs/proposals/resource-metrics-api.md
index 5dda4124f4a..e1cba18c58e 100644
--- a/docs/proposals/resource-metrics-api.md
+++ b/docs/proposals/resource-metrics-api.md
@@ -60,7 +60,7 @@ due to performance issues.
 #### Scheduler
 
 Scheduler in order to schedule best-effort pods requires node level resource usage metrics
-as an average aggreated across 1 minute (the window may change in the future).
+as an average aggregated across 1 minute (the window may change in the future).
 The metrics should be available for all resources supported in the scheduler.
 Currently the scheduler does not need this information, because it schedules best-effort pods
 without considering node usage. But having the metrics available in the API server is a blocker
@@ -98,7 +98,7 @@ it will be probably possible to provide a reasonable implementation of the featu
 #### Kubernetes dashboard
 
 [Kubernetes dashboard](https://github.com/kubernetes/dashboard) in order to draw graphs requires resource usage
-in timeseries format from relatively long period of time. The aggreations should be also possible on various levels
+in timeseries format from relatively long period of time. The aggregations should be also possible on various levels
 including replication controllers, deployments, services, etc.
 
 Since the use case is complicated it will not be supported initally in the API and they will query Heapster
@@ -168,7 +168,7 @@ The following query parameters are supported:
 - `labelSelector` - restrict the list of returned objects by labels (list endpoints only)
 
 In the future we may want to introduce the following params:
-`aggreator` (`max`, `min`, `95th`, etc.) and `window` (`1h`, `1d`, `1w`, etc.)
+`aggregator` (`max`, `min`, `95th`, etc.) and `window` (`1h`, `1d`, `1w`, etc.)
 which will allow to get the other aggregates over the custom time window.
 
 ## Further improvements
@@ -177,7 +177,7 @@ Depending on the further requirements the following features may be added:
 - support for more metrics
 - support for application level metrics
 - watch for metrics
-- possibility to query for window sizes and aggreation functions (though single window size/aggregation function per request)
+- possibility to query for window sizes and aggregation functions (though single window size/aggregation function per request)
 - cluster level metrics
 
diff --git a/examples/javaweb-tomcat-sidecar/README.md b/examples/javaweb-tomcat-sidecar/README.md
index 01d669906de..666af39f5d0 100644
--- a/examples/javaweb-tomcat-sidecar/README.md
+++ b/examples/javaweb-tomcat-sidecar/README.md
@@ -32,7 +32,7 @@ Documentation for other releases can be found at
 
-## Java Web Application with Tomcat and Sidercar Container
+## Java Web Application with Tomcat and Sidecar Container
 
 The following document describes the deployment of a Java Web application using Tomcat. Instead of packaging `war` file inside the Tomcat image or mount the `war` as a volume, we use a sidecar container as `war` file provider.

diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go
index c6890d1e9ca..e7f8707553e 100644
--- a/pkg/cloudprovider/providers/azure/azure_test.go
+++ b/pkg/cloudprovider/providers/azure/azure_test.go
@@ -79,7 +79,7 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T)
 		t.Error("Expected the loadbalancer to need an update")
 	}
 
-	// ensure we abandonded the frontend ip configuration
+	// ensure we abandoned the frontend ip configuration
 	if len(*lb.Properties.FrontendIPConfigurations) != 0 {
 		t.Error("Expected the loadbalancer to have no frontend ip configuration")
 	}
diff --git a/plugin/pkg/admission/antiaffinity/admission_test.go b/plugin/pkg/admission/antiaffinity/admission_test.go
index 3c8d2b9e550..d27178571de 100644
--- a/plugin/pkg/admission/antiaffinity/admission_test.go
+++ b/plugin/pkg/admission/antiaffinity/admission_test.go
@@ -207,7 +207,7 @@ func TestInterPodAffinityAdmission(t *testing.T) {
 					"thisIsAInvalidAffinity": [{}
 				}}`,
 			},
-			// however, we should not got error here
+			// however, we should not get error here
 			errorExpected: false,
 		},
 	}
diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go
index d7cb3cf340a..023f678caf6 100644
--- a/test/e2e/kubelet_perf.go
+++ b/test/e2e/kubelet_perf.go
@@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
 		// affect the runtime cpu usage. Fail the test if prepulling cannot
 		// finish in time.
 		if err := framework.WaitForPodsSuccess(f.Client, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
-			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adultrated", imagePrePullingLongTimeout)
+			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
 		}
 		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeNames = sets.NewString()
diff --git a/test/e2e/security_context.go b/test/e2e/security_context.go
index 349aaaa8598..a678535e001 100644
--- a/test/e2e/security_context.go
+++ b/test/e2e/security_context.go
@@ -16,7 +16,7 @@ limitations under the License.
 
 /* This test check that SecurityContext parameters specified at the
  * pod or the container level work as intended. These tests cannot be
- * run when the 'SecurityContextDeny' addmissioin controller is not used
+ * run when the 'SecurityContextDeny' admission controller is not used
  * so they are skipped by default.
  */
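
Note on the resource-metrics-api.md hunks: the proposal lists `labelSelector` as the supported
query parameter for list endpoints and names `aggregator` and `window` only as possible future
additions. A minimal, illustrative Go sketch of how a client might build such a query follows;
the endpoint path is an assumption for illustration and is not defined by this patch.

// Illustrative sketch only: building a metrics list query with the
// labelSelector parameter described in resource-metrics-api.md.
// The endpoint path below is assumed, and aggregator/window are the
// proposal's possible *future* parameters, shown commented out.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Assumed list endpoint for node-level metrics (illustration only).
	base := "https://apiserver.example.com/apis/metrics/v1alpha1/nodes"

	params := url.Values{}
	// Supported per the proposal: restrict returned objects by label.
	params.Set("labelSelector", "kubernetes.io/role=node")
	// Possible future parameters mentioned in the proposal:
	// params.Set("aggregator", "95th")
	// params.Set("window", "1h")

	fmt.Printf("GET %s?%s\n", base, params.Encode())
}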