diff --git a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py index c421aa51b2d..8ed601e3cd3 100644 --- a/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py +++ b/cluster/juju/layers/kubeapi-load-balancer/reactive/load_balancer.py @@ -72,7 +72,7 @@ def install_load_balancer(apiserver, tls): cert_exists = server_cert_path and os.path.isfile(server_cert_path) server_key_path = layer_options.get('server_key_path') key_exists = server_key_path and os.path.isfile(server_key_path) - # Do both the the key and certificate exist? + # Do both the key and certificate exist? if cert_exists and key_exists: # At this point the cert and key exist, and they are owned by root. chown = ['chown', 'www-data:www-data', server_cert_path] diff --git a/cluster/photon-controller/util.sh b/cluster/photon-controller/util.sh index f31990bacee..55ec52ff9cc 100755 --- a/cluster/photon-controller/util.sh +++ b/cluster/photon-controller/util.sh @@ -636,7 +636,7 @@ function install-kubernetes-on-master { } # -# Install Kubernetes on the the nodes in parallel +# Install Kubernetes on the nodes in parallel # This uses the kubernetes-master-salt.sh script created by gen-node-salt # That script uses salt to install Kubernetes # diff --git a/cmd/kubeadm/app/discovery/discovery.go b/cmd/kubeadm/app/discovery/discovery.go index a1bff386449..a1ff8188295 100644 --- a/cmd/kubeadm/app/discovery/discovery.go +++ b/cmd/kubeadm/app/discovery/discovery.go @@ -33,7 +33,7 @@ const TokenUser = "tls-bootstrap-token-user" // For returns a KubeConfig object that can be used for doing the TLS Bootstrap with the right credentials // Also, before returning anything, it makes sure it can trust the API Server func For(cfg *kubeadmapi.NodeConfiguration) (*clientcmdapi.Config, error) { - // TODO: Print summary info about the CA certificate, along with the the checksum signature + // TODO: Print summary info about the CA 
certificate, along with the checksum signature // we also need an ability for the user to configure the client to validate received CA cert against a checksum clusterinfo, err := GetValidatedClusterInfoObject(cfg) if err != nil { diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index e17359bd820..282da22b265 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -263,7 +263,7 @@ kube::golang::current_platform() { echo "$os/$arch" } -# Takes the the platform name ($1) and sets the appropriate golang env variables +# Takes the platform name ($1) and sets the appropriate golang env variables # for that platform. kube::golang::set_platform_envs() { [[ -n ${1-} ]] || { diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go index 921ab959e19..2e7686be898 100644 --- a/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -96,7 +96,7 @@ func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { if obj.Conntrack.TCPCloseWaitTimeout == zero { // See https://github.com/kubernetes/kubernetes/issues/32551. // - // CLOSE_WAIT conntrack state occurs when the the Linux kernel + // CLOSE_WAIT conntrack state occurs when the Linux kernel // sees a FIN from the remote server. Note: this is a half-close // condition that persists as long as the local side keeps the // socket open. 
The condition is rare as it is typical in most diff --git a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go index 4e0e2ab0a77..eccaab88aac 100644 --- a/pkg/controller/deployment/rollback.go +++ b/pkg/controller/deployment/rollback.go @@ -55,7 +55,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v) // rollback by copying podTemplate.Spec from the replica set // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call - // no-op if the the spec matches current deployment's podTemplate.Spec + // no-op if the spec matches current deployment's podTemplate.Spec performedRollback, err := dc.rollbackToTemplate(d, rs) if performedRollback && err == nil { dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision)) diff --git a/pkg/controller/podautoscaler/metrics/utilization.go b/pkg/controller/podautoscaler/metrics/utilization.go index 946a43bb125..0417b9a0e3d 100644 --- a/pkg/controller/podautoscaler/metrics/utilization.go +++ b/pkg/controller/podautoscaler/metrics/utilization.go @@ -21,7 +21,7 @@ import ( ) // GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests, -// and a target utilization percentage, and calculates the the ratio of +// and a target utilization percentage, and calculates the ratio of // desired to actual utilization (returning that, the actual utilization, and the raw average value) func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int64, targetUtilization int32) (utilizationRatio float64, currentUtilization int32, rawAverageValue int64, err error) { metricsTotal := int64(0) diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index 2cde7ab3fe4..0fa0e947aad 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ 
b/pkg/controller/statefulset/stateful_set.go @@ -400,7 +400,7 @@ func (ssc *StatefulSetController) processNextWorkItem() bool { return true } -// worker runs a worker goroutine that invokes processNextWorkItem until the the controller's queue is closed +// worker runs a worker goroutine that invokes processNextWorkItem until the controller's queue is closed func (ssc *StatefulSetController) worker() { for ssc.processNextWorkItem() { } diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 4ee4e26c9b4..09032c3a0e9 100644 --- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -173,7 +173,7 @@ type actualStateOfWorld struct { sync.RWMutex } -// The volume object represents a volume the the attach/detach controller +// The volume object represents a volume the attach/detach controller // believes to be successfully attached to a node it is managing. type attachedVolume struct { // volumeName contains the unique identifier for this volume. diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index fea87a2da88..59a874473ea 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -250,7 +250,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err) } glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) - // Include the the top level cgroup for enforcing node allocatable into cgroup-root. + // Include the top level cgroup for enforcing node allocatable into cgroup-root. // This way, all sub modules can avoid having to understand the concept of node allocatable. 
cgroupRoot = path.Join(cgroupRoot, defaultNodeAllocatableCgroupName) } diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index e242398ff5a..df1b274f598 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -48,7 +48,7 @@ type HandlerRunner interface { type RuntimeHelper interface { GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error) - // GetPodCgroupParent returns the the CgroupName identifer, and its literal cgroupfs form on the host + // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host // of a pod. GetPodCgroupParent(pod *v1.Pod) string GetPodDir(podUID types.UID) string diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index a6304db35f4..cf004cfa2d8 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -48,7 +48,7 @@ type portal struct { // ServiceInfo contains information and state for a particular proxied service type ServiceInfo struct { - // Timeout is the the read/write timeout (used for UDP connections) + // Timeout is the read/write timeout (used for UDP connections) Timeout time.Duration // ActiveClients is the cache of active UDP clients being proxied by this proxy for this service ActiveClients *ClientCache diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go index ac123033a0f..b301b428403 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go +++ b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go @@ -550,7 +550,7 @@ func (p *Parser) lookahead(context ParserContext) (Token, string) { return tok, lit } -// consume returns current token and string. 
Increments the the position +// consume returns current token and string. Increments the position func (p *Parser) consume(context ParserContext) (Token, string) { p.position++ tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 69c0e4497eb..ae6ac19933e 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -67,7 +67,7 @@ import ( ) const ( - // DefaultLegacyAPIPrefix is where the the legacy APIs will be located. + // DefaultLegacyAPIPrefix is where the legacy APIs will be located. DefaultLegacyAPIPrefix = "/api" // APIGroupPrefix is where non-legacy API group will be located. diff --git a/staging/src/k8s.io/apiserver/pkg/server/handler.go b/staging/src/k8s.io/apiserver/pkg/server/handler.go index 48d1408a45c..5368e2e0fbf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/handler.go +++ b/staging/src/k8s.io/apiserver/pkg/server/handler.go @@ -62,7 +62,7 @@ type APIServerHandler struct { // which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the // containing webservice faces all the same problems listed above. // This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to - // decide if the the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to PostGoRestful mux in + // decide if the route is likely to be handled by gorestful and route there if needed. Otherwise, it goes to PostGoRestful mux in // order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think // we should consider completely removing gorestful. // Other servers should only use this opaquely to delegate to an API server. 
diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go index f53aa2c3701..0eaa6282457 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go @@ -27,7 +27,7 @@ import ( ) // secretbox implements at rest encryption of the provided values given a 32 byte secret key. -// Uses a standard 24 byte nonce (placed at the the beginning of the cipher text) generated +// Uses a standard 24 byte nonce (placed at the beginning of the cipher text) generated // from crypto/rand. Does not perform authentication of the data at rest. type secretboxTransformer struct { key [32]byte diff --git a/staging/src/k8s.io/client-go/dynamic/client_pool.go b/staging/src/k8s.io/client-go/dynamic/client_pool.go index 277dad49cc6..77c1b3d046e 100644 --- a/staging/src/k8s.io/client-go/dynamic/client_pool.go +++ b/staging/src/k8s.io/client-go/dynamic/client_pool.go @@ -56,7 +56,7 @@ type clientPoolImpl struct { mapper meta.RESTMapper } -// NewClientPool returns a ClientPool from the specified config. It reuses clients for the the same +// NewClientPool returns a ClientPool from the specified config. It reuses clients for the same // group version. It is expected this type may be wrapped by specific logic that special cases certain // resources or groups. 
func NewClientPool(config *restclient.Config, mapper meta.RESTMapper, apiPathResolverFunc APIPathResolverFunc) ClientPool { diff --git a/staging/src/k8s.io/client-go/tools/cache/fifo.go b/staging/src/k8s.io/client-go/tools/cache/fifo.go index 3f6e2a9480a..ef70b7aca1d 100644 --- a/staging/src/k8s.io/client-go/tools/cache/fifo.go +++ b/staging/src/k8s.io/client-go/tools/cache/fifo.go @@ -169,7 +169,7 @@ func (f *FIFO) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresent assumes the fifo lock is already held and adds the the provided +// addIfNotPresent assumes the fifo lock is already held and adds the provided // item to the queue under id if it does not already exist. func (f *FIFO) addIfNotPresent(id string, obj interface{}) { f.populated = true diff --git a/third_party/forked/gonum/graph/traverse/traverse.go b/third_party/forked/gonum/graph/traverse/traverse.go index cc361c85fe3..105c8f6e14c 100644 --- a/third_party/forked/gonum/graph/traverse/traverse.go +++ b/third_party/forked/gonum/graph/traverse/traverse.go @@ -21,7 +21,7 @@ type BreadthFirst struct { } // Walk performs a breadth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node, depth) is true. During the traversal, if the Visit field is // non-nil, it is called with the nodes joined by each followed edge. @@ -113,7 +113,7 @@ type DepthFirst struct { } // Walk performs a depth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. 
The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node) is true. During the traversal, if the Visit field is non-nil, it // is called with the nodes joined by each followed edge. diff --git a/third_party/forked/gonum/graph/traverse/visit_depth_first.go b/third_party/forked/gonum/graph/traverse/visit_depth_first.go index b7f45a7b321..89df3c69026 100644 --- a/third_party/forked/gonum/graph/traverse/visit_depth_first.go +++ b/third_party/forked/gonum/graph/traverse/visit_depth_first.go @@ -30,7 +30,7 @@ type VisitingDepthFirst struct { } // Walk performs a depth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node) is true. During the traversal, if the Visit field is non-nil, it // is called with the nodes joined by each followed edge.