Merge pull request #27625 from goltermann/spell2

Automatic merge from submit-queue

Fix several spelling errors in comments

What the title says
k8s-merge-robot 2016-06-18 23:08:28 -07:00 committed by GitHub
commit 090e6ff660
19 changed files with 22 additions and 22 deletions


@@ -113,7 +113,7 @@ func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache
 // processServiceDeletion is triggered when a service is delete from underlying k8s cluster
 // the deletion function will wip out the cached ingress info of the service from federation service ingress
-// the function returns a bool to indicate if actual update happend on federation service cache
+// the function returns a bool to indicate if actual update happened on federation service cache
 // and if the federation service cache is updated, the updated info should be post to federation apiserver
 func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedService, clusterName string) bool {
 	cachedService.rwlock.Lock()
@@ -147,7 +147,7 @@ func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedServic
 }
 // processServiceUpdate Update ingress info when service updated
-// the function returns a bool to indicate if actual update happend on federation service cache
+// the function returns a bool to indicate if actual update happened on federation service cache
 // and if the federation service cache is updated, the updated info should be post to federation apiserver
 func (cc *clusterClientCache) processServiceUpdate(cachedService *cachedService, service *v1.Service, clusterName string) bool {
 	glog.V(4).Infof("Processing service update for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
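
Aside: these comments document a small but important contract — the cache helpers return a bool so the caller only posts an update to the federation apiserver when the cache actually changed. A minimal, self-contained sketch of that pattern (the types and names here are illustrative, not the controller's real ones):

package main

import "fmt"

// serviceCache mirrors the controller pattern above: the update helper
// reports whether cached state actually changed, so the caller knows
// when to propagate the change upstream (here just a print; in the
// real controller, a post to the federation apiserver).
type serviceCache struct{ endpoints map[string]string }

func (c *serviceCache) processUpdate(name, endpoint string) bool {
	if c.endpoints[name] == endpoint {
		return false // no actual change, nothing to post
	}
	c.endpoints[name] = endpoint
	return true
}

func main() {
	c := &serviceCache{endpoints: map[string]string{}}
	if c.processUpdate("svc-a", "10.0.0.1") {
		fmt.Println("cache changed: post updated info to apiserver")
	}
	if !c.processUpdate("svc-a", "10.0.0.1") {
		fmt.Println("no change: skip the apiserver write")
	}
}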


@@ -52,7 +52,7 @@ var (
 	zeroBytes = []byte("0")
 )
-// int64Amount represents a fixed precision numerator and arbitary scale exponent. It is faster
+// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
 // than operations on inf.Dec for values that can be represented as int64.
 type int64Amount struct {
 	value int64
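
Aside: the comment is worth unpacking — a quantity is stored as an int64 numerator plus a base-10 scale exponent (value × 10^scale), so common values avoid inf.Dec's arbitrary-precision arithmetic entirely. A toy illustration of the representation (not the real resource.Quantity code):

package main

import "fmt"

// amount mimics int64Amount's idea: the number is value * 10^scale.
// 250m is {250, -3}; large round values still fit in int64.
type amount struct {
	value int64
	scale int32
}

func (a amount) float() float64 {
	f := float64(a.value)
	for s := a.scale; s > 0; s-- {
		f *= 10
	}
	for s := a.scale; s < 0; s++ {
		f /= 10
	}
	return f
}

func main() {
	fmt.Println(amount{value: 250, scale: -3}.float()) // 0.25
	fmt.Println(amount{value: 5, scale: 9}.float())    // 5e+09
}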


@@ -224,7 +224,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri
 	// Special case for runtime.RawExtension and runtime.Objects because they always fail to validate
 	// This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
-	// super-type (RawExtention)
+	// super-type (RawExtension)
 	if s.isGenericArray(details) {
 		errs := s.validateItems(value)
 		if len(errs) > 0 {
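
Aside: the special case exists because a field declared as the generic super-type actually carries the bytes of some concrete sub-type, so schema validation against the declared type is guaranteed to fail. The same situation is easy to reproduce with plain encoding/json (this sketch is an analogy, not the validator's code):

package main

import (
	"encoding/json"
	"fmt"
)

// The declared item type is an opaque placeholder (json.RawMessage,
// playing the role of RawExtension); the payload is really a concrete
// sub-type, decoded only when the caller knows what to expect.
type list struct {
	Items []json.RawMessage `json:"items"`
}

type deployment struct {
	Kind     string `json:"kind"`
	Replicas int    `json:"replicas"`
}

func main() {
	data := []byte(`{"items":[{"kind":"Deployment","replicas":3}]}`)
	var l list
	if err := json.Unmarshal(data, &l); err != nil {
		panic(err)
	}
	var d deployment
	if err := json.Unmarshal(l.Items[0], &d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d) // {Kind:Deployment Replicas:3}
}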


@@ -884,7 +884,7 @@ type NetworkPolicyPeer struct {
 	// Selects Namespaces using cluster scoped-labels. This
 	// matches all pods in all namespaces selected by this label selector.
 	// This field follows standard label selector semantics.
-	// If omited, this selector selects no namespaces.
+	// If omitted, this selector selects no namespaces.
 	// If present but empty, this selector selects all namespaces.
 	NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"`
 }
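
Aside: the omitted-vs-empty distinction in this doc comment trips people up often enough to deserve a concrete illustration. Below is a self-contained toy selector showing the three cases (nil pointer, empty selector, populated selector); the real semantics live in the label selector machinery, this is just the shape of the rule:

package main

import "fmt"

type labelSelector struct{ matchLabels map[string]string }

// selects implements the documented rule: a nil (omitted) selector
// selects nothing, a non-nil empty selector selects everything, and a
// populated selector matches on its labels.
func selects(sel *labelSelector, labels map[string]string) bool {
	if sel == nil {
		return false // omitted: selects no namespaces
	}
	for k, v := range sel.matchLabels {
		if labels[k] != v {
			return false
		}
	}
	return true // empty matchLabels: selects all namespaces
}

func main() {
	ns := map[string]string{"team": "web"}
	fmt.Println(selects(nil, ns))                                                           // false
	fmt.Println(selects(&labelSelector{}, ns))                                              // true
	fmt.Println(selects(&labelSelector{matchLabels: map[string]string{"team": "web"}}, ns)) // true
}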


@@ -675,7 +675,7 @@ message NetworkPolicyPeer {
 	// Selects Namespaces using cluster scoped-labels. This
 	// matches all pods in all namespaces selected by this label selector.
 	// This field follows standard label selector semantics.
-	// If omited, this selector selects no namespaces.
+	// If omitted, this selector selects no namespaces.
 	// If present but empty, this selector selects all namespaces.
 	optional LabelSelector namespaceSelector = 2;
 }


@@ -1178,7 +1178,7 @@ type NetworkPolicyPeer struct {
 	// Selects Namespaces using cluster scoped-labels. This
 	// matches all pods in all namespaces selected by this label selector.
 	// This field follows standard label selector semantics.
-	// If omited, this selector selects no namespaces.
+	// If omitted, this selector selects no namespaces.
 	// If present but empty, this selector selects all namespaces.
 	NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
 }


@@ -487,7 +487,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string {
 var map_NetworkPolicyPeer = map[string]string{
 	"podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.",
-	"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omited, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
+	"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
 }
 func (NetworkPolicyPeer) SwaggerDoc() map[string]string {


@@ -120,7 +120,7 @@ type Disks interface {
 	// DeleteDisk deletes PD.
 	DeleteDisk(diskToDelete string) error
-	// GetAutoLabelsForPD returns labels to apply to PeristentVolume
+	// GetAutoLabelsForPD returns labels to apply to PersistentVolume
 	// representing this PD, namely failure domain and zone.
 	GetAutoLabelsForPD(name string) (map[string]string, error)
 }
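
Aside: "failure domain and zone" here means the topology labels stamped onto the PersistentVolume so scheduling keeps pods in the same zone as their disk. A hedged sketch of what a caller might get back — the exact label keys are assumptions based on the well-known beta failure-domain labels of this era, not taken from this diff:

package main

import "fmt"

func main() {
	// hypothetical result of GetAutoLabelsForPD for a PD in us-central1-b
	labels := map[string]string{
		"failure-domain.beta.kubernetes.io/zone":   "us-central1-b",
		"failure-domain.beta.kubernetes.io/region": "us-central1",
	}
	for k, v := range labels {
		fmt.Printf("%s=%s\n", k, v)
	}
}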


@@ -61,7 +61,7 @@ const (
 	podCIDRUpdateRetry = 5
 	// controls how often NodeController will try to evict Pods from non-responsive Nodes.
 	nodeEvictionPeriod = 100 * time.Millisecond
-	// controlls how many NodeSpec updates NC can process in any moment.
+	// controls how many NodeSpec updates NC can process in any moment.
 	cidrUpdateWorkers   = 10
 	cidrUpdateQueueSize = 5000
 )


@@ -268,7 +268,7 @@ func TestSync(t *testing.T) {
 			[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
 		},
 		{
-			// syncClaim with claim bound to non-exising volume. Check it's
+			// syncClaim with claim bound to non-existing volume. Check it's
 			// marked as Lost.
 			"3-2 - bound claim with missing volume",
 			novolumes,


@@ -774,7 +774,7 @@ func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim
 func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error {
 	glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
-	// Save the PV only when any modification is neccesary.
+	// Save the PV only when any modification is necessary.
 	clone, err := conversion.NewCloner().DeepCopy(volume)
 	if err != nil {
 		return fmt.Errorf("Error cloning pv: %v", err)
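
Aside: the DeepCopy call right after the comment is the standard clone-before-modify discipline — objects coming out of shared caches must never be mutated in place, so the controller copies first and rolls back the binding on the copy. A reduced sketch of the pattern (toy types, not the real PV controller):

package main

import "fmt"

type volume struct {
	name     string
	claimRef *string
}

// deepCopy copies the struct and the pointed-to claim ref, so mutating
// the clone can never touch the cached original.
func (v *volume) deepCopy() *volume {
	clone := *v
	if v.claimRef != nil {
		ref := *v.claimRef
		clone.claimRef = &ref
	}
	return &clone
}

func main() {
	claim := "default/my-claim"
	cached := &volume{name: "pv-1", claimRef: &claim}

	clone := cached.deepCopy()
	clone.claimRef = nil // roll back the binding on the copy only

	fmt.Println(cached.claimRef != nil) // true: cache untouched
	fmt.Println(clone.claimRef == nil)  // true: clone unbound
}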


@@ -128,7 +128,7 @@ func NewPersistentVolumeController(
 	return controller
 }
-// initalizeCaches fills all controller caches with initial data from etcd in
+// initializeCaches fills all controller caches with initial data from etcd in
 // order to have the caches already filled when first addClaim/addVolume to
 // perform initial synchronization of the controller.
 func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {


@@ -110,7 +110,7 @@ func TestDeleteSync(t *testing.T) {
 	wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
 		reactor.lock.Lock()
 		defer reactor.lock.Unlock()
-		// Bind the volume to ressurected claim (this should never
+		// Bind the volume to resurrected claim (this should never
 		// happen)
 		claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound)
 		reactor.claims[claim.Name] = claim


@@ -137,7 +137,7 @@ type actualStateOfWorld struct {
 }
 // The volume object represents a volume the the attach/detach controller
-// believes to be succesfully attached to a node it is managing.
+// believes to be successfully attached to a node it is managing.
 type attachedVolume struct {
 	// volumeName contains the unique identifier for this volume.
 	volumeName api.UniqueVolumeName


@@ -161,7 +161,7 @@ func isLess(i, j reflect.Value) (bool, error) {
 	if t, ok := in.(unversioned.Time); ok {
 		return t.Before(j.Interface().(unversioned.Time)), nil
 	}
-	// fallback to the fields comparision
+	// fallback to the fields comparison
 	for idx := 0; idx < i.NumField(); idx++ {
 		less, err := isLess(i.Field(idx), j.Field(idx))
 		if err != nil || !less {
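
Aside: the "fields comparison" fallback walks two struct values field by field via reflection and lets the first differing field decide the ordering. A reduced, runnable version of the idea (error handling and the extra kinds of the real isLess are omitted):

package main

import (
	"fmt"
	"reflect"
)

// less orders two reflect.Values of the same type: scalars compare
// directly, structs fall back to field-by-field comparison where the
// first unequal field decides.
func less(i, j reflect.Value) bool {
	switch i.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return i.Int() < j.Int()
	case reflect.String:
		return i.String() < j.String()
	case reflect.Struct:
		for idx := 0; idx < i.NumField(); idx++ {
			if i.Field(idx).Interface() == j.Field(idx).Interface() {
				continue // tie: move on to the next field
			}
			return less(i.Field(idx), j.Field(idx))
		}
	}
	return false // equal (or unhandled kind)
}

type pair struct {
	A int
	B string
}

func main() {
	x := reflect.ValueOf(pair{1, "b"})
	y := reflect.ValueOf(pair{1, "c"})
	fmt.Println(less(x, y)) // true: A ties, B decides
}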


@@ -39,7 +39,7 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume
 	return volumesToReturn, len(volumesToReturn) > 0
 }
-// podVolumesExist checks wiht the volume manager and returns true any of the
+// podVolumesExist checks with the volume manager and returns true any of the
 // pods for the specified volume are mounted.
 func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
 	if mountedVolumes :=


@@ -1213,7 +1213,7 @@ func netnsPathFromName(netnsName string) string {
 // setupPodNetwork creates a network namespace for the given pod and calls
 // configured NetworkPlugin's setup function on it.
 // It returns the namespace name, configured IP (if available), and an error if
-// one occured.
+// one occurred.
 //
 // If the pod is running in host network or is running using the no-op plugin, then nothing will be done.
 func (r *Runtime) setupPodNetwork(pod *api.Pod) (string, string, error) {


@@ -135,7 +135,7 @@ func (s *Scheduler) scheduleOne() {
 	}
 	bindingStart := time.Now()
-	// If binding succeded then PodScheduled condition will be updated in apiserver so that
+	// If binding succeeded then PodScheduled condition will be updated in apiserver so that
 	// it's atomic with setting host.
 	err := s.config.Binder.Bind(b)
 	if err != nil {