Merge pull request #27625 from goltermann/spell2

Automatic merge from submit-queue

Fix several spelling errors in comments

What the title says
k8s-merge-robot 2016-06-18 23:08:28 -07:00 committed by GitHub
commit 090e6ff660
19 changed files with 22 additions and 22 deletions

View File

@@ -113,7 +113,7 @@ func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache
// processServiceDeletion is triggered when a service is delete from underlying k8s cluster
// the deletion function will wip out the cached ingress info of the service from federation service ingress
-// the function returns a bool to indicate if actual update happend on federation service cache
+// the function returns a bool to indicate if actual update happened on federation service cache
// and if the federation service cache is updated, the updated info should be post to federation apiserver
func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedService, clusterName string) bool {
cachedService.rwlock.Lock()
@@ -147,7 +147,7 @@ func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedServic
}
// processServiceUpdate Update ingress info when service updated
-// the function returns a bool to indicate if actual update happend on federation service cache
+// the function returns a bool to indicate if actual update happened on federation service cache
// and if the federation service cache is updated, the updated info should be post to federation apiserver
func (cc *clusterClientCache) processServiceUpdate(cachedService *cachedService, service *v1.Service, clusterName string) bool {
glog.V(4).Infof("Processing service update for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
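Note: the comments corrected above describe a contract worth spelling out: the processing functions report whether the cached federation state actually changed, and only then does the caller push the update to the federation apiserver. A minimal, self-contained sketch of that pattern, using stand-in types rather than the controller's real ones:

package main

import "fmt"

// cachedService is a stand-in for the controller's cached federation state.
type cachedService struct {
    ingressByCluster map[string]string
}

// processServiceDeletion wipes the cached ingress info for one cluster and
// reports whether the cache actually changed.
func processServiceDeletion(cs *cachedService, clusterName string) bool {
    if _, ok := cs.ingressByCluster[clusterName]; !ok {
        return false
    }
    delete(cs.ingressByCluster, clusterName)
    return true
}

func main() {
    cs := &cachedService{ingressByCluster: map[string]string{"cluster-a": "10.0.0.1"}}
    if processServiceDeletion(cs, "cluster-a") {
        fmt.Println("cache updated; post the new ingress status to the federation apiserver")
    }
    if !processServiceDeletion(cs, "cluster-a") {
        fmt.Println("no further change; skip the apiserver update")
    }
}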

View File

@@ -52,7 +52,7 @@ var (
zeroBytes = []byte("0")
)
-// int64Amount represents a fixed precision numerator and arbitary scale exponent. It is faster
+// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
// than operations on inf.Dec for values that can be represented as int64.
type int64Amount struct {
value int64
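Note: for context on the type touched above, int64Amount stores a quantity as an integer numerator plus a decimal exponent, which is why the comment calls it faster than inf.Dec for values that fit in an int64. A minimal, self-contained sketch of that representation (stand-in names, not the upstream type):

package main

import "fmt"

// amount mimics the idea behind int64Amount: the numeric value is
// value * 10^scale, held entirely in machine integers.
type amount struct {
    value int64 // fixed precision numerator
    scale int32 // arbitrary decimal scale exponent
}

func (a amount) String() string {
    return fmt.Sprintf("%d x 10^%d", a.value, a.scale)
}

func main() {
    cpu := amount{value: 1500, scale: -3} // 1.5 cores, i.e. "1500m"
    mem := amount{value: 2, scale: 9}     // 2 * 10^9 bytes, i.e. "2G"
    fmt.Println(cpu, mem)
}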

View File

@@ -224,7 +224,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri
// Special case for runtime.RawExtension and runtime.Objects because they always fail to validate
// This is because the actual values will be of some sub-type (e.g. Deployment) not the expected
-// super-type (RawExtention)
+// super-type (RawExtension)
if s.isGenericArray(details) {
errs := s.validateItems(value)
if len(errs) > 0 {

View File

@@ -884,7 +884,7 @@ type NetworkPolicyPeer struct {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
-// If omited, this selector selects no namespaces.
+// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"`
}
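Note: the wording fixed above encodes a subtle API distinction: an omitted (nil) namespaceSelector selects no namespaces, while a present but empty selector selects all of them. A small self-contained illustration of that nil-versus-empty distinction, with stand-in types in place of the real API structs:

package main

import "fmt"

// LabelSelector and NetworkPolicyPeer are stand-ins for the real API types;
// only the nil-versus-empty distinction matters here.
type LabelSelector struct {
    MatchLabels map[string]string
}

type NetworkPolicyPeer struct {
    NamespaceSelector *LabelSelector
}

func describe(p NetworkPolicyPeer) string {
    switch {
    case p.NamespaceSelector == nil:
        return "omitted: selects no namespaces"
    case len(p.NamespaceSelector.MatchLabels) == 0:
        return "present but empty: selects all namespaces"
    default:
        return "present: selects namespaces matching the labels"
    }
}

func main() {
    fmt.Println(describe(NetworkPolicyPeer{}))
    fmt.Println(describe(NetworkPolicyPeer{NamespaceSelector: &LabelSelector{}}))
    fmt.Println(describe(NetworkPolicyPeer{NamespaceSelector: &LabelSelector{
        MatchLabels: map[string]string{"team": "a"},
    }}))
}

Keeping the field a pointer is what lets the API tell "omitted" apart from "present but empty".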

View File

@@ -675,7 +675,7 @@ message NetworkPolicyPeer {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
-// If omited, this selector selects no namespaces.
+// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
optional LabelSelector namespaceSelector = 2;
}

View File

@@ -1178,7 +1178,7 @@ type NetworkPolicyPeer struct {
// Selects Namespaces using cluster scoped-labels. This
// matches all pods in all namespaces selected by this label selector.
// This field follows standard label selector semantics.
-// If omited, this selector selects no namespaces.
+// If omitted, this selector selects no namespaces.
// If present but empty, this selector selects all namespaces.
NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
}

View File

@@ -487,7 +487,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string {
var map_NetworkPolicyPeer = map[string]string{
"podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.",
"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omited, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
"namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.",
}
func (NetworkPolicyPeer) SwaggerDoc() map[string]string {

View File

@@ -218,7 +218,7 @@ func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadB
// NOTE The documentation for the AWS API indicates we could get an HTTP 400
// back if a policy of the same name already exists. However, the aws-sdk does not
// seem to return an error to us in these cases. Therefore this will issue an API
-// request everytime.
+// request every time.
err := s.createProxyProtocolPolicy(loadBalancerName)
if err != nil {
return nil, err

View File

@@ -120,7 +120,7 @@ type Disks interface {
// DeleteDisk deletes PD.
DeleteDisk(diskToDelete string) error
-// GetAutoLabelsForPD returns labels to apply to PeristentVolume
+// GetAutoLabelsForPD returns labels to apply to PersistentVolume
// representing this PD, namely failure domain and zone.
GetAutoLabelsForPD(name string) (map[string]string, error)
}
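Note: to make the interface comment above concrete, a caller would take the returned map and copy it onto the PersistentVolume's labels. A hedged, self-contained sketch with a fake implementation; the label keys shown are assumptions for illustration, not guaranteed by this interface:

package main

import "fmt"

// Disks mirrors only the method shown above; fakeDisks is a stand-in
// implementation for this illustration.
type Disks interface {
    GetAutoLabelsForPD(name string) (map[string]string, error)
}

type fakeDisks struct{}

func (fakeDisks) GetAutoLabelsForPD(name string) (map[string]string, error) {
    // The keys and values below are assumptions for the sketch; a real cloud
    // provider returns the failure domain and zone it looked up for the PD.
    return map[string]string{
        "failure-domain.example/zone":   "zone-a",
        "failure-domain.example/region": "region-1",
    }, nil
}

func main() {
    var d Disks = fakeDisks{}
    labels, err := d.GetAutoLabelsForPD("my-pd")
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    // A caller would copy these onto the PersistentVolume's labels.
    pvLabels := map[string]string{}
    for k, v := range labels {
        pvLabels[k] = v
    }
    fmt.Println(pvLabels)
}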

View File

@@ -61,7 +61,7 @@ const (
podCIDRUpdateRetry = 5
// controls how often NodeController will try to evict Pods from non-responsive Nodes.
nodeEvictionPeriod = 100 * time.Millisecond
-// controlls how many NodeSpec updates NC can process in any moment.
+// controls how many NodeSpec updates NC can process in any moment.
cidrUpdateWorkers = 10
cidrUpdateQueueSize = 5000
)

View File

@@ -268,7 +268,7 @@ func TestSync(t *testing.T) {
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
-// syncClaim with claim bound to non-exising volume. Check it's
+// syncClaim with claim bound to non-existing volume. Check it's
// marked as Lost.
"3-2 - bound claim with missing volume",
novolumes,

View File

@@ -530,7 +530,7 @@ func (ctrl *PersistentVolumeController) updateClaimPhaseWithEvent(claim *api.Per
return nil, err
}
-// Emit the event only when the status change happens, not everytime
+// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message)
ctrl.eventRecorder.Event(newClaim, eventtype, reason, message)
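Note: the comment corrected above captures a pattern worth making explicit: update the phase and emit the event only when the phase actually changes, so repeated syncClaim calls do not spam the event stream. A minimal, self-contained sketch of that guard (stand-in types, not the controller's real ones):

package main

import "fmt"

// claim is a stand-in for the real API object; only the phase matters here.
type claim struct {
    name  string
    phase string
}

// setPhaseWithEvent updates the phase and emits an event only when the phase
// actually changes, so repeated sync calls stay quiet.
func setPhaseWithEvent(c *claim, phase, message string, events chan<- string) {
    if c.phase == phase {
        return // nothing changed; do not re-emit the event
    }
    c.phase = phase
    events <- fmt.Sprintf("claim %q changed status to %q: %s", c.name, phase, message)
}

func main() {
    events := make(chan string, 2)
    c := &claim{name: "claim-1", phase: "Pending"}
    setPhaseWithEvent(c, "Lost", "volume is missing", events)
    setPhaseWithEvent(c, "Lost", "volume is missing", events) // second call emits nothing
    close(events)
    for e := range events {
        fmt.Println(e)
    }
}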
@@ -587,7 +587,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *api.P
return nil, err
}
-// Emit the event only when the status change happens, not everytime
+// Emit the event only when the status change happens, not every time
// syncClaim is called.
glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message)
ctrl.eventRecorder.Event(newVol, eventtype, reason, message)
@@ -774,7 +774,7 @@ func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim
func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error {
glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
-// Save the PV only when any modification is neccesary.
+// Save the PV only when any modification is neccessary.
clone, err := conversion.NewCloner().DeepCopy(volume)
if err != nil {
return fmt.Errorf("Error cloning pv: %v", err)

View File

@@ -128,7 +128,7 @@ func NewPersistentVolumeController(
return controller
}
-// initalizeCaches fills all controller caches with initial data from etcd in
+// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when first addClaim/addVolume to
// perform initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {
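Note: the renamed comment above describes a warm-up step: list the existing volumes and claims once and seed the local caches, so the first addClaim/addVolume notifications already see populated caches. A simplified, self-contained sketch of that idea (plain slices stand in for the etcd-backed ListerWatcher sources):

package main

import "fmt"

// volume and claim are stand-ins for the API objects; plain slices stand in
// for the etcd-backed ListerWatcher sources.
type volume struct{ name string }
type claim struct{ name string }

type controller struct {
    volumes map[string]volume
    claims  map[string]claim
}

// initializeCaches seeds the local caches from an initial listing so that the
// first addClaim/addVolume notifications already see populated caches.
func (c *controller) initializeCaches(initialVolumes []volume, initialClaims []claim) {
    for _, v := range initialVolumes {
        c.volumes[v.name] = v
    }
    for _, cl := range initialClaims {
        c.claims[cl.name] = cl
    }
}

func main() {
    ctrl := &controller{volumes: map[string]volume{}, claims: map[string]claim{}}
    ctrl.initializeCaches(
        []volume{{name: "pv-1"}, {name: "pv-2"}},
        []claim{{name: "default/claim-1"}},
    )
    fmt.Println(len(ctrl.volumes), "volumes and", len(ctrl.claims), "claims cached")
}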

View File

@@ -110,7 +110,7 @@ func TestDeleteSync(t *testing.T) {
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
-// Bind the volume to ressurected claim (this should never
+// Bind the volume to resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound)
reactor.claims[claim.Name] = claim

View File

@@ -137,7 +137,7 @@ type actualStateOfWorld struct {
}
// The volume object represents a volume the the attach/detach controller
-// believes to be succesfully attached to a node it is managing.
+// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName api.UniqueVolumeName

View File

@@ -161,7 +161,7 @@ func isLess(i, j reflect.Value) (bool, error) {
if t, ok := in.(unversioned.Time); ok {
return t.Before(j.Interface().(unversioned.Time)), nil
}
-// fallback to the fields comparision
+// fallback to the fields comparison
for idx := 0; idx < i.NumField(); idx++ {
less, err := isLess(i.Field(idx), j.Field(idx))
if err != nil || !less {

View File

@@ -39,7 +39,7 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume
return volumesToReturn, len(volumesToReturn) > 0
}
-// podVolumesExist checks wiht the volume manager and returns true any of the
+// podVolumesExist checks with the volume manager and returns true any of the
// pods for the specified volume are mounted.
func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
if mountedVolumes :=

View File

@@ -1213,7 +1213,7 @@ func netnsPathFromName(netnsName string) string {
// setupPodNetwork creates a network namespace for the given pod and calls
// configured NetworkPlugin's setup function on it.
// It returns the namespace name, configured IP (if available), and an error if
-// one occured.
+// one occurred.
//
// If the pod is running in host network or is running using the no-op plugin, then nothing will be done.
func (r *Runtime) setupPodNetwork(pod *api.Pod) (string, string, error) {

View File

@@ -135,7 +135,7 @@ func (s *Scheduler) scheduleOne() {
}
bindingStart := time.Now()
-// If binding succeded then PodScheduled condition will be updated in apiserver so that
+// If binding succeeded then PodScheduled condition will be updated in apiserver so that
// it's atomic with setting host.
err := s.config.Binder.Bind(b)
if err != nil {