Fixed several spelling mistakes

Niekvdplas 2021-03-30 15:28:23 +02:00
parent 6572fe4d90
commit fec272a7b2
26 changed files with 27 additions and 27 deletions

@@ -169,7 +169,7 @@ func (jm *ControllerV2) sync(cronJobKey string) (*time.Duration, error) {
cronJob, err := jm.cronJobLister.CronJobs(ns).Get(name)
switch {
case errors.IsNotFound(err):
-// may be cronjob is deleted, dont need to requeue this key
+// may be cronjob is deleted, don't need to requeue this key
klog.V(4).InfoS("cronjob not found, may be it is deleted", "cronjob", klog.KRef(ns, name), "err", err)
return nil, nil
case err != nil:

@@ -889,7 +889,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
// induce disk pressure!
fakeClock.Step(1 * time.Minute)
summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
-// Dont reclaim any disk
+// Don't reclaim any disk
diskGC.summaryAfterGC = summaryStatsMaker("400Mi", "200Gi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)

@@ -464,7 +464,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Kubelet client is not nil")
} else {
-// we dont have a client to sync!
+// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }

@@ -144,7 +144,7 @@ func (p *Provider) RootFsStats() (*statsapi.FsStats, error) {
}
// Get the root container stats's timestamp, which will be used as the
-// imageFs stats timestamp. Dont force a stats update, as we only want the timestamp.
+// imageFs stats timestamp. Don't force a stats update, as we only want the timestamp.
rootStats, err := getCgroupStats(p.cadvisor, "/", false)
if err != nil {
return nil, fmt.Errorf("failed to get root container stats: %v", err)

@@ -111,7 +111,7 @@ func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.Scope
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}

@@ -190,7 +190,7 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []core
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{}
for _, selector := range limitedScopes {

@@ -90,7 +90,7 @@ func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.S
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}

@@ -386,7 +386,7 @@ func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions {
// addUnsetLabelsToMap backfills missing values with values we find in a map.
func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
for _, l := range labelsToAdd {
-// if the label is already there, dont overwrite it.
+// if the label is already there, don't overwrite it.
if _, exists := aL[l]; exists {
continue
}

@@ -719,7 +719,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
scache := internalcache.New(10*time.Minute, stop)
-// Design the baseline for the pods, and we will make nodes that dont fit it later.
+// Design the baseline for the pods, and we will make nodes that don't fit it later.
var cpu = int64(4)
var mem = int64(500)
podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{

@@ -117,7 +117,7 @@ func calculateEmptyDirMemorySize(nodeAllocatableMemory *resource.Quantity, spec
return sizeLimit
}
-// size limit defaults to node allocatable (pods cant consume more memory than all pods)
+// size limit defaults to node allocatable (pods can't consume more memory than all pods)
sizeLimit = nodeAllocatableMemory
zero := resource.MustParse("0")

@@ -477,7 +477,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error {
go func() {
klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
}()
-// cant find vol id, then ok.
+// can't find vol id, then ok.
if _, ok := devMap[token]; !ok {
return nil
}

@@ -43,7 +43,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
-t.Fatalf("cant' make a temp dir: %s", err)
+t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
@@ -80,7 +80,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
-t.Fatalf("cant' make a temp dir: %s", err)
+t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)

@@ -175,7 +175,7 @@ func (l *persistentVolumeLabel) findVolumeLabels(volume *api.PersistentVolume) (
topologyLabelGA := true
domain, domainOK := existingLabels[v1.LabelTopologyZone]
region, regionOK := existingLabels[v1.LabelTopologyRegion]
-// If they dont have GA labels we should check for failuredomain beta labels
+// If they don't have GA labels we should check for failuredomain beta labels
// TODO: remove this once all the cloud provider change to GA topology labels
if !domainOK || !regionOK {
topologyLabelGA = false

@@ -40,7 +40,7 @@ func (a statusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set
fieldpath.APIVersion(a.customResourceStrategy.kind.GroupVersion().String()): fieldpath.NewSet(
// Note that if there are other top level fields unique to CRDs,
// those will also get removed by the apiserver prior to persisting,
-// but wont be added to the resetFields set.
+// but won't be added to the resetFields set.
// This isn't an issue now, but if it becomes an issue in the future
// we might need a mechanism that is the inverse of resetFields where

@@ -270,7 +270,7 @@ func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []core
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}

@@ -54,7 +54,7 @@ type Evaluator interface {
Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error)
// MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object.
MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
-// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
// MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches.
MatchingResources(input []corev1.ResourceName) []corev1.ResourceName

@@ -359,7 +359,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
}
} else {
// User has not specified any override for this group version.
-// filter out types which dont have genclient.
+// filter out types which don't have genclient.
if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient {
continue
}

@@ -162,7 +162,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
generatingForPackage := false
for _, t := range p.Types {
-// filter out types which dont have genclient.
+// filter out types which don't have genclient.
if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
continue
}

@@ -1404,7 +1404,7 @@ func getAvailabilitySetName(az *Cloud, vmIndex int, numAS int) string {
}
// test supporting on 1 nic per vm
-// we really dont care about the name of the nic
+// we really don't care about the name of the nic
// just using the vm name for testing purposes
func getNICName(vmIndex int) string {
return getVMName(vmIndex)

@@ -47,7 +47,7 @@ var _ = SIGDescribe("Networking", func() {
// Second time, we pass through pods more carefully...
framework.Logf("Going to retry %v out of %v pods....", len(failedPodsByHost), len(config.EndpointPods))
for host, failedPods := range failedPodsByHost {
-framework.Logf("Doublechecking %v pods in host %v which werent seen the first time.", len(failedPods), host)
+framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
for _, endpointPod := range failedPods {
framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {

@@ -438,7 +438,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
// (See the TODO about checking probability, which isnt implemented yet).
// - maxTries is the maximum number of curl/echo attempts before an error is returned. The
// smaller this number is, the less 'slack' there is for declaring success.
-// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints wont be hit.
+// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit.
// - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without
// success on all endpoints).
// In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic.

@@ -636,7 +636,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
{
Ports: []networkingv1.NetworkPolicyPort{
{
-// dont use named ports
+// don't use named ports
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80},
},
{

@@ -43,7 +43,7 @@ import (
// Maximum number of forwarded connections. In practice we don't
// need more than one per sidecar and kubelet. Keeping this reasonably
// small ensures that we don't establish connections through the apiserver
-// and the remote kernel which then arent' needed.
+// and the remote kernel which then aren't needed.
const maxConcurrentConnections = 10
// Listen creates a listener which returns new connections whenever someone connects

@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
ginkgo.By("Verifying that curl queries FAIL for wrong URLs")
-// the below tests use curl because nslookup doesnt seem to use ndots properly
+// the below tests use curl because nslookup doesn't seem to use ndots properly
// ideally we'd use the powershell native ResolveDns but, that is not a part of agnhost images (as of k8s 1.20)
// TODO @jayunit100 add ResolveHost to agn images

@@ -597,7 +597,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
}
-ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
+ginkgo.By("checking eviction ordering and ensuring important pods don't fail")
done := true
for _, priorityPodSpec := range testSpecs {
var priorityPod v1.Pod

@@ -102,7 +102,7 @@ func setupWithResourcesWithOptions(t *testing.T, opts *framework.MasterConfigOpt
}
func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode int) {
-// We dont use the typed Go client to send this request to be able to verify the response status code.
+// We don't use the typed Go client to send this request to be able to verify the response status code.
bodyBytes := bytes.NewReader([]byte(body))
req, err := http.NewRequest(verb, URL, bodyBytes)
if err != nil {