diff --git a/build/common.sh b/build/common.sh
index a68e0f26d47..2ae17c9f1a9 100755
--- a/build/common.sh
+++ b/build/common.sh
@@ -306,7 +306,7 @@ function kube::build::has_ip() {
 # Detect if a specific image exists
 #
 # $1 - image repo name
-# #2 - image tag
+# $2 - image tag
 function kube::build::docker_image_exists() {
   [[ -n $1 && -n $2 ]] || {
     kube::log::error "Internal error. Image not specified in docker_image_exists."
diff --git a/pkg/api/testing/unstructured_test.go b/pkg/api/testing/unstructured_test.go
index 33d7d480b65..922be30b54a 100644
--- a/pkg/api/testing/unstructured_test.go
+++ b/pkg/api/testing/unstructured_test.go
@@ -52,7 +52,7 @@ func doRoundTrip(t *testing.T, internalVersion schema.GroupVersion, externalVers
 	fuzzer.FuzzerFor(FuzzerFuncs, rand.NewSource(seed), legacyscheme.Codecs).
 		// We are explicitly overwriting custom fuzzing functions, to ensure
 		// that InitContainers and their statuses are not generated. This is
-		// because in thise test we are simply doing json operations, in which
+		// because in this test we are simply doing json operations, in which
 		// those disappear.
 		Funcs(
 			func(s *api.PodSpec, c fuzz.Continue) {
diff --git a/pkg/controller/certificates/authority/policies.go b/pkg/controller/certificates/authority/policies.go
index d83f89ddea3..5c84e3ad005 100644
--- a/pkg/controller/certificates/authority/policies.go
+++ b/pkg/controller/certificates/authority/policies.go
@@ -47,7 +47,7 @@ type PermissiveSigningPolicy struct {
 	// TTL is the certificate TTL. It's used to calculate the NotAfter value of
 	// the certificate.
 	TTL time.Duration
-	// Usages are the allowed usages of a certficate.
+	// Usages are the allowed usages of a certificate.
 	Usages []capi.KeyUsage
 }
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index 443e4b79752..21d61dcac43 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -893,7 +893,7 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node,
 }
 
 // syncNodes deletes given pods and creates new daemon set pods on the given nodes
-// returns slice with erros if any
+// returns slice with errors if any
 func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
 	// We need to set expectations before creating/deleting pods to avoid race conditions.
 	dsKey, err := controller.KeyFunc(ds)
diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go
index 69ed4b63cf2..b86b407da3a 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator.go
@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/klog"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -61,7 +61,7 @@ type rangeAllocator struct {
 	// This increases a throughput of CIDR assignment by not blocking on long operations.
 	nodeCIDRUpdateChannel chan nodeReservedCIDRs
 	recorder record.EventRecorder
-	// Keep a set of nodes that are currectly being processed to avoid races in CIDR allocation
+	// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
 	lock sync.Mutex
 	nodesInProcessing sets.String
 }
diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go
index 05eb5d2d0df..d3eaab63049 100644
--- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go
+++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go
@@ -42,7 +42,7 @@ import (
 
 const (
 	// TODO (k82cn): Figure out a reasonable number of workers/channels and propagate
-	// the number of workers up making it a paramater of Run() function.
+	// the number of workers up making it a parameter of Run() function.
 
 	// NodeUpdateChannelSize defines the size of channel for node update events.
 	NodeUpdateChannelSize = 10
diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go
index bd9f6977956..6bff92e803a 100644
--- a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go
+++ b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go
@@ -222,7 +222,7 @@ func TestCreatePod(t *testing.T) {
 			}
 		}
 		if podDeleted != item.expectDelete {
-			t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 		}
 		close(stopCh)
 	}
@@ -319,7 +319,7 @@ func TestUpdatePod(t *testing.T) {
 			}
 		}
 		if podDeleted != item.expectDelete {
-			t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 		}
 		close(stopCh)
 	}
@@ -375,7 +375,7 @@ func TestCreateNode(t *testing.T) {
 			}
 		}
 		if podDeleted != item.expectDelete {
-			t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 		}
 		close(stopCh)
 	}
@@ -499,7 +499,7 @@ func TestUpdateNode(t *testing.T) {
 			}
 		}
 		if podDeleted != item.expectDelete {
-			t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
+			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
 		}
 		close(stopCh)
 	}
diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index fb1f6a900e5..b588b569ab4 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -348,7 +348,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
 			return true, nil, err
 		}
-		// TODO: it's ok not to sort these becaues statusOk
+		// TODO: it's ok not to sort these because statusOk
 		// contains all the conditions, so we'll never be appending.
 		// Default to statusOk when missing any specific conditions
 		if tc.expectedConditions == nil {
diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go
index 4f6dafe3eba..12aef209480 100644
--- a/pkg/controller/replicaset/replica_set_test.go
+++ b/pkg/controller/replicaset/replica_set_test.go
@@ -1081,7 +1081,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 	manager.syncReplicaSet(GetKey(rs, t))
 
 	if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
-		t.Errorf("Found expectaions, expected none since the ReplicaSet has been deleted.")
+		t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.")
 	}
 
 	// This should have no effect, since we've deleted the ReplicaSet.
diff --git a/pkg/controller/service/controller.go b/pkg/controller/service/controller.go
index 390104cf44c..04f98131b2c 100644
--- a/pkg/controller/service/controller.go
+++ b/pkg/controller/service/controller.go
@@ -81,7 +81,7 @@ const (
 
 	// serviceLoadBalancerFinalizerFeature is the feature gate name that
 	// enables Finalizer Protection for Service LoadBalancers.
-	// orginated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L433
+	// originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L433
 	serviceLoadBalancerFinalizerFeature = "ServiceLoadBalancerFinalizer"
 
 	// legacyNodeRoleBehaviro is the feature gate name that enables legacy
diff --git a/pkg/controller/statefulset/stateful_set_utils_test.go b/pkg/controller/statefulset/stateful_set_utils_test.go
index 8dee5cadea6..2b07708b7fd 100644
--- a/pkg/controller/statefulset/stateful_set_utils_test.go
+++ b/pkg/controller/statefulset/stateful_set_utils_test.go
@@ -57,7 +57,7 @@ func TestIsMemberOf(t *testing.T) {
 	set2.Name = "foo2"
 	pod := newStatefulSetPod(set, 1)
 	if !isMemberOf(set, pod) {
-		t.Error("isMemberOf retruned false negative")
+		t.Error("isMemberOf returned false negative")
 	}
 	if isMemberOf(set2, pod) {
 		t.Error("isMemberOf returned false positive")
@@ -90,7 +90,7 @@ func TestStorageMatches(t *testing.T) {
 	set := newStatefulSet(3)
 	pod := newStatefulSetPod(set, 1)
 	if !storageMatches(set, pod) {
-		t.Error("Newly created Pod has a invalid stroage")
+		t.Error("Newly created Pod has a invalid storage")
 	}
 	pod.Spec.Volumes = nil
 	if storageMatches(set, pod) {
@@ -144,7 +144,7 @@ func TestUpdateStorage(t *testing.T) {
 	set := newStatefulSet(3)
 	pod := newStatefulSetPod(set, 1)
 	if !storageMatches(set, pod) {
-		t.Error("Newly created Pod has a invalid stroage")
+		t.Error("Newly created Pod has a invalid storage")
 	}
 	pod.Spec.Volumes = nil
 	if storageMatches(set, pod) {
diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go
index 415867f798f..55939295608 100644
--- a/pkg/controller/util/node/controller_utils.go
+++ b/pkg/controller/util/node/controller_utils.go
@@ -56,7 +56,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
 			continue
 		}
 
-		// Pod will be modified, so making copy is requiered.
+		// Pod will be modified, so making copy is required.
 		pod := pods[i].DeepCopy()
 		// Set reason and message in the pod object.
 		if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
@@ -122,7 +122,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 			continue
 		}
 
-		// Pod will be modified, so making copy is requiered.
+		// Pod will be modified, so making copy is required.
 		pod := pods[i].DeepCopy()
 		for _, cond := range pod.Status.Conditions {
 			if cond.Type == v1.PodReady {
diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
index 24c4e802a96..a2ec9be2a93 100644
--- a/pkg/controller/volume/attachdetach/reconciler/reconciler.go
+++ b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
@@ -219,7 +219,7 @@ func (rc *reconciler) reconcile() {
 				continue
 			}
 
-			// Trigger detach volume which requires verifing safe to detach step
+			// Trigger detach volume which requires verifying safe to detach step
 			// If timeout is true, skip verifySafeToDetach check
 			klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
 			verifySafeToDetach := !timeout
diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go
index 860f4b3e7c5..a09f40eaeeb 100644
--- a/pkg/controller/volume/persistentvolume/framework_test.go
+++ b/pkg/controller/volume/persistentvolume/framework_test.go
@@ -486,7 +486,7 @@ var (
 
 // wrapTestWithPluginCalls returns a testCall that:
 // - configures controller with a volume plugin that implements recycler,
-//   deleter and provisioner. The plugin retunrs provided errors when a volume
+//   deleter and provisioner. The plugin returns provided errors when a volume
 //   is deleted, recycled or provisioned.
 // - calls given testCall
 func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go
index f69e109cc2e..848ed6655f0 100644
--- a/pkg/controller/volume/persistentvolume/index_test.go
+++ b/pkg/controller/volume/persistentvolume/index_test.go
@@ -1093,7 +1093,7 @@ func TestVolumeModeCheck(t *testing.T) {
 			pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
 			enableBlock: true,
 		},
-		"feature enabled - pvc nil and pv filesytem": {
+		"feature enabled - pvc nil and pv filesystem": {
 			isExpectedMismatch: false,
 			vol: createVolumeModeFilesystemTestVolume(),
 			pvc: makeVolumeModePVC("8G", nil, nil),
diff --git a/pkg/controller/volume/persistentvolume/metrics/metrics.go b/pkg/controller/volume/persistentvolume/metrics/metrics.go
index 629745ddf57..bde25eeed5c 100644
--- a/pkg/controller/volume/persistentvolume/metrics/metrics.go
+++ b/pkg/controller/volume/persistentvolume/metrics/metrics.go
@@ -98,7 +98,7 @@ var (
 	volumeOperationErrorsMetric = metrics.NewCounterVec(
 		&metrics.CounterOpts{
 			Name: "volume_operation_total_errors",
-			Help: "Total volume operation erros",
+			Help: "Total volume operation errors",
 			StabilityLevel: metrics.ALPHA,
 		},
 		[]string{"plugin_name", "operation_name"})
diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go
index 3d64ac00488..fec61c4f245 100644
--- a/pkg/controller/volume/persistentvolume/pv_controller.go
+++ b/pkg/controller/volume/persistentvolume/pv_controller.go
@@ -206,7 +206,7 @@ type PersistentVolumeController struct {
 
 	// operationTimestamps caches start timestamp of operations
 	// (currently provision + binding/deletion) for metric recording.
-	// Detailed lifecyle/key for each operation
+	// Detailed lifecycle/key for each operation
 	// 1. provision + binding
 	//     key: claimKey
 	//     start time: user has NOT provide any volume ref in the claim AND
@@ -1071,7 +1071,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
 	}
 
 	// Verify the claim is in cache: if so, then it is a different PVC with the same name
-	// since the volume is known to be released at this moment. Ths new (cached) PVC must use
+	// since the volume is known to be released at this moment. The new (cached) PVC must use
 	// a different PV -- we checked that the PV is unused in isVolumeReleased.
 	// So the old PV is safe to be recycled.
 	claimName := claimrefToClaimKey(volume.Spec.ClaimRef)
diff --git a/pkg/kubelet/certificate/bootstrap/bootstrap.go b/pkg/kubelet/certificate/bootstrap/bootstrap.go
index 3e579e6bf63..7bdc42b3cbe 100644
--- a/pkg/kubelet/certificate/bootstrap/bootstrap.go
+++ b/pkg/kubelet/certificate/bootstrap/bootstrap.go
@@ -356,7 +356,7 @@ func requestNodeCertificate(client certificatesv1beta1.CertificateSigningRequest
 }
 
 // This digest should include all the relevant pieces of the CSR we care about.
-// We can't direcly hash the serialized CSR because of random padding that we
+// We can't directly hash the serialized CSR because of random padding that we
 // regenerate every loop and we include usages which are not contained in the
 // CSR. This needs to be kept up to date as we add new fields to the node
 // certificates and with ensureCompatible.
diff --git a/pkg/kubelet/cloudresource/cloud_request_manager_test.go b/pkg/kubelet/cloudresource/cloud_request_manager_test.go
index 9c74d737b79..7bf186c1328 100644
--- a/pkg/kubelet/cloudresource/cloud_request_manager_test.go
+++ b/pkg/kubelet/cloudresource/cloud_request_manager_test.go
@@ -85,7 +85,7 @@ func TestNodeAddressesUsesLastSuccess(t *testing.T) {
 	cloud := &fake.Cloud{}
 	manager := NewSyncManager(cloud, "defaultNode", 0).(*cloudResourceSyncManager)
 
-	// These tests are stateful and order dependant.
+	// These tests are stateful and order dependent.
 	tests := []struct {
 		name string
 		addrs []v1.NodeAddress
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index f3dd259fe9a..c2c67105c7c 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -297,7 +297,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 			return nil, err
 		}
 
-		klog.Infof("[topologymanager] Initilizing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy)
+		klog.Infof("[topologymanager] Initializing Topology Manager with %s policy", nodeConfig.ExperimentalTopologyManagerPolicy)
 	} else {
 		cm.topologyManager = topologymanager.NewFakeManager()
 	}
diff --git a/pkg/kubelet/cm/cpumanager/state/state.go b/pkg/kubelet/cm/cpumanager/state/state.go
index 0550b644d57..be32509279b 100644
--- a/pkg/kubelet/cm/cpumanager/state/state.go
+++ b/pkg/kubelet/cm/cpumanager/state/state.go
@@ -20,7 +20,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 )
 
-// ContainerCPUAssignments type used in cpu manger state
+// ContainerCPUAssignments type used in cpu manager state
 type ContainerCPUAssignments map[string]cpuset.CPUSet
 
 // Clone returns a copy of ContainerCPUAssignments
diff --git a/pkg/kubelet/cm/cpumanager/state/state_file_test.go b/pkg/kubelet/cm/cpumanager/state/state_file_test.go
index 232c47f3875..fb83eac5966 100644
--- a/pkg/kubelet/cm/cpumanager/state/state_file_test.go
+++ b/pkg/kubelet/cm/cpumanager/state/state_file_test.go
@@ -465,11 +465,11 @@ func TestClearStateStateFile(t *testing.T) {
 			state.ClearState()
 			if !cpuset.NewCPUSet().Equals(state.GetDefaultCPUSet()) {
-				t.Error("cleared state shoudn't has got information about available cpuset")
+				t.Error("cleared state shouldn't has got information about available cpuset")
 			}
 			for containerName := range testCase.containers {
 				if !cpuset.NewCPUSet().Equals(state.GetCPUSetOrDefault(containerName)) {
-					t.Error("cleared state shoudn't has got information about containers")
+					t.Error("cleared state shouldn't has got information about containers")
 				}
 			}
 		})
diff --git a/pkg/kubelet/dockershim/helpers_unsupported.go b/pkg/kubelet/dockershim/helpers_unsupported.go
index 04003d9fd51..623e0b56b44 100644
--- a/pkg/kubelet/dockershim/helpers_unsupported.go
+++ b/pkg/kubelet/dockershim/helpers_unsupported.go
@@ -54,6 +54,6 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
 	return "", fmt.Errorf("unsupported platform")
 }
 
-// applyExperimentalCreateConfig applys experimental configures from sandbox annotations.
+// applyExperimentalCreateConfig applies experimental configures from sandbox annotations.
 func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
 }
diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go
index dd79d809e01..130e85bc85f 100644
--- a/pkg/kubelet/eviction/eviction_manager.go
+++ b/pkg/kubelet/eviction/eviction_manager.go
@@ -293,7 +293,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
 	// determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met)
 	thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now)
-	debugLogThresholdsWithObservation("thresholds - grace periods satisified", thresholds, observations)
+	debugLogThresholdsWithObservation("thresholds - grace periods satisfied", thresholds, observations)
 
 	// update internal state
 	m.Lock()
diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go
index 2087002da7a..fe30006e8af 100644
--- a/pkg/kubelet/eviction/eviction_manager_test.go
+++ b/pkg/kubelet/eviction/eviction_manager_test.go
@@ -927,7 +927,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 
 	// no image gc should have occurred
 	if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
-		t.Errorf("Manager chose to perform image gc when it was not neeed")
+		t.Errorf("Manager chose to perform image gc when it was not needed")
 	}
 
 	// no pod should have been killed
@@ -950,7 +950,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 
 	// no image gc should have occurred
 	if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
-		t.Errorf("Manager chose to perform image gc when it was not neeed")
+		t.Errorf("Manager chose to perform image gc when it was not needed")
 	}
 
 	// no pod should have been killed
diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go
index d008f9af395..65a00b32951 100644
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -329,7 +329,7 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
 }
 
 // localVolumeNames returns the set of volumes for the pod that are local
-// TODO: sumamry API should report what volumes consume local storage rather than hard-code here.
+// TODO: summary API should report what volumes consume local storage rather than hard-code here.
 func localVolumeNames(pod *v1.Pod) []string {
 	result := []string{}
 	for _, volume := range pod.Spec.Volumes {
diff --git a/pkg/kubelet/eviction/threshold_notifier_linux.go b/pkg/kubelet/eviction/threshold_notifier_linux.go
index 8ac1ac6cf25..7ca6704d2db 100644
--- a/pkg/kubelet/eviction/threshold_notifier_linux.go
+++ b/pkg/kubelet/eviction/threshold_notifier_linux.go
@@ -133,7 +133,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) {
 }
 
 // wait waits up to notifierRefreshInterval for an event on the Epoll FD for the
-// eventfd we are concerned about. It returns an error if one occurrs, and true
+// eventfd we are concerned about. It returns an error if one occurs, and true
 // if the consumer should read from the eventfd.
 func wait(epfd, eventfd int, timeout time.Duration) (bool, error) {
 	events := make([]unix.EpollEvent, numFdEvents+1)
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 013d0f55aea..a9a01477fc6 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -899,7 +899,7 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool {
 // been reclaimed by the kubelet. Reclaiming resources is a prerequisite to deleting a pod from the API server.
 func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
 	if !notRunning(status.ContainerStatuses) {
-		// We shouldnt delete pods that still have running containers
+		// We shouldn't delete pods that still have running containers
 		klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
 		return false
 	}
@@ -918,7 +918,7 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo
 		return false
 	}
 	if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
-		// We shouldnt delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
+		// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
 		klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
 		return false
 	}
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go
index 621b1e4cd35..ef6c62a23f3 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go
@@ -327,7 +327,7 @@ func TestLifeCycleHook(t *testing.T) {
 		// Now try to create a container, which should in turn invoke PostStart Hook
 		_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, testContainer, testPod, fakePodStatus, nil, "")
 		if err != nil {
-			t.Errorf("startContainer erro =%v", err)
+			t.Errorf("startContainer error =%v", err)
 		}
 		if fakeRunner.Cmd[0] != cmdPostStart.PostStart.Exec.Command[0] {
 			t.Errorf("CMD PostStart hook was not invoked")
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
index 166eb794547..69a0b7c1ba5 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
@@ -719,7 +719,7 @@ func TestComputePodActions(t *testing.T) {
 	_, _, m, err := createTestRuntimeManager()
 	require.NoError(t, err)
 
-	// Createing a pair reference pod and status for the test cases to refer
+	// Creating a pair reference pod and status for the test cases to refer
 	// the specific fields.
 	basePod, baseStatus := makeBasePodAndStatus()
 	noAction := podActions{
diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go
index 0d37b123fc5..ac1b3d93513 100644
--- a/pkg/kubelet/logs/container_log_manager.go
+++ b/pkg/kubelet/logs/container_log_manager.go
@@ -295,7 +295,7 @@ func isInUse(l string, logs []string) bool {
 	if strings.HasSuffix(l, tmpSuffix) {
 		return false
 	}
-	// All compresed logs are in use.
+	// All compressed logs are in use.
 	if strings.HasSuffix(l, compressSuffix) {
 		return true
 	}
diff --git a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go
index 667d8e907b5..8ef7e13894c 100644
--- a/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go
+++ b/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go
@@ -63,7 +63,7 @@ func NewOperationExecutor(
 }
 
 // ActualStateOfWorldUpdater defines a set of operations updating the actual
-// state of the world cache after successful registeration/deregistration.
+// state of the world cache after successful registration/deregistration.
 type ActualStateOfWorldUpdater interface {
 	// AddPlugin add the given plugin in the cache if no existing plugin
 	// in the cache has the same socket path.
diff --git a/pkg/kubelet/pod/pod_manager.go b/pkg/kubelet/pod/pod_manager.go
index 23f4e8258b3..98f8f00f74f 100644
--- a/pkg/kubelet/pod/pod_manager.go
+++ b/pkg/kubelet/pod/pod_manager.go
@@ -59,7 +59,7 @@ type Manager interface {
 	// whether the pod is found.
 	GetPodByUID(types.UID) (*v1.Pod, bool)
 	// GetPodByMirrorPod returns the static pod for the given mirror pod and
-	// whether it was known to the pod manger.
+	// whether it was known to the pod manager.
 	GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool)
 	// GetMirrorPodByPod returns the mirror pod for the given static pod and
 	// whether it was known to the pod manager.
diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go
index 7c23166be36..bdce37a4cc7 100644
--- a/pkg/kubelet/stats/cri_stats_provider.go
+++ b/pkg/kubelet/stats/cri_stats_provider.go
@@ -672,7 +672,7 @@ func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi
 	}()
 
 	if err != nil {
-		// This should not happen. Log now to raise visiblity
+		// This should not happen. Log now to raise visibility
 		klog.Errorf("failed updating cpu usage nano core: %v", err)
 	}
 	return usage
diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go
index 27468352981..5649937ce40 100644
--- a/pkg/kubelet/status/status_manager_test.go
+++ b/pkg/kubelet/status/status_manager_test.go
@@ -335,9 +335,9 @@ func TestSyncPodNoDeadlock(t *testing.T) {
 	client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		switch action := action.(type) {
 		case core.GetAction:
-			assert.Equal(t, pod.Name, action.GetName(), "Unexpeted GetAction: %+v", action)
+			assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action)
 		case core.UpdateAction:
-			assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpeted UpdateAction: %+v", action)
+			assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpected UpdateAction: %+v", action)
 		default:
 			assert.Fail(t, "Unexpected Action: %+v", action)
 		}
diff --git a/pkg/kubelet/util/manager/cache_based_manager_test.go b/pkg/kubelet/util/manager/cache_based_manager_test.go
index 159ef4f74de..fb88c003074 100644
--- a/pkg/kubelet/util/manager/cache_based_manager_test.go
+++ b/pkg/kubelet/util/manager/cache_based_manager_test.go
@@ -389,7 +389,7 @@ func TestCacheInvalidation(t *testing.T) {
 		},
 	}
 	manager.RegisterPod(podWithSecrets("ns1", "name1", s1))
-	// Fetch both secrets - this should triggger get operations.
+	// Fetch both secrets - this should trigger get operations.
 	store.Get("ns1", "s1")
 	store.Get("ns1", "s10")
 	store.Get("ns1", "s2")
diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go
index 1e03306e6bf..8365ea07cad 100644
--- a/pkg/master/master_test.go
+++ b/pkg/master/master_test.go
@@ -62,7 +62,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-// setUp is a convience function for setting up for (most) tests.
+// setUp is a convenience function for setting up for (most) tests.
 func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertions) {
 	server, storageConfig := etcd3testing.NewUnsecuredEtcd3TestClientServer(t)
diff --git a/pkg/proxy/apis/config/types.go b/pkg/proxy/apis/config/types.go
index 991a9096b6e..b1314fc2e88 100644
--- a/pkg/proxy/apis/config/types.go
+++ b/pkg/proxy/apis/config/types.go
@@ -84,7 +84,7 @@ type KubeProxyWinkernelConfiguration struct {
 	// networkName is the name of the network kube-proxy will use
 	// to create endpoints and policies
 	NetworkName string
-	// sourceVip is the IP address of the source VIP endoint used for
+	// sourceVip is the IP address of the source VIP endpoint used for
 	// NAT when loadbalancing
 	SourceVip string
 	// enableDSR tells kube-proxy whether HNS policies should be created
diff --git a/pkg/registry/core/pod/strategy.go b/pkg/registry/core/pod/strategy.go
index 8282cc7bec5..4569a9ec413 100644
--- a/pkg/registry/core/pod/strategy.go
+++ b/pkg/registry/core/pod/strategy.go
@@ -152,7 +152,7 @@ func (podStrategyWithoutGraceful) CheckGracefulDelete(ctx context.Context, obj r
 	return false
 }
 
-// StrategyWithoutGraceful implements the legacy instant delele behavior.
+// StrategyWithoutGraceful implements the legacy instant delete behavior.
 var StrategyWithoutGraceful = podStrategyWithoutGraceful{Strategy}
 
 type podStatusStrategy struct {
diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go
index a887cdeb8dc..13d120fc629 100644
--- a/pkg/registry/rbac/rest/storage_rbac.go
+++ b/pkg/registry/rbac/rest/storage_rbac.go
@@ -161,7 +161,7 @@ type PolicyData struct {
 
 func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 	return func(hookContext genericapiserver.PostStartHookContext) error {
-		// intializing roles is really important. On some e2e runs, we've seen cases where etcd is down when the server
+		// initializing roles is really important. On some e2e runs, we've seen cases where etcd is down when the server
 		// starts, the roles don't initialize, and nothing works.
 		err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
diff --git a/pkg/registry/rbac/validation/rule.go b/pkg/registry/rbac/validation/rule.go
index ba5471df709..858ec5509ce 100644
--- a/pkg/registry/rbac/validation/rule.go
+++ b/pkg/registry/rbac/validation/rule.go
@@ -34,7 +34,7 @@ import (
 )
 
 type AuthorizationRuleResolver interface {
-	// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namepsace
+	// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace
 	// of the role binding, the empty string if a cluster role binding.
 	GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error)
diff --git a/pkg/registry/storage/volumeattachment/strategy_test.go b/pkg/registry/storage/volumeattachment/strategy_test.go
index ed33a9da014..244e56b817c 100644
--- a/pkg/registry/storage/volumeattachment/strategy_test.go
+++ b/pkg/registry/storage/volumeattachment/strategy_test.go
@@ -114,7 +114,7 @@ func TestVolumeAttachmentStrategy(t *testing.T) {
 	Strategy.PrepareForUpdate(ctx, statusVolumeAttachment, volumeAttachment)
 	if !apiequality.Semantic.DeepEqual(statusVolumeAttachment, volumeAttachment) {
-		t.Errorf("unexpected objects difference after modfying status: %v", diff.ObjectDiff(statusVolumeAttachment, volumeAttachment))
+		t.Errorf("unexpected objects difference after modifying status: %v", diff.ObjectDiff(statusVolumeAttachment, volumeAttachment))
 	}
 }
diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index 1337293f61d..a0ce21e0399 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -1037,7 +1037,7 @@ func NewServiceAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister,
 }
 
 // checkServiceAffinity is a predicate which matches nodes in such a way to force that
-// ServiceAffinity.labels are homogenous for pods that are scheduled to a node.
+// ServiceAffinity.labels are homogeneous for pods that are scheduled to a node.
 // (i.e. it returns true IFF this pod can be added to this node such that all other pods in
 // the same service are running on nodes with the exact same ServiceAffinity.label values).
 //
diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go
index f1a69baad76..0a3a8616323 100644
--- a/pkg/scheduler/algorithm/predicates/predicates_test.go
+++ b/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -4005,7 +4005,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 				"nodeA": false,
 				"nodeB": false,
 			},
-			name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satified on the same pod",
+			name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satisfied on the same pod",
 		},
 	}
 
@@ -4758,7 +4758,7 @@ func TestEvenPodsSpreadPredicate_SingleConstraint(t *testing.T) {
 			},
 		},
 		{
-			name: "existing pods with mis-matched namespace doens't count",
+			name: "existing pods with mis-matched namespace doesn't count",
 			pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint(
 				1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(),
 			).Obj(),
diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
index e074e321eae..b4010200a3d 100644
--- a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
+++ b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
@@ -357,7 +357,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
-			name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+			name: "Affinity symmetry: considered only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -371,7 +371,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
-			name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+			name: "Affinity symmetry: considered RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 
 		// The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity.
diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
index 6092698be07..778a6d067a5 100644
--- a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
+++ b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
@@ -1426,7 +1426,7 @@ func TestMultipleNodes(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
 			},
 			wantStatuses: []*framework.Status{unschedulableAndUnresolvable, unschedulableAndUnresolvable},
-			name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satified on the same pod",
+			name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satisfied on the same pod",
 		},
 	}
diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
index 282dbb9cea0..80fc846de98 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
@@ -77,7 +77,7 @@ func TestPodTopologySpread_Filter_SingleConstraint(t *testing.T) {
 			},
 		},
 		{
-			name: "existing pods with mis-matched namespace doens't count",
+			name: "existing pods with mis-matched namespace doesn't count",
 			pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint(
 				1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(),
 			).Obj(),
diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go
index 77e1099bb5a..6f9fb23d28c 100644
--- a/pkg/scheduler/internal/cache/cache_test.go
+++ b/pkg/scheduler/internal/cache/cache_test.go
@@ -37,7 +37,7 @@ import (
 
 func deepEqualWithoutGeneration(t *testing.T, testcase int, actual *nodeInfoListItem, expected *schedulernodeinfo.NodeInfo) {
 	if (actual == nil) != (expected == nil) {
-		t.Error("One of the actual or expeted is nil and the other is not!")
+		t.Error("One of the actual or expected is nil and the other is not!")
 	}
 	// Ignore generation field.
 	if actual != nil {
@@ -386,7 +386,7 @@ func TestSnapshot(t *testing.T) {
 	snapshot := cache.Snapshot()
 
 	if len(snapshot.Nodes) != len(cache.nodes) {
-		t.Errorf("Unequal number of nodes in the cache and its snapshot. expeted: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
+		t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
 	}
 	for name, ni := range snapshot.Nodes {
 		nItem := cache.nodes[name]