diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go
index 1fe329edcb2..94ee2edb595 100644
--- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go
+++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go
@@ -184,7 +184,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati
 			return err
 		}
 
-		// If we're dry-running or there are no scheduable nodes available, we don't need to wait for the new DNS addon to become ready
+		// If we're dry-running or there are no schedulable nodes available, we don't need to wait for the new DNS addon to become ready
 		if !dryRun && len(nodes.Items) != 0 {
 			dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{})
 			if err != nil {
diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go
index 4050392eb76..9b9aa0eec44 100644
--- a/pkg/scheduler/internal/queue/scheduling_queue.go
+++ b/pkg/scheduler/internal/queue/scheduling_queue.go
@@ -137,7 +137,7 @@ type PriorityQueue struct {
 	// when a pod is popped.
 	schedulingCycle int64
 	// moveRequestCycle caches the sequence number of scheduling cycle when we
-	// received a move request. Unscheduable pods in and before this scheduling
+	// received a move request. Unschedulable pods in and before this scheduling
 	// cycle will be put back to activeQueue if we were trying to schedule them
 	// when we received move request.
 	moveRequestCycle int64
diff --git a/staging/src/k8s.io/api/core/v1/well_known_taints.go b/staging/src/k8s.io/api/core/v1/well_known_taints.go
index fc8068e16de..84d268197c6 100644
--- a/staging/src/k8s.io/api/core/v1/well_known_taints.go
+++ b/staging/src/k8s.io/api/core/v1/well_known_taints.go
@@ -27,7 +27,7 @@ const (
 	TaintNodeUnreachable = "node.kubernetes.io/unreachable"
 
 	// TaintNodeUnschedulable will be added when node becomes unschedulable
-	// and removed when node becomes scheduable.
+	// and removed when node becomes schedulable.
 	TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
 
 	// TaintNodeMemoryPressure will be added when node has memory pressure
diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go
index f81f22a8a28..721a6ee123a 100644
--- a/test/e2e/autoscaling/dns_autoscaling.go
+++ b/test/e2e/autoscaling/dns_autoscaling.go
@@ -243,13 +243,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear)
 			replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
 		}
 		if params.coresPerReplica > 0 {
-			replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica)
+			replicasFromCores = math.Ceil(float64(getSchedulableCores(nodes.Items)) / params.coresPerReplica)
 		}
 		return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
 	}
 }
 
-func getScheduableCores(nodes []v1.Node) int64 {
+func getSchedulableCores(nodes []v1.Node) int64 {
 	var sc resource.Quantity
 	for _, node := range nodes {
 		if !node.Spec.Unschedulable {
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 1a87fdf74f6..a239aa0586e 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -1022,7 +1022,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
 }
 
 // WaitForAllNodesSchedulable waits up to timeout for all
-// (but TestContext.AllowedNotReadyNodes) to become scheduable.
+// (but TestContext.AllowedNotReadyNodes) to become schedulable.
 func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
 	Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
 
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go
index 27e1c2bb2d4..ea86b608e30 100644
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -959,7 +959,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
 	// Disruptive test run serially, we can cache all voluem global mount
 	// points and verify after the test that we do not leak any global mount point.
 	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
-	framework.ExpectNoError(err, "while listing scheduable nodes")
+	framework.ExpectNoError(err, "while listing schedulable nodes")
 	globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
 	for _, node := range nodeList.Items {
 		globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
@@ -993,7 +993,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
 			podNode = &nodeList.Items[i]
 		}
 	}
-	framework.ExpectNotEqual(podNode, nil, "pod node should exist in scheduable nodes")
+	framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
 
 	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
 
diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go
index 3451ee1badf..a6a4d5f5d6f 100644
--- a/test/e2e/storage/vsphere/vsphere_utils.go
+++ b/test/e2e/storage/vsphere/vsphere_utils.go
@@ -762,7 +762,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
 }
 
 // GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
-// if multiple nodes are present with Ready and Scheduable state then one of the Node is selected randomly
+// if multiple nodes are present with Ready and Schedulable state then one of the Node is selected randomly
 // and it's associated NodeInfo object is returned.
 func GetReadySchedulableRandomNodeInfo() *NodeInfo {
 	nodesInfo := GetReadySchedulableNodeInfos()
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index 2f944471b21..35bf07bb79a 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -381,7 +381,7 @@ func TestVolumeBindingRescheduling(t *testing.T) {
 		// Trigger
 		test.trigger(config)
 
-		// Wait for pod is scheduled or unscheduable.
+		// Wait for pod is scheduled or unschedulable.
 		if !test.shouldFail {
 			klog.Infof("Waiting for pod is scheduled")
 			if err := waitForPodToSchedule(config.client, test.pod); err != nil {
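Note (not part of the patch): the dns_autoscaling.go hunk renames the helper that feeds the cluster-proportional autoscaler's "linear" sizing rule, replicas = max(1, max(ceil(nodes/nodesPerReplica), ceil(schedulableCores/coresPerReplica))), where only schedulable (non-cordoned) nodes contribute cores. The standalone Go sketch below restates that calculation with simplified stand-in types; fakeNode and its fields are illustrative assumptions, not the real k8s.io/api/core/v1 types used in the test.

// A minimal, self-contained sketch of the linear replica calculation mirrored
// by getExpectReplicasFuncLinear and getSchedulableCores in dns_autoscaling.go.
// fakeNode is a hypothetical stand-in, not the real v1.Node type.
package main

import (
	"fmt"
	"math"
)

// fakeNode carries only the fields the calculation needs.
type fakeNode struct {
	unschedulable bool  // mirrors node.Spec.Unschedulable
	cpuCores      int64 // the node's CPU capacity, in whole cores
}

// schedulableCores sums CPU cores across nodes that are not cordoned,
// analogous to getSchedulableCores in the hunk above.
func schedulableCores(nodes []fakeNode) int64 {
	var total int64
	for _, n := range nodes {
		if !n.unschedulable {
			total += n.cpuCores
		}
	}
	return total
}

// expectedReplicas applies the linear formula: take the larger of the
// node-based and core-based estimates, but never fewer than one replica.
func expectedReplicas(nodes []fakeNode, nodesPerReplica, coresPerReplica float64) int {
	var fromNodes, fromCores float64
	if nodesPerReplica > 0 {
		fromNodes = math.Ceil(float64(len(nodes)) / nodesPerReplica)
	}
	if coresPerReplica > 0 {
		fromCores = math.Ceil(float64(schedulableCores(nodes)) / coresPerReplica)
	}
	return int(math.Max(1.0, math.Max(fromNodes, fromCores)))
}

func main() {
	nodes := []fakeNode{
		{unschedulable: false, cpuCores: 8},
		{unschedulable: false, cpuCores: 8},
		{unschedulable: true, cpuCores: 8}, // cordoned: counted in the node term, excluded from the core term
	}
	// 3 nodes / 16 nodesPerReplica -> 1; 16 schedulable cores / 10 coresPerReplica -> 2.
	fmt.Println(expectedReplicas(nodes, 16, 10)) // prints 2
}

With these example parameters the core-based term dominates, which is exactly why the helper must count only schedulable cores: cordoning a node should shrink the core term even though the node still appears in the node list.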