Merge pull request #97880 from justinsb/spelling

Fix spelling typos: scheduable -> schedulable
Kubernetes Prow Robot 2021-01-15 03:43:51 -08:00 committed by GitHub
commit e1c1fd9edf
8 changed files with 10 additions and 10 deletions


@@ -184,7 +184,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati
return err
}
- // If we're dry-running or there are no scheduable nodes available, we don't need to wait for the new DNS addon to become ready
+ // If we're dry-running or there are no schedulable nodes available, we don't need to wait for the new DNS addon to become ready
if !dryRun && len(nodes.Items) != 0 {
dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{})
if err != nil {

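For context, the schedulable-node check this comment refers to needs a node list that excludes cordoned nodes. A minimal, hypothetical sketch of how such a list can be fetched with client-go (the field-selector form is an assumption, not copied from kubeadm):

// Hypothetical sketch: list only nodes that are not cordoned, so the caller
// can skip waiting for the DNS Deployment when nothing could run it anyway.
// Assumes client is a kubernetes.Interface (k8s.io/client-go/kubernetes).
nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
	FieldSelector: "spec.unschedulable=false", // assumption: exclude cordoned nodes
})
if err != nil {
	return err
}
if !dryRun && len(nodes.Items) != 0 {
	// ... wait for the new DNS Deployment to become ready ...
}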

@@ -137,7 +137,7 @@ type PriorityQueue struct {
// when a pod is popped.
schedulingCycle int64
// moveRequestCycle caches the sequence number of scheduling cycle when we
- // received a move request. Unscheduable pods in and before this scheduling
+ // received a move request. Unschedulable pods in and before this scheduling
// cycle will be put back to activeQueue if we were trying to schedule them
// when we received move request.
moveRequestCycle int64

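The comment describes the requeue rule that moveRequestCycle drives. A rough, hypothetical sketch of that rule (method and queue names are illustrative, not the scheduler's exact code): a pod whose failed scheduling attempt started at or before the last move request goes back to the active queue rather than the unschedulable queue.

// Illustrative sketch only (not the actual PriorityQueue method):
// podSchedulingCycle is the cycle captured when the pod was popped.
func (p *PriorityQueue) requeueUnschedulable(pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
	if p.moveRequestCycle >= podSchedulingCycle {
		// A move request arrived while this pod was being tried; the cluster
		// may have changed, so retry immediately from the active queue.
		return p.activeQ.Add(pInfo)
	}
	// Otherwise park it with the other unschedulable pods.
	p.unschedulableQ.addOrUpdate(pInfo)
	return nil
}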

@@ -27,7 +27,7 @@ const (
TaintNodeUnreachable = "node.kubernetes.io/unreachable"
// TaintNodeUnschedulable will be added when node becomes unschedulable
- // and removed when node becomes scheduable.
+ // and removed when node becomes schedulable.
TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
// TaintNodeMemoryPressure will be added when node has memory pressure

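As an illustration of how this constant is consumed (not part of this diff): a pod that must remain placeable on cordoned nodes tolerates the taint. A minimal sketch, assuming v1 is k8s.io/api/core/v1; the helper name is made up:

// tolerateUnschedulable lets a pod be scheduled onto a node that carries the
// node.kubernetes.io/unschedulable taint (i.e. a cordoned node).
func tolerateUnschedulable(pod *v1.Pod) {
	pod.Spec.Tolerations = append(pod.Spec.Tolerations, v1.Toleration{
		Key:      "node.kubernetes.io/unschedulable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	})
}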

@@ -243,13 +243,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear)
replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
- replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica)
+ replicasFromCores = math.Ceil(float64(getSchedulableCores(nodes.Items)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}
}
- func getScheduableCores(nodes []v1.Node) int64 {
+ func getSchedulableCores(nodes []v1.Node) int64 {
var sc resource.Quantity
for _, node := range nodes {
if !node.Spec.Unschedulable {

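The hunk above encodes the DNS autoscaler's linear formula: replicas = max(1, max(ceil(nodes / nodesPerReplica), ceil(schedulableCores / coresPerReplica))). A self-contained, hypothetical sketch of the same arithmetic (names and the sample numbers are illustrative, not the test's actual helper):

package main

import (
	"fmt"
	"math"
)

// expectedReplicas mirrors the linear-mode formula from the hunk above: the
// larger of the node-driven and core-driven counts, floored at one replica.
func expectedReplicas(nodes, schedulableCores int, nodesPerReplica, coresPerReplica float64) int {
	replicas := 1.0
	if nodesPerReplica > 0 {
		replicas = math.Max(replicas, math.Ceil(float64(nodes)/nodesPerReplica))
	}
	if coresPerReplica > 0 {
		replicas = math.Max(replicas, math.Ceil(float64(schedulableCores)/coresPerReplica))
	}
	return int(replicas)
}

func main() {
	// 40 nodes and 160 schedulable cores, one replica per 16 nodes or 256 cores:
	// ceil(40/16)=3, ceil(160/256)=1, so 3 replicas.
	fmt.Println(expectedReplicas(40, 160, 16, 256))
}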

@@ -1022,7 +1022,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
}
// WaitForAllNodesSchedulable waits up to timeout for all
- // (but TestContext.AllowedNotReadyNodes) to become scheduable.
+ // (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)

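A usage sketch for the helper above (the surrounding test scaffolding and the timeout value are assumptions, not taken from this diff):

// Illustrative call site: block e2e setup until all but the allowed
// not-ready nodes can accept pods, failing the run after 30 minutes.
if err := framework.WaitForAllNodesSchedulable(c, 30*time.Minute); err != nil {
	framework.Failf("nodes did not become schedulable: %v", err)
}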

@@ -959,7 +959,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
// Disruptive test run serially, we can cache all voluem global mount
// points and verify after the test that we do not leak any global mount point.
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
- framework.ExpectNoError(err, "while listing scheduable nodes")
+ framework.ExpectNoError(err, "while listing schedulable nodes")
globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
for _, node := range nodeList.Items {
globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
@@ -993,7 +993,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
podNode = &nodeList.Items[i]
}
}
- framework.ExpectNotEqual(podNode, nil, "pod node should exist in scheduable nodes")
+ framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)


@@ -762,7 +762,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo {
}
// GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
- // if multiple nodes are present with Ready and Scheduable state then one of the Node is selected randomly
+ // if multiple nodes are present with Ready and Schedulable state then one of the Node is selected randomly
// and it's associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()


@@ -381,7 +381,7 @@ func TestVolumeBindingRescheduling(t *testing.T) {
// Trigger
test.trigger(config)
- // Wait for pod is scheduled or unscheduable.
+ // Wait for pod is scheduled or unschedulable.
if !test.shouldFail {
klog.Infof("Waiting for pod is scheduled")
if err := waitForPodToSchedule(config.client, test.pod); err != nil {