Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)

Merge pull request #93982 from jayunit100/wait_func_cleanup

wait function and util comments and cleanup

Commit 54d7adc1f5
@@ -207,19 +207,18 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
 	return nodes, nil
 }
 
-// CheckReadyForTests returns a method usable in polling methods which will check that the nodes are
-// in a testable state based on schedulability.
+// CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
 func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
 	attempt := 0
-	var notSchedulable []*v1.Node
 	return func() (bool, error) {
 		attempt++
-		notSchedulable = nil
+		var nodesNotReadyYet []v1.Node
 		opts := metav1.ListOptions{
 			ResourceVersion: "0",
-			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
+			// remove uncordoned nodes from our calculation, TODO refactor if node v2 API removes that semantic.
+			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
 		}
-		nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
+		allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
 		if err != nil {
 			e2elog.Logf("Unexpected error listing nodes: %v", err)
 			if testutils.IsRetryableAPIError(err) {
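Note: the rewritten doc comment above describes the returned closure as a global gate for starting tests. Below is a minimal usage sketch, not part of this diff, showing how such a func() (bool, error) closure could be driven by the standard apimachinery wait helpers; the wrapper name, poll interval, timeout, and the e2enode import path are illustrative assumptions.

package nodewait // illustrative package name, not from the diff

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitUntilTestable polls the closure returned by CheckReadyForTests until it
// reports true (enough nodes ready) or the timeout expires.
func waitUntilTestable(c clientset.Interface) error {
	checkReady := e2enode.CheckReadyForTests(c, "" /* nonblockingTaints */, 0 /* allowedNotReadyNodes */, 100 /* largeClusterThreshold */)
	// checkReady already has the func() (bool, error) shape expected by wait.ConditionFunc.
	return wait.PollImmediate(10*time.Second, 20*time.Minute, checkReady)
}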
@@ -227,10 +226,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 			}
 			return false, err
 		}
-		for i := range nodes.Items {
-			node := &nodes.Items[i]
-			if !readyForTests(node, nonblockingTaints) {
-				notSchedulable = append(notSchedulable, node)
+		for _, node := range allNodes.Items {
+			if !readyForTests(&node, nonblockingTaints) {
+				nodesNotReadyYet = append(nodesNotReadyYet, node)
 			}
 		}
 		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
@@ -239,25 +237,29 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 		// provisioned correctly at startup will never become ready (e.g. when something
 		// won't install correctly), so we can't expect them to be ready at any point.
 		//
-		// However, we only allow non-ready nodes with some specific reasons.
-		if len(notSchedulable) > 0 {
+		// We log the *reason* why nodes are not schedulable, specifically, its usually the network not being available.
+		if len(nodesNotReadyYet) > 0 {
 			// In large clusters, log them only every 10th pass.
-			if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 {
-				e2elog.Logf("Unschedulable nodes:")
-				for i := range notSchedulable {
-					e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v NonblockingTaints:%v",
-						notSchedulable[i].Name,
-						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
-						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
-						notSchedulable[i].Spec.Taints,
+			if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
+				e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
+				for _, node := range nodesNotReadyYet {
+					e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
+						node.Name,
+						IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
+						IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
+						node.Spec.Taints,
 						nonblockingTaints,
 					)
 
 				}
 				e2elog.Logf("================================")
+				if len(nodesNotReadyYet) > allowedNotReadyNodes {
+					ready := len(allNodes.Items) - len(nodesNotReadyYet)
+					remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
+					e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
+				}
 			}
 		}
-		return len(notSchedulable) <= allowedNotReadyNodes, nil
+		return len(nodesNotReadyYet) <= allowedNotReadyNodes, nil
 	}
 }
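For illustration of the numbers the new logging block reports (example values, not from the diff): with len(allNodes.Items) = 100, len(nodesNotReadyYet) = 5, and allowedNotReadyNodes = 2, it computes ready = 100 - 5 = 95 and remaining = 5 - 2 = 3, so the log reads "95 out of 100 nodes are ready, max notReady allowed 2. Need 3 more before starting.", and the closure keeps returning false until at most 2 nodes remain not ready.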
@@ -166,6 +166,7 @@ func isConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
 	return true
 }
 
+// isNodeConditionSetAsExpected checks a node for a condition, and returns 'true' if the wanted value is the same as the condition value, useful for polling until a condition on a node is met.
 func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
 	// Check the node readiness condition (logging all).
 	for _, cond := range node.Status.Conditions {
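The comment added above frames isNodeConditionSetAsExpected as a building block for polling. Below is a hedged sketch, not part of this diff, of how its exported silent wrapper IsConditionSetAsExpectedSilent (used in the hunks above) might drive a per-node readiness poll; the helper name and poll values are assumptions, and the snippet is written as if it lived in the same framework package with time and k8s.io/apimachinery/pkg/util/wait added to the imports.

// waitForNodeReadyExample is illustrative only: it re-fetches one node until
// its Ready condition is true or the timeout expires.
func waitForNodeReadyExample(c clientset.Interface, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Keep polling only on errors the framework considers retryable.
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return IsConditionSetAsExpectedSilent(node, v1.NodeReady, true), nil
	})
}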