Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-26 05:03:09 +00:00
Merge pull request #93982 from jayunit100/wait_func_cleanup
wait function and util comments and cleanup
Commit: 54d7adc1f5
@@ -207,19 +207,18 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
 	return nodes, nil
 }
 
-// CheckReadyForTests returns a method usable in polling methods which will check that the nodes are
-// in a testable state based on schedulability.
+// CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
 func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
 	attempt := 0
-	var notSchedulable []*v1.Node
 	return func() (bool, error) {
 		attempt++
-		notSchedulable = nil
+		var nodesNotReadyYet []v1.Node
 		opts := metav1.ListOptions{
 			ResourceVersion: "0",
+			// remove uncordoned nodes from our calculation, TODO refactor if node v2 API removes that semantic.
 			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
 		}
-		nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
+		allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
 		if err != nil {
 			e2elog.Logf("Unexpected error listing nodes: %v", err)
 			if testutils.IsRetryableAPIError(err) {
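For context on how the rewritten helper is consumed (not part of this diff): CheckReadyForTests returns a plain func() (bool, error), which matches the shape of wait.ConditionFunc from k8s.io/apimachinery, so callers can hand it directly to a poller. A minimal sketch, where waitForTestableCluster and the poll interval, timeout, taint, and threshold values are all illustrative assumptions:

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitForTestableCluster blocks until enough nodes are ready to start the suite.
func waitForTestableCluster(c clientset.Interface) error {
	// Tolerate the master taint (illustrative), allow 0 not-ready nodes,
	// and treat clusters of 100+ nodes as "large" for logging purposes.
	checkReady := e2enode.CheckReadyForTests(c, "node-role.kubernetes.io/master", 0, 100)
	// Re-run the gate every 30 seconds for up to 20 minutes.
	return wait.PollImmediate(30*time.Second, 20*time.Minute, checkReady)
}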
@@ -227,10 +226,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 			}
 			return false, err
 		}
-		for i := range nodes.Items {
-			node := &nodes.Items[i]
-			if !readyForTests(node, nonblockingTaints) {
-				notSchedulable = append(notSchedulable, node)
+		for _, node := range allNodes.Items {
+			if !readyForTests(&node, nonblockingTaints) {
+				nodesNotReadyYet = append(nodesNotReadyYet, node)
 			}
 		}
 		// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
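One detail worth noting in the rewritten loop: range allNodes.Items copies each v1.Node by value into node, so append(nodesNotReadyYet, node) stores a stable copy, and &node is only dereferenced within the same iteration (in readyForTests), which keeps taking the address of the loop variable safe.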
@@ -239,25 +237,29 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 		// provisioned correctly at startup will never become ready (e.g. when something
 		// won't install correctly), so we can't expect them to be ready at any point.
 		//
-		// However, we only allow non-ready nodes with some specific reasons.
-		if len(notSchedulable) > 0 {
+		// We log the *reason* why nodes are not schedulable, specifically, its usually the network not being available.
+		if len(nodesNotReadyYet) > 0 {
 			// In large clusters, log them only every 10th pass.
-			if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 {
-				e2elog.Logf("Unschedulable nodes:")
-				for i := range notSchedulable {
-					e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v NonblockingTaints:%v",
-						notSchedulable[i].Name,
-						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
-						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
-						notSchedulable[i].Spec.Taints,
+			if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
+				e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
+				for _, node := range nodesNotReadyYet {
+					e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
+						node.Name,
+						IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
+						IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
+						node.Spec.Taints,
 						nonblockingTaints,
 					)
 
 				}
-				e2elog.Logf("================================")
+				if len(nodesNotReadyYet) > allowedNotReadyNodes {
+					ready := len(allNodes.Items) - len(nodesNotReadyYet)
+					remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
+					e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
+				}
 			}
 		}
-		return len(notSchedulable) <= allowedNotReadyNodes, nil
+		return len(nodesNotReadyYet) <= allowedNotReadyNodes, nil
 	}
 }
 
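A worked example of the new accounting (values are illustrative, not from the PR): with 100 nodes listed, 7 of them not yet ready, and allowedNotReadyNodes=2, ready = 100 - 7 = 93 and remaining = 7 - 2 = 5, so the framework logs "==== node wait: 93 out of 100 nodes are ready, max notReady allowed 2. Need 5 more before starting." and the returned gate keeps reporting false until at least 5 more nodes become ready.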
@@ -166,6 +166,7 @@ func isConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
 	return true
 }
 
+// isNodeConditionSetAsExpected checks a node for a condition, and returns 'true' if the wanted value is the same as the condition value, useful for polling until a condition on a node is met.
 func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
 	// Check the node readiness condition (logging all).
 	for _, cond := range node.Status.Conditions {
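The condition scan that isNodeConditionSetAsExpected performs (and that IsConditionSetAsExpectedSilent exposes without logging) boils down to the following pattern. A minimal sketch assuming only the v1 types; the helper name conditionIs is hypothetical, and the real function additionally logs and honors the silent flag:

import v1 "k8s.io/api/core/v1"

// conditionIs reports whether the node's condition of the given type has the
// wanted truth value; a condition that is not reported counts as not matching.
func conditionIs(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == conditionType {
			// Compare the observed status against the wanted value.
			return (cond.Status == v1.ConditionTrue) == wantTrue
		}
	}
	return false
}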