Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-12-15 23:53:09 +00:00)
e2e: use Ginkgo context
All code must use the context from Ginkgo when making API calls or polling for a change; otherwise the code will not return immediately when the test gets aborted.
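For illustration, a minimal sketch of the pattern this commit establishes (the registerNodeSpec helper and the node-listing spec are hypothetical, not part of this change; only the context plumbing reflects the commit): the spec body accepts the context that Ginkgo supplies and threads it through both the poller and the API call, so an aborted test returns immediately instead of blocking on context.TODO().

package e2e

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// registerNodeSpec is a hypothetical helper showing the pattern: the spec
// function takes the context provided by Ginkgo and forwards it everywhere.
func registerNodeSpec(c clientset.Interface) {
	ginkgo.It("waits for at least one node", func(ctx context.Context) {
		// Ginkgo's context, not context.TODO(), flows into the poll
		// and into the API call, so an abort cancels both promptly.
		err := wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute,
			func(ctx context.Context) (bool, error) {
				nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
				if err != nil {
					return false, err
				}
				return len(nodes.Items) > 0, nil
			})
		if err != nil {
			ginkgo.Fail("timed out waiting for nodes: " + err.Error())
		}
	})
}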
@@ -39,16 +39,17 @@ const (
 
 // WaitForAllNodesSchedulable waits up to timeout for all
 // (but TestContext.AllowedNotReadyNodes) to become schedulable.
-func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
+func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
 	if framework.TestContext.AllowedNotReadyNodes == -1 {
 		return nil
 	}
 
 	framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
-	return wait.PollImmediate(
+	return wait.PollImmediateWithContext(
+		ctx,
 		30*time.Second,
 		timeout,
-		CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
+		CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
 	)
 }
@@ -58,9 +59,9 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
 }
 
 // ExpectNodeHasLabel expects that the given node has the given label pair.
-func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
+func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName string, labelKey string, labelValue string) {
 	ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
-	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	framework.ExpectEqual(node.Labels[labelKey], labelValue)
 }
@@ -76,17 +77,17 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)
 }
 
 // ExpectNodeHasTaint expects that the node has the given taint.
-func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
+func ExpectNodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
 	ginkgo.By("verifying the node has the taint " + taint.ToString())
-	if has, err := NodeHasTaint(c, nodeName, taint); !has {
+	if has, err := NodeHasTaint(ctx, c, nodeName, taint); !has {
 		framework.ExpectNoError(err)
 		framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
 	}
 }
 
 // NodeHasTaint returns true if the node has the given taint, else returns false.
-func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
-	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
+	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	if err != nil {
 		return false, err
 	}
@@ -104,14 +105,14 @@ func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool
 // TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
 // and figure out how to do it in a configurable way, as we can't expect all setups to run
 // default test add-ons.
-func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
-	if err := allNodesReady(c, timeout); err != nil {
+func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
+	if err := allNodesReady(ctx, c, timeout); err != nil {
 		return fmt.Errorf("checking for ready nodes: %v", err)
 	}
 	return nil
 }
 
-func allNodesReady(c clientset.Interface, timeout time.Duration) error {
+func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
 	if framework.TestContext.AllowedNotReadyNodes == -1 {
 		return nil
 	}
@@ -119,10 +120,10 @@ func allNodesReady(c clientset.Interface, timeout time.Duration) error {
 	framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
 
 	var notReady []*v1.Node
-	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
+	err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
 		notReady = nil
 		// It should be OK to list unschedulable Nodes here.
-		nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
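The TODO above mentions the AllNodesReady call in AfterEach; a hypothetical call-site migration against the new signature might look like the following sketch (the e2enode import alias, the clientset c, and the 3-minute timeout are assumptions, not part of this change). Setup and cleanup nodes in Ginkgo v2 can likewise accept a context and forward it.

// Hypothetical caller, assuming this package is imported as e2enode:
ginkgo.AfterEach(func(ctx context.Context) {
	// Forward Ginkgo's context instead of letting the helper fall back
	// to context.TODO(), so cleanup is also interruptible.
	framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 3*time.Minute))
})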