e2e framework: move node helper code into sub package

This reduces the size of the test/e2e/framework package itself. Because the
framework no longer checks nodes by default, E2E test suites must set their
own check function or restore the original one by importing
"k8s.io/kubernetes/test/e2e/framework/todo/node/init".
Patrick Ohly 2022-08-25 13:43:04 +02:00
parent c45a924c5e
commit b8d28cb6c3
9 changed files with 210 additions and 138 deletions
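For an out-of-tree E2E suite, the opt-in is the same blank import that the hunks below add to the in-tree suites. A minimal sketch (the surrounding TestMain is hypothetical; only the import path comes from this commit):

package e2e

import (
	"os"
	"testing"

	// reconfigure framework: restore the post-test node readiness check
	_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
)

func TestMain(m *testing.M) {
	// ... flag parsing and framework setup as before ...
	os.Exit(m.Run())
}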

View File

@@ -65,6 +65,7 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
)
// handleFlags sets up all flags and parses the command line.

View File

@@ -471,13 +471,6 @@ func (f *Framework) AfterEach() {
}
printSummaries(f.TestSummaries, f.BaseName)
// Check whether all nodes are ready after the test.
// This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure.
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
Failf("All nodes should be ready after test, %v", err)
}
}
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to

View File

@@ -46,6 +46,7 @@ import (
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
e2etodonode "k8s.io/kubernetes/test/e2e/framework/todo/node"
e2etodopod "k8s.io/kubernetes/test/e2e/framework/todo/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
netutils "k8s.io/utils/net"
@@ -778,7 +779,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)
ginkgo.By("Getting node addresses")
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2etodonode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
framework.ExpectNoError(err)
@@ -838,7 +839,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
}
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
framework.ExpectNoError(e2etodonode.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount)
framework.ExpectNoError(err)
nodes := nodeList.Items

View File

@@ -26,10 +26,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
testutils "k8s.io/kubernetes/test/utils"
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
)
// ByNameContainer returns a ReplicationController with specified name and container

View File

@@ -0,0 +1,166 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
)
// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (except up to TestContext.AllowedNotReadyNodes of them) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
framework.ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
}
// RemoveLabelOffNode cleans up a label that was temporarily added to a node;
// it won't fail if the target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
framework.ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
ginkgo.By("verifying the node doesn't have the label " + labelKey)
framework.ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
framework.ExpectNoError(err)
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// framework.TestContext.AllowedNotReadyNodes bypasses the post-test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
}
return nil
}
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > framework.TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}
// taintExists checks whether the given taint exists in the list of taints. Returns true if it exists, false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
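
The helpers above keep their old names and signatures; only the import path changes. A hypothetical call site (the label key and wrapper function are illustrative, the "e2etodonode" alias matches the one used elsewhere in this commit):

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2etodonode "k8s.io/kubernetes/test/e2e/framework/todo/node"
)

// prepareNode waits for schedulability, labels one node, and returns a cleanup
// callback that removes the label again.
func prepareNode(f *framework.Framework, nodeName string) func() {
	framework.ExpectNoError(e2etodonode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
	e2etodonode.AddOrUpdateLabelOnNode(f.ClientSet, nodeName, "example.com/role", "test")
	e2etodonode.ExpectNodeHasLabel(f.ClientSet, nodeName, "example.com/role", "test")
	return func() {
		e2etodonode.RemoveLabelOffNode(f.ClientSet, nodeName, "example.com/role")
	}
}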

View File

@@ -0,0 +1,37 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package init registers node.AllNodesReady.
package init
import (
"time"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/todo/node"
)
func init() {
framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
func(f *framework.Framework) {
ginkgo.AfterEach(func() {
framework.ExpectNoError(node.AllNodesReady(f.ClientSet, 3*time.Minute))
})
},
)
}
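
The init package above restores the stock check. The other option named in the commit message, registering a custom check, follows the same extension pattern. A hypothetical sketch (package name and check body are illustrative):

package mycheck

import (
	"time"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
	e2etodonode "k8s.io/kubernetes/test/e2e/framework/todo/node"
)

func init() {
	framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
		func(f *framework.Framework) {
			ginkgo.AfterEach(func() {
				// Stricter than the default: fail the test if nodes do not
				// become ready again within one minute.
				framework.ExpectNoError(e2etodonode.AllNodesReady(f.ClientSet, time.Minute))
			})
		},
	)
}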

View File

@@ -52,19 +52,11 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
netutils "k8s.io/utils/net"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
)
@@ -553,116 +545,6 @@ func TryKill(cmd *exec.Cmd) {
}
}
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, TestContext.NonblockingTaints, TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
ExpectEqual(node.Labels[labelKey], labelValue)
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
ginkgo.By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@@ -800,16 +682,6 @@ func PrettyPrintJSON(metrics interface{}) string {
return formatted.String()
}
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
//

View File

@@ -30,6 +30,7 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
)
func TestMain(m *testing.M) {

View File

@@ -55,6 +55,7 @@ import (
// reconfigure framework
_ "k8s.io/kubernetes/test/e2e/framework/debug/init"
_ "k8s.io/kubernetes/test/e2e/framework/todo/node/init"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"