Remove dependency for node integration tests from e2e fw

Author: drfish, 2020-03-18 15:12:27 +08:00
parent 22a4c2cda1
commit af1e1f4435
2 changed files with 36 additions and 4 deletions

Changed file 1 of 2: BUILD file for the node integration test target (go_test deps)

@@ -14,6 +14,7 @@ go_test(
],
tags = ["integration"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
@@ -27,7 +28,6 @@ go_test(
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//test/utils/image:go_default_library",

Changed file 2 of 2: Go integration test source containing TestTaintBasedEvictions

@@ -30,16 +30,22 @@ import (
"k8s.io/apiserver/pkg/admission"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/integration/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)
+// poll is how often to poll pods, nodes and claims.
+const poll = 2 * time.Second
+type podCondition func(pod *v1.Pod) (bool, error)
// TestTaintBasedEvictions tests related cases for the TaintBasedEvictions feature
func TestTaintBasedEvictions(t *testing.T) {
// we need at least 2 nodes to prevent lifecycle manager from entering "fully-disrupted" mode
@@ -275,7 +281,7 @@ func TestTaintBasedEvictions(t *testing.T) {
}
if test.pod != nil {
-err = pod.WaitForPodCondition(cs, testCtx.NS.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
+err = waitForPodCondition(cs, testCtx.NS.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted
if tolerationSeconds[i] == 0 {
@@ -285,7 +291,7 @@ func TestTaintBasedEvictions(t *testing.T) {
return seconds == tolerationSeconds[i], nil
}
return false, nil
-})
+}, t)
if err != nil {
pod, _ := cs.CoreV1().Pods(testCtx.NS.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod)
@@ -297,3 +303,29 @@ func TestTaintBasedEvictions(t *testing.T) {
})
}
}
+// waitForPodCondition waits for a pod to match the given condition.
+func waitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition, t *testing.T) error {
+	t.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				t.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
+				return err
+			}
+			t.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
+			continue
+		}
+		// log now so that current pod info is reported before calling `condition()`
+		t.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
+			podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
+		if done, err := condition(pod); done {
+			if err == nil {
+				t.Logf("Pod %q satisfied condition %q", podName, desc)
+			}
+			return err
+		}
+	}
+	return fmt.Errorf("gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
+}
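
For context, a minimal usage sketch of the new helper follows. It is not part of the commit: the wrapper name waitForPodRunningAndReady, the package clause, and the "running and ready" condition are illustrative assumptions; only waitForPodCondition, the podCondition type, and the argument order come from the diff above.

// Hypothetical usage sketch (not from the commit). It assumes it lives in the
// same package as waitForPodCondition, shown here as package node.
package node

import (
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// waitForPodRunningAndReady shows the call shape: the closure matches the
// podCondition type, and *testing.T is passed last so the helper can log
// through the test's logger.
func waitForPodRunningAndReady(cs clientset.Interface, ns, podName string, t *testing.T) error {
	return waitForPodCondition(cs, ns, podName, "running and ready", 2*time.Minute,
		func(pod *v1.Pod) (bool, error) {
			// Done once the pod is in the Running phase and reports Ready.
			return pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod), nil
		}, t)
}

Threading the *testing.T through as the last argument lets the helper log via t.Logf instead of the e2e framework's logger, which is what allows the integration test to drop the test/e2e/framework/pod dependency this commit removes.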