From d45107a00da3d827dd162f70d95d1c4c63e8d95a Mon Sep 17 00:00:00 2001
From: hwdef
Date: Thu, 12 Dec 2019 14:02:30 +0800
Subject: [PATCH] test/e2e: move funcs from test/e2e/pod to other folders

---
 test/e2e/framework/pod/BUILD            |  2 -
 test/e2e/framework/pod/create.go        | 35 ------------
 test/e2e/framework/pod/resource.go      | 74 --------------------------
 test/e2e/framework/pod/wait.go          | 44 ---------------
 test/e2e/framework/util.go              | 47 +++++++++++++++-
 test/e2e/scheduling/BUILD               |  1 +
 test/e2e/scheduling/framework.go        |  3 +-
 test/e2e/scheduling/predicates.go       | 26 ++++++++-
 test/e2e/storage/persistent_volumes.go  | 38 ++++++++++++-
 test/e2e/storage/utils/create.go        | 24 +++++++--
 10 files changed, 127 insertions(+), 167 deletions(-)

diff --git a/test/e2e/framework/pod/BUILD b/test/e2e/framework/pod/BUILD
index 27dfc95d377..22c8d836dec 100644
--- a/test/e2e/framework/pod/BUILD
+++ b/test/e2e/framework/pod/BUILD
@@ -14,7 +14,6 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/client/conditions:go_default_library",
-        "//pkg/controller:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -23,7 +22,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go
index 417ec2f83ad..f3c5dda1d7b 100644
--- a/test/e2e/framework/pod/create.go
+++ b/test/e2e/framework/pod/create.go
@@ -24,7 +24,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -33,40 +32,6 @@ var (
 	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 )
 
-// CreateWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
-// Note: need named return value so that the err assignment in the defer sets the returned error.
-// Has been shown to be necessary using Go 1.7.
-func CreateWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
-	e2elog.Logf("Creating nfs test pod")
-	pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
-	runPod, err := c.CoreV1().Pods(ns).Create(pod)
-	if err != nil {
-		return fmt.Errorf("pod Create API error: %v", err)
-	}
-	defer func() {
-		delErr := DeletePodWithWait(c, runPod)
-		if err == nil { // don't override previous err value
-			err = delErr // assign to returned err, can be nil
-		}
-	}()
-
-	err = testPodSuccessOrFail(c, ns, runPod)
-	if err != nil {
-		return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
-	}
-	return // note: named return value
-}
-
-// testPodSuccessOrFail tests whether the pod's exit code is zero.
-func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
-	e2elog.Logf("Pod should terminate with exitcode 0 (success)")
-	if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
-		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
-	}
-	e2elog.Logf("Pod %v succeeded ", pod.Name)
-	return nil
-}
-
 // CreateUnschedulablePod with given claims based on node selector
 func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
 	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go
index d07c32b4587..bef9b31431c 100644
--- a/test/e2e/framework/pod/resource.go
+++ b/test/e2e/framework/pod/resource.go
@@ -30,7 +30,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -147,33 +146,6 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
 	return true, nil
 }
 
-// CountRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
-func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
-	// check for remaining pods
-	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return 0, 0, err
-	}
-
-	// nothing remains!
-	if len(pods.Items) == 0 {
-		return 0, 0, nil
-	}
-
-	// stuff remains, log about it
-	LogPodStates(pods.Items)
-
-	// check if there were any pods with missing deletion timestamp
-	numPods := len(pods.Items)
-	missingTimestamp := 0
-	for _, pod := range pods.Items {
-		if pod.DeletionTimestamp == nil {
-			missingTimestamp++
-		}
-	}
-	return numPods, missingTimestamp, nil
-}
-
 func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
 	return func() (bool, error) {
 		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
@@ -321,15 +293,6 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
 	return e
 }
 
-// DumpAllPodInfo logs basic info for all pods.
-func DumpAllPodInfo(c clientset.Interface) {
-	pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
-	if err != nil {
-		e2elog.Logf("unable to fetch pod debug info: %v", err)
-	}
-	LogPodStates(pods.Items)
-}
-
 // LogPodStates logs basic info of provided pods for debugging.
 func LogPodStates(pods []v1.Pod) {
 	// Find maximum widths for pod, node, and phase strings for column printing.
@@ -578,40 +541,3 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
 	}
 	return filtered, nil
 }
-
-// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
-func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
-	for _, pod := range pods.Items {
-		if !masterNodes.Has(pod.Spec.NodeName) {
-			if pod.Spec.NodeName != "" {
-				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
-				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
-				scheduledPods = append(scheduledPods, pod)
-			} else {
-				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
-				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
-				if scheduledCondition.Reason == "Unschedulable" {
-
-					notScheduledPods = append(notScheduledPods, pod)
-				}
-			}
-		}
-	}
-	return
-}
-
-// PatchContainerImages replaces the specified Container Registry with a custom
-// one provided via the KUBE_TEST_REPO_LIST env variable
-func PatchContainerImages(containers []v1.Container) error {
-	var err error
-	for _, c := range containers {
-		c.Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go
index 36503d88b55..409d736054b 100644
--- a/test/e2e/framework/pod/wait.go
+++ b/test/e2e/framework/pod/wait.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@@ -536,49 +535,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 	return pods, err
 }
 
-// WaitForPodsInactive waits until there are no active pods left in the PodStore.
-// This is to make a fair comparison of deletion time between DeleteRCAndPods
-// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
-// when the pod is inactvie.
-func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
-	var activePods []*v1.Pod
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		pods := ps.List()
-		activePods = controller.FilterActivePods(pods)
-		if len(activePods) != 0 {
-			return false, nil
-		}
-		return true, nil
-	})
-
-	if err == wait.ErrWaitTimeout {
-		for _, pod := range activePods {
-			e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
-		}
-		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
-	}
-	return err
-}
-
-// WaitForPodsGone waits until there are no pods left in the PodStore.
-func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
-	var pods []*v1.Pod
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		if pods = ps.List(); len(pods) == 0 {
-			return true, nil
-		}
-		return false, nil
-	})
-
-	if err == wait.ErrWaitTimeout {
-		for _, pod := range pods {
-			e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
-		}
-		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
-	}
-	return err
-}
-
 // WaitForPodsReady waits for the pods to become ready.
 func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 1fca4a956e4..ef919e7d740 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -1237,7 +1237,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 		timeout = timeout + 3*time.Minute
 	}
 
-	err = e2epod.WaitForPodsInactive(ps, interval, timeout)
+	err = waitForPodsInactive(ps, interval, timeout)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
@@ -1247,13 +1247,56 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	// In gce, at any point, small percentage of nodes can disappear for
 	// ~10 minutes due to hostError. 20 minutes should be long enough to
 	// restart VM in that case and delete the pod.
-	err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute)
+	err = waitForPodsGone(ps, interval, 20*time.Minute)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 	}
 	return nil
 }
 
+// waitForPodsGone waits until there are no pods left in the PodStore.
+func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var pods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		if pods = ps.List(); len(pods) == 0 {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range pods {
+			Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
+	}
+	return err
+}
+
+// waitForPodsInactive waits until there are no active pods left in the PodStore.
+// This is to make a fair comparison of deletion time between DeleteRCAndPods
+// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
+// when the pod is inactive.
+func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var activePods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		pods := ps.List()
+		activePods = controller.FilterActivePods(pods)
+		if len(activePods) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range activePods {
+			Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
+	}
+	return err
+}
+
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
 func RunHostCmd(ns, name, cmd string) (string, error) {
diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD
index c4285cadf2e..e42305776f3 100644
--- a/test/e2e/scheduling/BUILD
+++ b/test/e2e/scheduling/BUILD
@@ -17,6 +17,7 @@ go_library(
     importpath = "k8s.io/kubernetes/test/e2e/scheduling",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core/v1/helper/qos:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/apis/scheduling:go_default_library",
diff --git a/test/e2e/scheduling/framework.go b/test/e2e/scheduling/framework.go
index 6f4a553880d..40d28f2b1cc 100644
--- a/test/e2e/scheduling/framework.go
+++ b/test/e2e/scheduling/framework.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 var (
@@ -90,7 +89,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
 		}
 	}
 	pods.Items = filteredPods
-	return e2epod.GetPodsScheduled(masterNodes, pods)
+	return GetPodsScheduled(masterNodes, pods)
 }
 
 // getDeletingPods returns whether there are any pods marked for deletion.
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 1d69aaa448f..f1991147eaf 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -18,6 +18,7 @@ package scheduling
 
 import (
 	"fmt"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -730,7 +731,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
 func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
 	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
 	framework.ExpectNoError(err)
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
+	scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
 
 	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -817,3 +818,26 @@ func translateIPv4ToIPv6(ip string) string {
 	}
 	return ip
 }
+
+// GetPodsScheduled returns the currently scheduled and not scheduled Pods.
+func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
+	for _, pod := range pods.Items {
+		if !masterNodes.Has(pod.Spec.NodeName) {
+			if pod.Spec.NodeName != "" {
+				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
+				framework.ExpectEqual(scheduledCondition != nil, true)
+				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionTrue)
+				scheduledPods = append(scheduledPods, pod)
+			} else {
+				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
+				framework.ExpectEqual(scheduledCondition != nil, true)
+				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionFalse)
+				if scheduledCondition.Reason == "Unschedulable" {
+
+					notScheduledPods = append(notScheduledPods, pod)
+				}
+			}
+		}
+	}
+	return
+}
diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go
index 098c98e3d4d..ebff16dcf38 100644
--- a/test/e2e/storage/persistent_volumes.go
+++ b/test/e2e/storage/persistent_volumes.go
@@ -47,7 +47,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
 	// 2. create the nfs writer pod, test if the write was successful,
 	// then delete the pod and verify that it was deleted
 	ginkgo.By("Checking pod has write access to PersistentVolume")
-	framework.ExpectNoError(e2epod.CreateWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
+	framework.ExpectNoError(createWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
 
 	// 3. delete the PVC, wait for PV to become "Released"
 	ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
@@ -78,7 +78,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
 			return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
 		}
 		// TODO: currently a serialized test of each PV
-		if err = e2epod.CreateWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
+		if err = createWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
 			return err
 		}
 	}
@@ -426,3 +426,37 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
 		},
 	}
 }
+
+// createWaitAndDeletePod creates the test pod, waits for (hopefully) success, and then deletes the pod.
+// Note: need named return value so that the err assignment in the defer sets the returned error.
+// Has been shown to be necessary using Go 1.7.
+func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
+	framework.Logf("Creating nfs test pod")
+	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
+	runPod, err := c.CoreV1().Pods(ns).Create(pod)
+	if err != nil {
+		return fmt.Errorf("pod Create API error: %v", err)
+	}
+	defer func() {
+		delErr := e2epod.DeletePodWithWait(c, runPod)
+		if err == nil { // don't override previous err value
+			err = delErr // assign to returned err, can be nil
+		}
+	}()
+
+	err = testPodSuccessOrFail(c, ns, runPod)
+	if err != nil {
+		return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
+	}
+	return // note: named return value
+}
+
+// testPodSuccessOrFail tests whether the pod's exit code is zero.
+func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
+	framework.Logf("Pod should terminate with exitcode 0 (success)")
+	if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
+		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
+	}
+	framework.Logf("Pod %v succeeded", pod.Name)
+	return nil
+}
diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go
index d1b7ab6f26a..97625159b20 100644
--- a/test/e2e/storage/utils/create.go
+++ b/test/e2e/storage/utils/create.go
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/pkg/errors"
@@ -35,7 +36,6 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 
@@ -360,18 +360,18 @@ func patchItemRecursively(f *framework.Framework, item interface{}) error {
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
 	case *appsv1.StatefulSet:
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
 			return err
 		}
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
 			return err
 		}
 	case *appsv1.DaemonSet:
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
 			return err
 		}
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
 			return err
 		}
 	default:
@@ -624,3 +624,17 @@ func PrettyPrint(item interface{}) string {
 	}
 	return fmt.Sprintf("%+v", item)
 }
+
+// patchContainerImages replaces the specified Container Registry with a custom
+// one provided via the KUBE_TEST_REPO_LIST env variable.
+func patchContainerImages(containers []v1.Container) error {
+	var err error
+	for i, c := range containers {
+		// write through the slice index; the range variable is a copy, so
+		// assigning to c.Image would not update the caller's containers
+		containers[i].Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
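
Usage sketch (illustrative, not part of the patch): after this move, callers inside test/e2e/scheduling reach GetPodsScheduled directly instead of through the e2epod package. The helper name countScheduledPods below is hypothetical; masterNodes is the package-level variable the scheduling suite already defines, and the List call matches the client-go API of this era.

package scheduling

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// countScheduledPods lists all pods in a namespace and splits them with the
// now package-local GetPodsScheduled, mirroring how verifyResult uses it.
func countScheduledPods(c clientset.Interface, ns string) (int, int) {
	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	scheduled, notScheduled := GetPodsScheduled(masterNodes, allPods)
	return len(scheduled), len(notScheduled)
}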