Node tests fixes (#106371)

* capture loop variable

* capture the loop variable and don't fail on not found errors

* capture loop variable

* Revert "Mark restart_test as flaky"

This reverts commit 990e9506de.

* skip e2e node restart test with dockershim

* Update test/e2e_node/restart_test.go

Co-authored-by: Mike Miranda <mikemp96@gmail.com>

* capture loop variable using the index

Co-authored-by: Mike Miranda <mikemp96@gmail.com>
Antonio Ojea 2021-11-15 04:54:47 +01:00 committed by GitHub
parent 991bb65ecc
commit 5eb584d1cb
3 changed files with 16 additions and 17 deletions
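
Most of these commits apply the same Go fix: before Go 1.22, a `for ... range` loop reused a single loop variable across iterations, so goroutines (or retained pointers) created inside the loop could all observe the last element. A minimal standalone sketch of the pitfall and the index-capture pattern used throughout this PR (the pod names are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	pods := []string{"pod-a", "pod-b", "pod-c"}

	var wg sync.WaitGroup
	// Buggy (pre-Go 1.22): all goroutines close over the same variable.
	//
	//	for _, pod := range pods {
	//		go func() { fmt.Println(pod) }() // may print "pod-c" three times
	//	}
	//
	// Fixed, as in this PR: index into the slice and rebind a
	// per-iteration copy before starting the goroutine.
	for i := range pods {
		pod := pods[i]
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(pod) // always this iteration's pod
		}()
	}
	wg.Wait()
}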

test/e2e_node/density_test.go

@@ -447,7 +447,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 // between creations there is an interval for throughput control
 func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
 	createTimes := make(map[string]metav1.Time)
-	for _, pod := range pods {
+	for i := range pods {
+		pod := pods[i]
 		createTimes[pod.ObjectMeta.Name] = metav1.Now()
 		go f.PodClient().Create(pod)
 		time.Sleep(interval)

test/e2e_node/resource_collector.go

@@ -36,7 +36,8 @@ import (
 	cadvisorclient "github.com/google/cadvisor/client/v2"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -371,18 +372,21 @@ func getCadvisorPod() *v1.Pod {
 // deletePodsSync deletes a list of pods and block until pods disappear.
 func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 	var wg sync.WaitGroup
-	for _, pod := range pods {
+	for i := range pods {
+		pod := pods[i]
 		wg.Add(1)
-		go func(pod *v1.Pod) {
+		go func() {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
-			framework.ExpectNoError(err)
+			if err != nil && !apierrors.IsNotFound(err) {
+				framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
+			}
 			gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
 				30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
-		}(pod)
+		}()
 	}
 	wg.Wait()
 	return
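
The `apierrors.IsNotFound` check above is the "don't fail on not found errors" commit: for a delete-and-wait helper, a pod that is already gone counts as success. A minimal sketch of the same pattern outside the e2e framework (the helper name and client wiring are illustrative, not part of this change):

package e2esketch

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deleteIgnoringNotFound is a hypothetical helper: only errors other
// than NotFound are surfaced, since an already-deleted pod is fine.
func deleteIgnoringNotFound(ctx context.Context, c clientset.Interface, ns, name string) error {
	err := c.CoreV1().Pods(ns).Delete(ctx, name, *metav1.NewDeleteOptions(30))
	if err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("deleting pod %s/%s: %w", ns, name, err)
	}
	return nil
}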

test/e2e_node/restart_test.go

@@ -22,12 +22,10 @@ package e2enode
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os/exec"
-	"strings"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -50,7 +48,8 @@ func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (r
 		}
 		runningPods = []*v1.Pod{}
-		for _, pod := range podList.Items {
+		for i := range podList.Items {
+			pod := podList.Items[i]
 			if r, err := testutils.PodRunningReadyOrSucceeded(&pod); err != nil || !r {
 				continue
 			}
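
Even without goroutines, taking `&pod` of a `range` variable is risky if the pointer outlives the iteration: before Go 1.22 every iteration reused one variable, so retained pointers all alias the final element. A self-contained sketch of that aliasing (the `pod` struct here is illustrative):

package main

import "fmt"

type pod struct{ name string }

func main() {
	items := []pod{{"a"}, {"b"}, {"c"}}

	// Pre-Go 1.22: one loop variable, reused every iteration.
	var aliased []*pod
	for _, p := range items {
		aliased = append(aliased, &p) // every element points at p
	}
	fmt.Println(aliased[0].name, aliased[1].name, aliased[2].name) // c c c

	// The pattern from the diff: index and copy per iteration.
	var copied []*pod
	for i := range items {
		p := items[i]
		copied = append(copied, &p)
	}
	fmt.Println(copied[0].name, copied[1].name, copied[2].name) // a b c
}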
@@ -84,14 +83,9 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	f := framework.NewDefaultFramework("restart-test")
 	ginkgo.Context("Container Runtime", func() {
 		ginkgo.Context("Network", func() {
-			ginkgo.It("should recover from ip leak [Flaky]", func() {
+			ginkgo.It("should recover from ip leak", func() {
 				if framework.TestContext.ContainerRuntime == "docker" {
-					bytes, err := ioutil.ReadFile("/etc/os-release")
-					if err != nil {
-						if strings.Contains(string(bytes), "ubuntu") {
-							ginkgo.Skip("Test fails with in-tree docker + ubuntu. Skipping test.")
-						}
-					}
+					ginkgo.Skip("Test fails with in-tree docker. Skipping test.")
 				}
 				pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")