diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go
index 139f37d54be..61d0f60893f 100644
--- a/test/e2e_node/density_test.go
+++ b/test/e2e_node/density_test.go
@@ -447,7 +447,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 // between creations there is an interval for throughput control
 func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
 	createTimes := make(map[string]metav1.Time)
-	for _, pod := range pods {
+	for i := range pods {
+		pod := pods[i]
 		createTimes[pod.ObjectMeta.Name] = metav1.Now()
 		go f.PodClient().Create(pod)
 		time.Sleep(interval)
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index ebc17096258..9837a239c93 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -36,7 +36,8 @@ import (
 	cadvisorclient "github.com/google/cadvisor/client/v2"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -371,18 +372,21 @@ func getCadvisorPod() *v1.Pod {
 // deletePodsSync deletes a list of pods and block until pods disappear.
 func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 	var wg sync.WaitGroup
-	for _, pod := range pods {
+	for i := range pods {
+		pod := pods[i]
 		wg.Add(1)
-		go func(pod *v1.Pod) {
+		go func() {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
 			err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
-			framework.ExpectNoError(err)
+			if err != nil && !apierrors.IsNotFound(err) {
+				framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
+			}
 
 			gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
 				30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
-		}(pod)
+		}()
 	}
 	wg.Wait()
 	return
diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go
index 9601e1ad027..38c0a0ce675 100644
--- a/test/e2e_node/restart_test.go
+++ b/test/e2e_node/restart_test.go
@@ -22,12 +22,10 @@ package e2enode
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os/exec"
-	"strings"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -50,7 +48,8 @@ func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (r
 		}
 
 		runningPods = []*v1.Pod{}
-		for _, pod := range podList.Items {
+		for i := range podList.Items {
+			pod := podList.Items[i]
 			if r, err := testutils.PodRunningReadyOrSucceeded(&pod); err != nil || !r {
 				continue
 			}
@@ -84,14 +83,9 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	f := framework.NewDefaultFramework("restart-test")
 	ginkgo.Context("Container Runtime", func() {
 		ginkgo.Context("Network", func() {
-			ginkgo.It("should recover from ip leak [Flaky]", func() {
+			ginkgo.It("should recover from ip leak", func() {
 				if framework.TestContext.ContainerRuntime == "docker" {
-					bytes, err := ioutil.ReadFile("/etc/os-release")
-					if err != nil {
-						if strings.Contains(string(bytes), "ubuntu") {
-							ginkgo.Skip("Test fails with in-tree docker + ubuntu. Skipping test.")
-						}
-					}
+					ginkgo.Skip("Test fails with in-tree docker. Skipping test.")
 				}
 
 				pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")