mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-28 22:17:14 +00:00
Node tests fixes (#106371)

* capture loop variable
* capture the loop variable and don't fail on not found errors
* capture loop variable
* Revert "Mark restart_test as flaky"
  This reverts commit 990e9506de.
* skip e2e node restart test with dockershim
* Update test/e2e_node/restart_test.go
  Co-authored-by: Mike Miranda <mikemp96@gmail.com>
* capture loop using index

Co-authored-by: Mike Miranda <mikemp96@gmail.com>
parent 991bb65ecc
commit 5eb584d1cb
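The three "capture loop variable" bullets all address the same Go pitfall: before Go 1.22, a for ... range statement reuses a single loop variable across iterations, so a goroutine (or a stored pointer) that captures it can observe whichever element the loop has reached by the time it runs, typically the last one. A minimal, runnable sketch of the failure mode and of the index-copy idiom this commit applies throughout (pod names here are made up):

package main

import (
    "fmt"
    "sync"
)

func main() {
    pods := []string{"pod-a", "pod-b", "pod-c"}
    var wg sync.WaitGroup

    // Buggy (pre-Go 1.22): every goroutine closes over the one
    // reused loop variable, so all of them may print "pod-c".
    for _, pod := range pods {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println("buggy:", pod)
        }()
    }
    wg.Wait()

    // Fixed, as in this commit: copy through the index so each
    // iteration captures its own variable.
    for i := range pods {
        pod := pods[i]
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println("fixed:", pod)
        }()
    }
    wg.Wait()
}

The shadowing form pod := pod is equivalent; per the "capture loop using index" bullet, this commit standardizes on indexing.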
@@ -447,7 +447,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 // between creations there is an interval for throughput control
 func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
     createTimes := make(map[string]metav1.Time)
-    for _, pod := range pods {
+    for i := range pods {
+        pod := pods[i]
         createTimes[pod.ObjectMeta.Name] = metav1.Now()
         go f.PodClient().Create(pod)
         time.Sleep(interval)
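In this first hunk (createBatchPodWithRateControl, presumably in test/e2e_node/density_test.go given the runDensitySeqTest context line), pod never outlives its iteration through a closure: it is handed to f.PodClient().Create(pod) as an argument, which copies it per call. The index copy here mostly makes the per-iteration binding explicit and keeps the loops in this commit uniform. Passing the variable as a goroutine parameter, the style the old }(pod) form in the next file used, is the other standard fix; a self-contained sketch with a hypothetical stand-in createPod helper:

package main

import (
    "fmt"
    "sync"
)

// createPod stands in for f.PodClient().Create (hypothetical stub).
func createPod(name string) { fmt.Println("creating", name) }

func main() {
    pods := []string{"pod-a", "pod-b", "pod-c"}
    var wg sync.WaitGroup
    for _, pod := range pods {
        wg.Add(1)
        // The argument is evaluated at the go statement, so each
        // goroutine receives its own copy of the loop variable.
        go func(name string) {
            defer wg.Done()
            createPod(name)
        }(pod)
    }
    wg.Wait()
}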
@@ -36,7 +36,8 @@ import (
     cadvisorclient "github.com/google/cadvisor/client/v2"
     cadvisorapiv2 "github.com/google/cadvisor/info/v2"
     "github.com/opencontainers/runc/libcontainer/cgroups"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/runtime"
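The new apierrors alias is the conventional import name for k8s.io/apimachinery/pkg/api/errors, whose predicates classify Kubernetes API errors by status reason; it is pulled in for the NotFound check in the next hunk. A small self-contained sketch of how IsNotFound separates "already deleted" from a real failure (the error here is fabricated with NewNotFound purely for illustration):

package main

import (
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    // Fabricated NotFound error, standing in for a Delete response.
    var err error = apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "pod-a")

    // IsNotFound returns false for a nil error, so one predicate
    // cleanly splits the three cases: success, already gone, failed.
    switch {
    case err == nil:
        fmt.Println("deleted")
    case apierrors.IsNotFound(err):
        fmt.Println("already gone; nothing to do")
    default:
        fmt.Println("unexpected error:", err)
    }
}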
@@ -371,18 +372,21 @@ func getCadvisorPod() *v1.Pod {
 // deletePodsSync deletes a list of pods and block until pods disappear.
 func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
     var wg sync.WaitGroup
-    for _, pod := range pods {
+    for i := range pods {
+        pod := pods[i]
         wg.Add(1)
-        go func(pod *v1.Pod) {
+        go func() {
             defer ginkgo.GinkgoRecover()
             defer wg.Done()

             err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
-            framework.ExpectNoError(err)
+            if apierrors.IsNotFound(err) {
+                framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
+            }

             gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
                 30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
-        }(pod)
+        }()
     }
     wg.Wait()
     return
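As recorded, the rewritten guard in this hunk fails the test only when Delete reports NotFound and silently accepts every other error, which reads as the opposite polarity of the commit bullet "capture the loop variable and don't fail on not found errors". The form matching that stated intent would tolerate a pod that is already gone and fail on anything else; a sketch against the hunk's own identifiers (my reading of the intent, not what the commit records):

// Tolerate NotFound (the pod is already gone); fail on any other error.
if err != nil && !apierrors.IsNotFound(err) {
    framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
}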
@@ -22,12 +22,10 @@ package e2enode
 import (
     "context"
     "fmt"
-    "io/ioutil"
     "os/exec"
-    "strings"
     "time"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -50,7 +48,8 @@ func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (r
     }

     runningPods = []*v1.Pod{}
-    for _, pod := range podList.Items {
+    for i := range podList.Items {
+        pod := podList.Items[i]
         if r, err := testutils.PodRunningReadyOrSucceeded(&pod); err != nil || !r {
             continue
         }
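In waitForPods the hazard is pointer aliasing rather than a goroutine: PodRunningReadyOrSucceeded(&pod) takes the variable's address, and before this fix every iteration handed out the address of the same reused range variable, so any pointer that escaped the iteration (for example into the runningPods slice the function accumulates) would alias a single struct. A runnable sketch of the difference under pre-Go 1.22 semantics:

package main

import "fmt"

type pod struct{ name string }

func main() {
    items := []pod{{"pod-a"}, {"pod-b"}, {"pod-c"}}

    // Buggy (pre-Go 1.22): &p aliases the one reused loop variable,
    // so all three stored pointers end up naming the last item.
    var buggy []*pod
    for _, p := range items {
        buggy = append(buggy, &p)
    }

    // Fixed, as in this hunk: copy through the index first, so each
    // iteration owns a distinct variable with a distinct address.
    var fixed []*pod
    for i := range items {
        p := items[i]
        fixed = append(fixed, &p)
    }

    fmt.Println(buggy[0].name, buggy[1].name, buggy[2].name) // pod-c pod-c pod-c (pre-1.22)
    fmt.Println(fixed[0].name, fixed[1].name, fixed[2].name) // pod-a pod-b pod-c
}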
@@ -84,14 +83,9 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
     f := framework.NewDefaultFramework("restart-test")
     ginkgo.Context("Container Runtime", func() {
         ginkgo.Context("Network", func() {
-            ginkgo.It("should recover from ip leak [Flaky]", func() {
+            ginkgo.It("should recover from ip leak", func() {
                 if framework.TestContext.ContainerRuntime == "docker" {
-                    bytes, err := ioutil.ReadFile("/etc/os-release")
-                    if err != nil {
-                        if strings.Contains(string(bytes), "ubuntu") {
-                            ginkgo.Skip("Test fails with in-tree docker + ubuntu. Skipping test.")
-                        }
-                    }
+                    ginkgo.Skip("Test fails with in-tree docker. Skipping test.")
                 }

                 pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
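The deleted probe was itself inverted as recorded: it only examined bytes when ReadFile had failed (err != nil), the one case where the contents are unreliable, so the ubuntu-specific skip was unlikely ever to fire as intended. The commit drops the probe and skips unconditionally under the docker runtime, per the "skip e2e node restart test with dockershim" bullet, and removes the [Flaky] tag to reinstate the test elsewhere. For reference, the polarity the removed check presumably wanted (a sketch, not part of the commit):

// Skip only when /etc/os-release is readable and names ubuntu (note err == nil).
if bytes, err := ioutil.ReadFile("/etc/os-release"); err == nil && strings.Contains(string(bytes), "ubuntu") {
    ginkgo.Skip("Test fails with in-tree docker + ubuntu. Skipping test.")
}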