Merge pull request #91632 from liggitt/ginkgo-recover-e2e
Defer ginkgo recovers
Commit f834c92ce1
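Ginkgo reports a failed assertion by panicking, and it recovers that panic only on the goroutine that runs the spec. A panic raised in a goroutine that a test starts itself would therefore crash the whole e2e binary instead of failing the spec, which is why each goroutine touched in the hunks below gains defer ginkgo.GinkgoRecover() as its first deferred call. A minimal sketch of the pattern, using a hypothetical spec and helper rather than code from this PR, and assuming Ginkgo v1 with Gomega's fail handler registered via gomega.RegisterFailHandler(ginkgo.Fail) in the suite bootstrap:

package work_test

import (
	"errors"
	"sync"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// doWork is a stand-in for any helper whose failure should fail the spec.
func doWork() error { return errors.New("simulated failure") }

var _ = ginkgo.Describe("background work", func() {
	ginkgo.It("turns a goroutine panic into a spec failure", func() {
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			// Without this, the panic thrown by the failed assertion below
			// would escape the goroutine and crash the test process.
			defer ginkgo.GinkgoRecover()
			defer wg.Done()
			gomega.Expect(doWork()).NotTo(gomega.HaveOccurred())
		}()
		wg.Wait()
	})
})

Because recover only works inside a deferred function on the panicking goroutine, the recovery has to be installed per goroutine; deferring it once in the spec body does not cover goroutines spawned later. That is the change repeated in every hunk below.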
@@ -372,6 +372,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options)
 	framework.ExpectNoError(err)
 	go func() {
+		defer ginkgo.GinkgoRecover()
 		// There should be only one pod being created, which is the pod with the agnhost image.
 		// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
 		numPodCreation := 1
@@ -593,6 +593,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	var orderErr error
 	wg.Add(1)
 	go func() {
+		defer ginkgo.GinkgoRecover()
 		defer wg.Done()

 		expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
@@ -644,6 +645,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	// Verify that statuful set will be scaled down in order.
 	wg.Add(1)
 	go func() {
+		defer ginkgo.GinkgoRecover()
 		defer wg.Done()

 		expectedOrder := []string{ssName + "-2", ssName + "-1", ssName + "-0"}
@@ -166,6 +166,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	failed := false
 	for ix := range nodelist.Items {
 		go func(ix int) {
+			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 			n := nodelist.Items[ix]
 			result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
@@ -23,6 +23,8 @@ import (
 	"sync"
 	"time"

+	"github.com/onsi/ginkgo"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -199,6 +201,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
 	for _, node := range nodes {
 		node := node
 		go func() {
+			defer ginkgo.GinkgoRecover()
 			defer wg.Done()

 			Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
@@ -235,6 +235,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 	for i := 0; i < workers; i++ {
 		wg.Add(1)
 		go func(i int) {
+			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 			for retries := 0; retries < pods; retries++ {
 				name := fmt.Sprintf("pod-submit-status-%d-%d", i, retries)
@@ -274,6 +275,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 	created := podClient.Create(pod)
 	ch := make(chan []watch.Event)
 	go func() {
+		defer ginkgo.GinkgoRecover()
 		defer close(ch)
 		w, err := podClient.Watch(context.TODO(), metav1.ListOptions{
 			ResourceVersion: created.ResourceVersion,
@@ -456,6 +456,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 	ginkgo.By("Start a goroutine to recycle unbound PVs")
 	wg.Add(1)
 	go func() {
+		defer ginkgo.GinkgoRecover()
 		defer wg.Done()
 		w, err := config.client.CoreV1().PersistentVolumes().Watch(context.TODO(), metav1.ListOptions{})
 		framework.ExpectNoError(err)