Stop using the deprecated method CurrentGinkgoTestDescription
Besides being deprecated, using the method might lead to a `concurrent map writes` issue, per the discussion here: https://github.com/onsi/ginkgo/issues/970

Signed-off-by: Dave Chen <dave.chen@arm.com>
parent 2f3028c00d
commit fd4b5b629b
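The change is mechanical across all touched suites: `ginkgo.CurrentGinkgoTestDescription()` (deprecated, and implicated in the `concurrent map writes` issue linked above) is swapped for `ginkgo.CurrentSpecReport()`, which returns a copy of the current spec's report, and the `Failed` field becomes the `Failed()` method. A minimal sketch of the pattern, assuming the `github.com/onsi/ginkgo/v2` import used by the migrated suites; the namespace and dump helper below are placeholders, not the real framework code:

package e2e_test

import (
	"github.com/onsi/ginkgo/v2"
)

// dumpDebugInfo stands in for framework.DumpDebugInfo in the real tests.
func dumpDebugInfo(ns string) { /* collect events, pod logs, ... */ }

var _ = ginkgo.Describe("example suite", func() {
	ns := "placeholder-namespace"

	ginkgo.AfterEach(func() {
		// Old, deprecated:
		//   if ginkgo.CurrentGinkgoTestDescription().Failed { ... }
		// New: CurrentSpecReport() hands back a copy of the current spec's
		// report, so reading it in AfterEach does not race with Ginkgo's
		// own bookkeeping.
		if ginkgo.CurrentSpecReport().Failed() {
			dumpDebugInfo(ns)
		}
	})

	ginkgo.It("does something", func() {})
})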
@@ -120,7 +120,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			framework.DumpDebugInfo(c, ns)
 		}
 		framework.Logf("Deleting all statefulset in ns %v", ns)

@@ -1107,7 +1107,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			framework.DumpDebugInfo(c, ns)
 		}
 		framework.Logf("Deleting all statefulset in ns %v", ns)

@@ -1230,7 +1230,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			framework.DumpDebugInfo(c, ns)
 		}
 		framework.Logf("Deleting all statefulset in ns %v", ns)

@@ -66,7 +66,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			// Most of the reboot tests just make sure that addon/system pods are running, so dump
 			// events for the kube-system namespace on failures
 			namespaceName := metav1.NamespaceSystem

@@ -75,7 +75,7 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			// Make sure that addon/system pods are running, so dump
 			// events for the kube-system namespace on failures
 			ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))

@@ -390,7 +390,7 @@ func (f *Framework) AfterEach() {
 	// Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result
 	// if delete-namespace set to false, namespace will always be preserved.
 	// if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed.
-	if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) {
+	if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentSpecReport().Failed()) {
 		for _, ns := range f.namespacesToDelete {
 			ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
 			if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {

@@ -398,7 +398,7 @@ func (f *Framework) AfterEach() {
 				nsDeletionErrors[ns.Name] = err

 				// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
-				if !ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
+				if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
 					DumpAllNamespaceInfo(f.ClientSet, ns.Name)
 				}
 			} else {

@@ -432,7 +432,7 @@ func (f *Framework) AfterEach() {

 	// run all aftereach functions in random order to ensure no dependencies grow
 	for _, afterEachFn := range f.afterEaches {
-		afterEachFn(f, ginkgo.CurrentGinkgoTestDescription().Failed)
+		afterEachFn(f, ginkgo.CurrentSpecReport().Failed())
 	}

 	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {

@@ -510,7 +510,7 @@ func (f *Framework) DeleteNamespace(name string) {
 		}
 	}()
 	// if current test failed then we should dump namespace information
-	if !f.SkipNamespaceCreation && ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
+	if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
 		DumpAllNamespaceInfo(f.ClientSet, name)
 	}

@@ -488,10 +488,13 @@ type ClientConfigGetter func() (*restclient.Config, error)
 func LoadConfig() (config *restclient.Config, err error) {
 	defer func() {
 		if err == nil && config != nil {
-			testDesc := ginkgo.CurrentGinkgoTestDescription()
-			if len(testDesc.ComponentTexts) > 0 {
-				componentTexts := strings.Join(testDesc.ComponentTexts, " ")
-				config.UserAgent = fmt.Sprintf("%s -- %s", restclient.DefaultKubernetesUserAgent(), componentTexts)
+			testDesc := ginkgo.CurrentSpecReport()
+			if len(testDesc.ContainerHierarchyTexts) > 0 {
+				testName := strings.Join(testDesc.ContainerHierarchyTexts, " ")
+				if len(testDesc.LeafNodeText) > 0 {
+					testName = testName + " " + testDesc.LeafNodeText
+				}
+				config.UserAgent = fmt.Sprintf("%s -- %s", restclient.DefaultKubernetesUserAgent(), testName)
 			}
 		}
 	}()
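The `LoadConfig` hunk above is one place where the replacement is not one-to-one: the old `ComponentTexts` slice carried every `Describe`/`Context` text plus the `It` text, while `SpecReport` splits them into `ContainerHierarchyTexts` and `LeafNodeText`. A hedged sketch of how the two shapes line up; the helper name is illustrative, not part of the framework:

package e2e_test

import (
	"strings"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

// fullSpecText rebuilds the equivalent of the old ComponentTexts join:
// enclosing container texts first, then the leaf (It) text, space separated.
func fullSpecText(report types.SpecReport) string {
	name := strings.Join(report.ContainerHierarchyTexts, " ")
	if len(report.LeafNodeText) > 0 {
		name = name + " " + report.LeafNodeText
	}
	return name
}

var _ = ginkgo.It("tags API requests with the spec name", func() {
	_ = fullSpecText(ginkgo.CurrentSpecReport())
})

`SpecReport` also appears to expose a pre-joined form (`FullText()`); the diff keeps the explicit join, which makes the user-agent format easy to read at the call site.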
@@ -102,7 +102,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() {

 	// Platform specific cleanup
 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			e2eingress.DescribeIng(ns)
 		}
 		if jig.Ingress == nil {

@@ -147,7 +147,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() {

 	// Platform specific cleanup
 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			e2eingress.DescribeIng(ns)
 		}
 		if jig.Ingress == nil {

@@ -67,7 +67,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			DescribeSvc(f.Namespace.Name)
 		}
 		for _, lb := range serviceLBNames {

@@ -1007,7 +1007,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			DescribeSvc(f.Namespace.Name)
 		}
 		for _, lb := range serviceLBNames {

@@ -53,7 +53,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			DescribeSvc(f.Namespace.Name)
 		}
 		for _, lb := range serviceLBNames {

@@ -760,7 +760,7 @@ var _ = common.SIGDescribe("Services", func() {
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			DescribeSvc(f.Namespace.Name)
 		}
 		for _, lb := range serviceLBNames {

@@ -36,7 +36,7 @@ var _ = SIGDescribe("AppArmor", func() {
 		e2esecurity.LoadAppArmorProfiles(f.Namespace.Name, f.ClientSet)
 	})
 	ginkgo.AfterEach(func() {
-		if !ginkgo.CurrentGinkgoTestDescription().Failed {
+		if !ginkgo.CurrentSpecReport().Failed() {
 			return
 		}
 		e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)

@@ -469,7 +469,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {

 	ginkgo.AfterEach(func() {
 		// print out additional info if tests failed
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			// List existing PriorityClasses.
 			priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
 			if err != nil {

@@ -705,7 +705,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {

 	ginkgo.AfterEach(func() {
 		// Print out additional info if tests failed.
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			// List existing PriorityClasses.
 			priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{})
 			if err != nil {

@@ -57,14 +57,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
 	if framework.TestContext.ReportDir == "" {
 		to.LogWriter = ginkgo.GinkgoWriter
 	} else {
-		test := ginkgo.CurrentGinkgoTestDescription()
+		test := ginkgo.CurrentSpecReport()
 		// Clean up each individual component text such that
 		// it contains only characters that are valid as file
 		// name.
 		reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
-		var components []string
-		for _, component := range test.ComponentTexts {
-			components = append(components, reg.ReplaceAllString(component, "_"))
+		var testName []string
+		for _, text := range test.ContainerHierarchyTexts {
+			testName = append(testName, reg.ReplaceAllString(text, "_"))
+			if len(test.LeafNodeText) > 0 {
+				testName = append(testName, reg.ReplaceAllString(test.LeafNodeText, "_"))
+			}
 		}
 		// We end the prefix with a slash to ensure that all logs
 		// end up in a directory named after the current test.

@@ -74,7 +77,7 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
 		// keeps each directory name smaller (the full test
 		// name at one point exceeded 256 characters, which was
 		// too much for some filesystems).
-		logDir := framework.TestContext.ReportDir + "/" + strings.Join(components, "/")
+		logDir := framework.TestContext.ReportDir + "/" + strings.Join(testName, "/")
 		to.LogPathPrefix = logDir + "/"

 		err := os.MkdirAll(logDir, 0755)
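The `StartPodLogs` hunks apply the same split when turning a spec name into a per-test log directory: each container text and the leaf text is run through a regexp so every path segment contains only filesystem-safe characters. A small standalone sketch of that sanitization with made-up test names; the report directory below is a placeholder:

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
)

func main() {
	// Same pattern as the diff: any run of characters outside
	// [a-zA-Z0-9_-] collapses to a single underscore.
	reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")

	containerTexts := []string{"Container Manager Misc [Serial]", "Validate OOM score adjustments"}
	leafText := "pod infra containers should have OOM score adjusted"

	var segments []string
	for _, text := range append(containerTexts, leafText) {
		segments = append(segments, reg.ReplaceAllString(text, "_"))
	}

	// One nested directory per component keeps each individual name short
	// enough for filesystem limits, as the diff's comment explains.
	logDir := filepath.Join(append([]string{"/tmp/report-dir"}, segments...)...)
	fmt.Println(logDir)
	// Prints:
	// /tmp/report-dir/Container_Manager_Misc_Serial_/Validate_OOM_score_adjustments/pod_infra_containers_should_have_OOM_score_adjusted
}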
@@ -155,7 +155,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
 		})
 		// Log the running containers here to help debugging.
 		ginkgo.AfterEach(func() {
-			if ginkgo.CurrentGinkgoTestDescription().Failed {
+			if ginkgo.CurrentSpecReport().Failed() {
 				ginkgo.By("Dump all running containers")
 				runtime, _, err := getCRIClient()
 				framework.ExpectNoError(err)

@@ -644,7 +644,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 			},
 		})

 		if ginkgo.CurrentGinkgoTestDescription().Failed {
-		if ginkgo.CurrentGinkgoTestDescription().Failed {
+		if ginkgo.CurrentSpecReport().Failed() {
 			if framework.TestContext.DumpLogsOnFailure {
 				logPodEvents(f)
 				logNodeEvents(f)

@@ -22,7 +22,7 @@ import (
 	"strconv"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"

@@ -264,7 +264,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 			return nil
 		}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())

-		if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
+		if ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {
 			logNodeEvents(f)
 			logPodEvents(f)
 		}

@@ -425,7 +425,7 @@ current-context: local-context
 	})

 	ginkgo.AfterEach(func() {
-		if ginkgo.CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
+		if ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {
 			ginkgo.By("Get node problem detector log")
 			log, err := e2epod.GetPodLogs(c, ns, name, name)
 			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

@@ -115,7 +115,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
 			var zero int64 = 0
 			f.PodClient().DeleteSync(pod0, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
 			f.PodClient().DeleteSync(pod1, metav1.DeleteOptions{GracePeriodSeconds: &zero}, 10*time.Minute)
-			if !ginkgo.CurrentGinkgoTestDescription().Failed {
+			if !ginkgo.CurrentSpecReport().Failed() {
 				return
 			}
 			if framework.TestContext.DumpLogsOnFailure {

@@ -43,7 +43,7 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("when querying /stats/summary", func() {
 		ginkgo.AfterEach(func() {
-			if !ginkgo.CurrentGinkgoTestDescription().Failed {
+			if !ginkgo.CurrentSpecReport().Failed() {
 				return
 			}
 			if framework.TestContext.DumpLogsOnFailure {