e2e: accept context from Ginkgo
Every ginkgo callback should return immediately when a timeout occurs or the
test run manually gets aborted with CTRL-C. To do that, each callback must
take a ctx parameter and pass it through to all code which might block.

This is a first automated step towards that: the additional parameter got
added with

    sed -i 's/\(framework.ConformanceIt\|ginkgo.It\)\(.*\)func() {$/\1\2func(ctx context.Context) {/' \
        $(git grep -l -e framework.ConformanceIt -e ginkgo.It)

    $GOPATH/bin/goimports -w $(git status | grep modified: | sed -e 's/.* //')

log_test.go was left unchanged.
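The rewrite is purely mechanical; the new ctx parameter is not yet consumed by
the test bodies in the diff below. For illustration, a minimal sketch of the
intended end state, assuming the usual e2e framework imports
(metav1 = k8s.io/apimachinery/pkg/apis/meta/v1) and a hypothetical
pod-listing spec:

    // After this commit, every callback has the ctx parameter:
    ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) {
        testStackdriverMonitoring(f, 1, 100, 200)
    })

    // Intended end state (follow-up work, hypothetical spec): ctx is
    // handed to every call that can block, so the spec returns as soon
    // as the context is cancelled on timeout or CTRL-C.
    ginkgo.It("should list pods", func(ctx context.Context) {
        pods, err := f.ClientSet.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
        framework.ExpectNoError(err)
        gomega.Expect(pods.Items).NotTo(gomega.BeEmpty())
    })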
@@ -56,7 +56,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f := framework.NewDefaultFramework("stackdriver-monitoring")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-	ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() {
+	ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func(ctx context.Context) {
 		testStackdriverAcceleratorMonitoring(f)
 	})
 
@@ -56,7 +56,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f := framework.NewDefaultFramework("stackdriver-monitoring")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func() {
+	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) {
 		kubeClient := f.ClientSet
 		config, err := framework.LoadConfig()
 		if err != nil {
@@ -71,7 +71,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 		testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel)
 	})
 
-	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func() {
+	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) {
 		kubeClient := f.ClientSet
 		config, err := framework.LoadConfig()
 		if err != nil {
@@ -86,7 +86,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 		testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel)
 	})
 
-	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func() {
+	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func(ctx context.Context) {
 		kubeClient := f.ClientSet
 		config, err := framework.LoadConfig()
 		if err != nil {
@@ -17,6 +17,7 @@ limitations under the License.
 package monitoring
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"time"
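The added "context" import above is the work of the goimports pass from the
commit message: the rewritten callbacks now reference context.Context, so
each modified file gains the missing import automatically.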
@@ -51,7 +52,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 		}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())
 	})
 
-	ginkgo.It("should grab all metrics from API server.", func() {
+	ginkgo.It("should grab all metrics from API server.", func(ctx context.Context) {
 		ginkgo.By("Connecting to /metrics endpoint")
 		response, err := grabber.GrabFromAPIServer()
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
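The MetricsGrabber specs illustrate why the signature change alone is not
enough: grabber.GrabFromAPIServer() still blocks without the context. A
hypothetical follow-up (not part of this commit) would thread it through:

    // Hypothetical follow-up: the grabber method accepts the context,
    // so the proxied /metrics request is aborted when ctx is cancelled.
    response, err := grabber.GrabFromAPIServer(ctx)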
@@ -61,7 +62,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 		gomega.Expect(response).NotTo(gomega.BeEmpty())
 	})
 
-	ginkgo.It("should grab all metrics from a Kubelet.", func() {
+	ginkgo.It("should grab all metrics from a Kubelet.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Node through the API server")
 		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
@@ -73,7 +74,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 		gomega.Expect(response).NotTo(gomega.BeEmpty())
 	})
 
-	ginkgo.It("should grab all metrics from a Scheduler.", func() {
+	ginkgo.It("should grab all metrics from a Scheduler.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Pod through the API server")
 		response, err := grabber.GrabFromScheduler()
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
@@ -83,7 +84,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 		gomega.Expect(response).NotTo(gomega.BeEmpty())
 	})
 
-	ginkgo.It("should grab all metrics from a ControllerManager.", func() {
+	ginkgo.It("should grab all metrics from a ControllerManager.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Pod through the API server")
 		response, err := grabber.GrabFromControllerManager()
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
@@ -68,7 +68,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f := framework.NewDefaultFramework("stackdriver-monitoring")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-	ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func() {
+	ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) {
 		testStackdriverMonitoring(f, 1, 100, 200)
 	})
 
@@ -54,7 +54,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	var kubeClient clientset.Interface
 
-	ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func() {
+	ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func(ctx context.Context) {
 		kubeClient = f.ClientSet
 		testAgent(f, kubeClient)
 	})
 
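On the Ginkgo side, v2 detects callbacks that accept a context and injects a
SpecContext that is cancelled on interrupts and, with the NodeTimeout
decorator, on per-node timeouts. A minimal sketch, assuming Ginkgo v2.3+:

    ginkgo.It("returns promptly when aborted", ginkgo.NodeTimeout(30*time.Second), func(ctx context.Context) {
        select {
        case <-time.After(10 * time.Minute): // simulated blocking work
        case <-ctx.Done(): // timeout or CTRL-C: return immediately
        }
    })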