mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-01 09:18:45 +00:00)
e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change, otherwise the code would not return immediately when the test gets aborted.
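The pattern applied throughout the diff below: helpers stop creating their own context (context.Background()/context.TODO()) and instead take the ctx that Ginkgo passes into the spec body, so cancellation propagates into every API call and poll. A minimal standalone sketch of the idea, not part of this commit — the clientset, namespace, and pod used here are placeholders:

package sketch

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// registerSketchSpec is illustrative only: it shows how the Ginkgo-provided
// ctx is threaded into client-go calls and Gomega polling.
func registerSketchSpec(c clientset.Interface, ns string, pod *v1.Pod) {
	ginkgo.It("returns promptly when the spec is aborted", func(ctx context.Context) {
		// The ctx is cancelled when the spec times out or the run is
		// interrupted, so this Create call does not outlive the test the way
		// a context.TODO() call would.
		_, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())

		// Polling uses the same ctx; Gomega stops the Eventually loop as soon
		// as the context is cancelled.
		gomega.Eventually(ctx, func() error {
			_, err := c.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{})
			return err
		}).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.Succeed())
	})
}

That is exactly what the changes below do for each test: every helper gains a ctx parameter, every context.TODO()/context.Background() becomes the spec's ctx, and polling helpers receive the ctx as well.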
@@ -57,15 +57,14 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
 	ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func(ctx context.Context) {
-		testStackdriverAcceleratorMonitoring(f)
+		testStackdriverAcceleratorMonitoring(ctx, f)
 	})
 
 })
 
-func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
+func testStackdriverAcceleratorMonitoring(ctx context.Context, f *framework.Framework) {
 	projectID := framework.TestContext.CloudConfig.ProjectID
 
-	ctx := context.Background()
 	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
 	framework.ExpectNoError(err)
 
@@ -80,9 +79,9 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
 		gcmService.BasePath = basePathOverride
 	}
 
-	scheduling.SetupNVIDIAGPUNode(f, false)
+	scheduling.SetupNVIDIAGPUNode(ctx, f, false)
 
-	e2epod.NewPodClient(f).Create(&v1.Pod{
+	e2epod.NewPodClient(f).Create(ctx, &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: rcName,
 		},
@@ -68,7 +68,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 		restMapper.Reset()
 		apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient)
 		customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter)
-		testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel)
+		testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel)
 	})
 
 	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) {
@@ -83,7 +83,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 		restMapper.Reset()
 		apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient)
 		customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter)
-		testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel)
+		testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel)
 	})
 
 	ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func(ctx context.Context) {
@@ -93,14 +93,13 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 			framework.Failf("Failed to load config: %s", err)
 		}
 		externalMetricsClient := externalclient.NewForConfigOrDie(config)
-		testExternalMetrics(f, kubeClient, externalMetricsClient)
+		testExternalMetrics(ctx, f, kubeClient, externalMetricsClient)
 	})
 })
 
-func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) {
+func testCustomMetrics(ctx context.Context, f *framework.Framework, kubeClient clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) {
 	projectID := framework.TestContext.CloudConfig.ProjectID
 
-	ctx := context.Background()
 	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
 	framework.ExpectNoError(err)
 
@@ -122,14 +121,14 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
 	}
 	ginkgo.DeferCleanup(CleanupAdapter, adapterDeployment)
 
-	_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
+	_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, HPAPermissions, metav1.CreateOptions{})
 	if err != nil {
 		framework.Failf("Failed to create ClusterRoleBindings: %v", err)
 	}
 	ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
 
 	// Run application that exports the metric
-	_, err = createSDExporterPods(f, kubeClient)
+	_, err = createSDExporterPods(ctx, f, kubeClient)
 	if err != nil {
 		framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
 	}
@@ -144,10 +143,9 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
 }
 
 // TODO(kawych): migrate this test to new resource model
-func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) {
+func testExternalMetrics(ctx context.Context, f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) {
 	projectID := framework.TestContext.CloudConfig.ProjectID
 
-	ctx := context.Background()
 	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
 	framework.ExpectNoError(err)
 
@@ -170,14 +168,14 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
 	}
 	ginkgo.DeferCleanup(CleanupAdapter, AdapterForOldResourceModel)
 
-	_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions, metav1.CreateOptions{})
+	_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, HPAPermissions, metav1.CreateOptions{})
 	if err != nil {
 		framework.Failf("Failed to create ClusterRoleBindings: %v", err)
 	}
 	ginkgo.DeferCleanup(kubeClient.RbacV1().ClusterRoleBindings().Delete, HPAPermissions.Name, metav1.DeleteOptions{})
 
 	// Run application that exports the metric
-	pod, err := createSDExporterPods(f, kubeClient)
+	pod, err := createSDExporterPods(ctx, f, kubeClient)
 	if err != nil {
 		framework.Failf("Failed to create stackdriver-exporter pod: %s", err)
 	}
@@ -259,22 +257,22 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric
 	}
 }
 
-func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) {
-	err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod1, metav1.DeleteOptions{})
+func cleanupSDExporterPod(ctx context.Context, f *framework.Framework, cs clientset.Interface) {
+	err := cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, stackdriverExporterPod1, metav1.DeleteOptions{})
 	if err != nil {
 		framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
 	}
-	err = cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod2, metav1.DeleteOptions{})
+	err = cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, stackdriverExporterPod2, metav1.DeleteOptions{})
 	if err != nil {
 		framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
 	}
 }
 
-func createSDExporterPods(f *framework.Framework, cs clientset.Interface) (*v1.Pod, error) {
-	pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue), metav1.CreateOptions{})
+func createSDExporterPods(ctx context.Context, f *framework.Framework, cs clientset.Interface) (*v1.Pod, error) {
+	pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue), metav1.CreateOptions{})
 	if err != nil {
 		return nil, err
 	}
-	_, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue), metav1.CreateOptions{})
+	_, err = cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue), metav1.CreateOptions{})
 	return pod, err
 }
@@ -39,12 +39,12 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	var c, ec clientset.Interface
 	var grabber *e2emetrics.Grabber
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		var err error
 		c = f.ClientSet
 		ec = f.KubemarkExternalClusterClientSet
-		gomega.Eventually(func() error {
-			grabber, err = e2emetrics.NewMetricsGrabber(c, ec, f.ClientConfig(), true, true, true, true, true, true)
+		gomega.Eventually(ctx, func() error {
+			grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
 			if err != nil {
 				return fmt.Errorf("failed to create metrics grabber: %v", err)
 			}
@@ -54,7 +54,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 
 	ginkgo.It("should grab all metrics from API server.", func(ctx context.Context) {
 		ginkgo.By("Connecting to /metrics endpoint")
-		response, err := grabber.GrabFromAPIServer()
+		response, err := grabber.GrabFromAPIServer(ctx)
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
 			e2eskipper.Skipf("%v", err)
 		}
@@ -64,19 +64,19 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 
 	ginkgo.It("should grab all metrics from a Kubelet.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Node through the API server")
-		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
 			e2eskipper.Skipf("%v", err)
 		}
 		framework.ExpectNoError(err)
-		response, err := grabber.GrabFromKubelet(node.Name)
+		response, err := grabber.GrabFromKubelet(ctx, node.Name)
 		framework.ExpectNoError(err)
 		gomega.Expect(response).NotTo(gomega.BeEmpty())
 	})
 
 	ginkgo.It("should grab all metrics from a Scheduler.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Pod through the API server")
-		response, err := grabber.GrabFromScheduler()
+		response, err := grabber.GrabFromScheduler(ctx)
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
 			e2eskipper.Skipf("%v", err)
 		}
@@ -86,7 +86,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
 
 	ginkgo.It("should grab all metrics from a ControllerManager.", func(ctx context.Context) {
 		ginkgo.By("Proxying to Pod through the API server")
-		response, err := grabber.GrabFromControllerManager()
+		response, err := grabber.GrabFromControllerManager(ctx)
 		if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
 			e2eskipper.Skipf("%v", err)
 		}
@@ -69,15 +69,14 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
 	ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) {
-		testStackdriverMonitoring(f, 1, 100, 200)
+		testStackdriverMonitoring(ctx, f, 1, 100, 200)
 	})
 
 })
 
-func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) {
+func testStackdriverMonitoring(ctx context.Context, f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) {
 	projectID := framework.TestContext.CloudConfig.ProjectID
 
-	ctx := context.Background()
 	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
 	framework.ExpectNoError(err)
 
@@ -105,10 +104,10 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 
 	framework.ExpectNoError(err)
 
-	rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
+	rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
 	ginkgo.DeferCleanup(rc.CleanUp)
 
-	rc.WaitForReplicas(pods, 15*time.Minute)
+	rc.WaitForReplicas(ctx, pods, 15*time.Minute)
 
 	metricsMap := map[string]bool{}
 	pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
@@ -56,11 +56,11 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
 
 	ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func(ctx context.Context) {
 		kubeClient = f.ClientSet
-		testAgent(f, kubeClient)
+		testAgent(ctx, f, kubeClient)
 	})
 })
 
-func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
+func testAgent(ctx context.Context, f *framework.Framework, kubeClient clientset.Interface) {
 	projectID := framework.TestContext.CloudConfig.ProjectID
 	resourceType := "k8s_container"
 	uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix())
@@ -70,13 +70,13 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
 		resourceType,
 		uniqueContainerName)
 
-	oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope)
+	oauthClient, err := google.DefaultClient(ctx, MonitoringScope)
 	if err != nil {
 		framework.Failf("Failed to create oauth client: %s", err)
 	}
 
 	// Create test pod with unique name.
-	_ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
+	_ = e2epod.CreateExecPodOrFail(ctx, kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
 		pod.Spec.Containers[0].Name = uniqueContainerName
 	})
 	ginkgo.DeferCleanup(kubeClient.CoreV1().Pods(f.Namespace.Name).Delete, uniqueContainerName, metav1.DeleteOptions{})