diff --git a/hack/conformance/check_conformance_test_requirements.go b/hack/conformance/check_conformance_test_requirements.go index d233a21e578..8f53d03c730 100644 --- a/hack/conformance/check_conformance_test_requirements.go +++ b/hack/conformance/check_conformance_test_requirements.go @@ -30,7 +30,7 @@ import ( ) const ( - //e.g. framework.ConformanceIt("should provide secure master service ", func() { + //e.g. framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) { patternStartConformance = `framework.ConformanceIt\(.*, func\(\) {$` patternEndConformance = `}\)$` patternSkip = `e2eskipper.Skip.*\(` diff --git a/test/conformance/cf_header.md b/test/conformance/cf_header.md index 66b4f3acd26..9ce60887b92 100644 --- a/test/conformance/cf_header.md +++ b/test/conformance/cf_header.md @@ -19,7 +19,7 @@ Example: Testname: Kubelet, log output, default Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. */ -framework.ConformanceIt("should print the output to logs [NodeConformance]", func() { +framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) { ``` would generate the following documentation for the test. Note that the "TestName" from the Documentation above will diff --git a/test/e2e/README.md b/test/e2e/README.md index 3ff2135ea67..bef1f2cbff2 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -63,7 +63,7 @@ import ( ) var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { /* ... */ - ginkgo.It("should sign the new added bootstrap tokens", func() { + ginkgo.It("should sign the new added bootstrap tokens", func(ctx context.Context) { /* ... */ }) /* etc */ diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 4b95a0c035d..ed13e37bdf6 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -83,7 +83,7 @@ var _ = SIGDescribe("Aggregator", func() { Description: Ensure that the sample-apiserver code from 1.17 and compiled against 1.17 will work on the current Aggregator/API-Server. 
*/ - framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func() { + framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func(ctx context.Context) { // Testing a 1.17 version of the sample-apiserver TestSampleAPIServer(f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer)) }) diff --git a/test/e2e/apimachinery/apiserver_identity.go b/test/e2e/apimachinery/apiserver_identity.go index 2a54ad9e793..826a287fd93 100644 --- a/test/e2e/apimachinery/apiserver_identity.go +++ b/test/e2e/apimachinery/apiserver_identity.go @@ -81,7 +81,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func( f := framework.NewDefaultFramework("kube-apiserver-identity") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func() { + ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce") client := f.ClientSet diff --git a/test/e2e/apimachinery/apply.go b/test/e2e/apimachinery/apply.go index e5579dac2ff..f7ecc35fe65 100644 --- a/test/e2e/apimachinery/apply.go +++ b/test/e2e/apimachinery/apply.go @@ -68,7 +68,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, Create Description: Apply an object. An apply on an object that does not exist MUST create the object. */ - ginkgo.It("should create an applied object if it does not already exist", func() { + ginkgo.It("should create an applied object if it does not already exist", func(ctx context.Context) { testCases := []struct { resource string name string @@ -162,7 +162,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, Subresource Description: Apply a resource and issue a subsequent apply on a subresource. The subresource MUST be updated with the applied object contents. */ - ginkgo.It("should work for subresources", func() { + ginkgo.It("should work for subresources", func(ctx context.Context) { { testCases := []struct { resource string @@ -270,7 +270,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, unset field Description: Apply an object. Issue a subsequent apply that removes a field. The particular field MUST be removed. */ - ginkgo.It("should remove a field if it is owned but removed in the apply request", func() { + ginkgo.It("should remove a field if it is owned but removed in the apply request", func(ctx context.Context) { obj := []byte(`{ "apiVersion": "apps/v1", "kind": "Deployment", @@ -373,7 +373,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, unset field shared Description: Apply an object. Unset ownership of a field that is also owned by other managers and make a subsequent apply request. The unset field MUST not be removed from the object. */ - ginkgo.It("should not remove a field if an owner unsets the field but other managers still have ownership of the field", func() { + ginkgo.It("should not remove a field if an owner unsets the field but other managers still have ownership of the field", func(ctx context.Context) { // spec.replicas is a optional, defaulted field // spec.template.spec.hostname is an optional, non-defaulted field apply := []byte(`{ @@ -482,7 +482,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, Force Apply Description: Apply an object. 
Force apply a modified version of the object such that a conflict will exist in the managed fields. The force apply MUST successfully update the object. */ - ginkgo.It("should ignore conflict errors if force apply is used", func() { + ginkgo.It("should ignore conflict errors if force apply is used", func(ctx context.Context) { obj := []byte(`{ "apiVersion": "apps/v1", "kind": "Deployment", @@ -569,7 +569,7 @@ var _ = SIGDescribe("ServerSideApply", func() { Testname: Server Side Apply, CRD Description: Create a CRD and apply a CRD resource. Subsequent apply requests that do not conflict with the previous ones should update the object. Apply requests that cause conflicts should fail. */ - ginkgo.It("should work for CRDs", func() { + ginkgo.It("should work for CRDs", func(ctx context.Context) { config, err := framework.LoadConfig() if err != nil { framework.Failf("%s", err) @@ -967,7 +967,7 @@ spec: Testname: Server Side Apply, Update take ownership Description: Apply an object. Send an Update request which should take ownership of a field. The field should be owned by the new manager and a subsequent apply from the original manager MUST not change the field it does not have ownership of. */ - ginkgo.It("should give up ownership of a field if forced applied by a controller", func() { + ginkgo.It("should give up ownership of a field if forced applied by a controller", func(ctx context.Context) { // Applier creates a deployment with replicas set to 3 apply := []byte(`{ "apiVersion": "apps/v1", diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index 8584ecb83e9..bb0ea369ad5 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -76,7 +76,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { }) }) - ginkgo.It("should return chunks of results for list calls", func() { + ginkgo.It("should return chunks of results for list calls", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet client := c.CoreV1().PodTemplates(ns) @@ -123,7 +123,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources)) }) - ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func() { + ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet client := c.CoreV1().PodTemplates(ns) diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 95c54679222..18a5329c981 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -146,7 +146,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", Description: Register a conversion webhook and a custom resource definition. Create a v1 custom resource. Attempts to read it at v2 MUST succeed. 
*/ - framework.ConformanceIt("should be able to convert from CR v1 to CR v2", func() { + framework.ConformanceIt("should be able to convert from CR v1 to CR v2", func(ctx context.Context) { testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *apiextensionsv1.CustomResourceDefinition) { crd.Spec.Versions = apiVersions crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ @@ -181,7 +181,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", v1. Change the custom resource definition storage to v2. Create a custom resource stored at v2. Attempt to list the custom resources at v2; the list result MUST contain both custom resources at v2. */ - framework.ConformanceIt("should be able to convert a non homogeneous list of CRs", func() { + framework.ConformanceIt("should be able to convert a non homogeneous list of CRs", func(ctx context.Context) { testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *apiextensionsv1.CustomResourceDefinition) { crd.Spec.Versions = apiVersions crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index e5b89c61b86..db4230081f3 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -66,7 +66,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu explain the nested custom resource properties. All validation should be the same. */ - framework.ConformanceIt("works for CRD with validation schema", func() { + framework.ConformanceIt("works for CRD with validation schema", func(ctx context.Context) { crd, err := setupCRD(f, schemaFoo, "foo", "v1") if err != nil { framework.Failf("%v", err) @@ -150,7 +150,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu Attempt to create and apply a change a custom resource, via kubectl; kubectl validation MUST accept unknown properties. Attempt kubectl explain; the output MUST contain a valid DESCRIPTION stanza. */ - framework.ConformanceIt("works for CRD without validation schema", func() { + framework.ConformanceIt("works for CRD without validation schema", func(ctx context.Context) { crd, err := setupCRD(f, nil, "empty", "v1") if err != nil { framework.Failf("%v", err) @@ -191,7 +191,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu Attempt to create and apply a change a custom resource, via kubectl; kubectl validation MUST accept unknown properties. Attempt kubectl explain; the output MUST show the custom resource KIND. */ - framework.ConformanceIt("works for CRD preserving unknown fields at the schema root", func() { + framework.ConformanceIt("works for CRD preserving unknown fields at the schema root", func(ctx context.Context) { crd, err := setupCRDAndVerifySchema(f, schemaPreserveRoot, nil, "unknown-at-root", "v1") if err != nil { framework.Failf("%v", err) @@ -233,7 +233,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu properties. Attempt kubectl explain; the output MUST show that x-preserve-unknown-properties is used on the nested field. 
*/ - framework.ConformanceIt("works for CRD preserving unknown fields in an embedded object", func() { + framework.ConformanceIt("works for CRD preserving unknown fields in an embedded object", func(ctx context.Context) { crd, err := setupCRDAndVerifySchema(f, schemaPreserveNested, nil, "unknown-in-nested", "v1") if err != nil { framework.Failf("%v", err) @@ -273,7 +273,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu Description: Register multiple custom resource definitions spanning different groups and versions; OpenAPI definitions MUST be published for custom resource definitions. */ - framework.ConformanceIt("works for multiple CRDs of different groups", func() { + framework.ConformanceIt("works for multiple CRDs of different groups", func(ctx context.Context) { ginkgo.By("CRs in different groups (two CRDs) show up in OpenAPI documentation") crdFoo, err := setupCRD(f, schemaFoo, "foo", "v1") if err != nil { @@ -306,7 +306,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu Description: Register a custom resource definition with multiple versions; OpenAPI definitions MUST be published for custom resource definitions. */ - framework.ConformanceIt("works for multiple CRDs of same group but different versions", func() { + framework.ConformanceIt("works for multiple CRDs of same group but different versions", func(ctx context.Context) { ginkgo.By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation") crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3") if err != nil { @@ -354,7 +354,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu Description: Register multiple custom resource definitions in the same group and version but spanning different kinds; OpenAPI definitions MUST be published for custom resource definitions. */ - framework.ConformanceIt("works for multiple CRDs of same group and version but different kinds", func() { + framework.ConformanceIt("works for multiple CRDs of same group and version but different kinds", func(ctx context.Context) { ginkgo.By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation") crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v6") if err != nil { @@ -388,7 +388,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu for custom resource definitions. Rename one of the versions of the custom resource definition via a patch; OpenAPI definitions MUST update to reflect the rename. */ - framework.ConformanceIt("updates the published spec when one version gets renamed", func() { + framework.ConformanceIt("updates the published spec when one version gets renamed", func(ctx context.Context) { ginkgo.By("set up a multi version CRD") crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3") if err != nil { @@ -439,7 +439,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu for custom resource definitions. Update the custom resource definition to not serve one of the versions. OpenAPI definitions MUST be updated to not contain the version that is no longer served. 
*/ - framework.ConformanceIt("removes definition from spec when one version gets changed to not be served", func() { + framework.ConformanceIt("removes definition from spec when one version gets changed to not be served", func(ctx context.Context) { ginkgo.By("set up a multi version CRD") crd, err := setupCRD(f, schemaFoo, "multi-to-single-ver", "v5", "v6alpha1") if err != nil { @@ -479,7 +479,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu }) // Marked as flaky until https://github.com/kubernetes/kubernetes/issues/65517 is solved. - ginkgo.It("[Flaky] kubectl explain works for CR with the same resource name as built-in object.", func() { + ginkgo.It("[Flaky] kubectl explain works for CR with the same resource name as built-in object.", func(ctx context.Context) { customServiceShortName := fmt.Sprintf("ksvc-%d", time.Now().Unix()) // make short name unique opt := func(crd *apiextensionsv1.CustomResourceDefinition) { crd.ObjectMeta = metav1.ObjectMeta{Name: "services." + crd.Spec.Group} diff --git a/test/e2e/apimachinery/crd_validation_rules.go b/test/e2e/apimachinery/crd_validation_rules.go index 8502821dc67..dbd8278449d 100644 --- a/test/e2e/apimachinery/crd_validation_rules.go +++ b/test/e2e/apimachinery/crd_validation_rules.go @@ -94,7 +94,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } } }`)) - ginkgo.It("MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules", func() { + ginkgo.It("MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules", func(ctx context.Context) { ginkgo.By("Creating a custom resource definition with validation rules") crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false) crd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) @@ -124,7 +124,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f }}, metav1.CreateOptions{}) framework.ExpectNoError(err, "validation rules satisfied") }) - ginkgo.It("MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules", func() { + ginkgo.It("MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules", func(ctx context.Context) { ginkgo.By("Creating a custom resource definition with validation rules") crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false) crd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) @@ -156,7 +156,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } }) - ginkgo.It("MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist", func() { + ginkgo.It("MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist", func(ctx context.Context) { ginkgo.By("Defining a custom resource definition with a validation rule that refers to a property that do not exist") var schemaWithInvalidValidationRule = unmarshallSchema([]byte(`{ "type":"object", @@ -181,7 +181,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } }) - ginkgo.It("MUST fail create of a custom resource 
definition that contains an x-kubernetes-validations rule that contains a syntax error", func() { + ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error", func(ctx context.Context) { ginkgo.By("Defining a custom resource definition that contains a validation rule with a syntax error") var schemaWithSyntaxErrorRule = unmarshallSchema([]byte(`{ "type":"object", @@ -203,7 +203,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } }) - ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit", func() { + ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit", func(ctx context.Context) { ginkgo.By("Defining a custom resource definition that contains a validation rule that exceeds the cost limit") var schemaWithExpensiveRule = unmarshallSchema([]byte(`{ "type":"object", @@ -236,7 +236,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } }) - ginkgo.It("MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution", func() { + ginkgo.It("MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution", func(ctx context.Context) { ginkgo.By("Defining a custom resource definition including an expensive rule on a large amount of data") crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false) _, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) @@ -266,7 +266,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f } }) - ginkgo.It("MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule", func() { + ginkgo.It("MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule", func(ctx context.Context) { ginkgo.By("Defining a custom resource definition with a x-kubernetes-validations transition rule") var schemaWithTransitionRule = unmarshallSchema([]byte(`{ "type":"object", diff --git a/test/e2e/apimachinery/crd_watch.go b/test/e2e/apimachinery/crd_watch.go index a6df111ad0f..4a9117b1c59 100644 --- a/test/e2e/apimachinery/crd_watch.go +++ b/test/e2e/apimachinery/crd_watch.go @@ -48,7 +48,7 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]", Description: Create a Custom Resource Definition. Attempt to watch it; the watch MUST observe create, modify and delete events. */ - framework.ConformanceIt("watch on custom resource definition objects", func() { + framework.ConformanceIt("watch on custom resource definition objects", func(ctx context.Context) { const ( watchCRNameA = "name1" diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index ee30262ea4e..dcc5934c5f5 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Create the custom resource definition and then delete it. The creation and deletion MUST be successful. 
*/ - framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() { + framework.ConformanceIt("creating/deleting custom resource definition objects works ", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") @@ -82,7 +82,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin custom resource definitions via delete collection; the delete MUST be successful and MUST delete only the labeled custom resource definitions. */ - framework.ConformanceIt("listing custom resource definition objects works ", func() { + framework.ConformanceIt("listing custom resource definition objects works ", func(ctx context.Context) { testListSize := 10 config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") @@ -142,7 +142,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Description: Create a custom resource definition. Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. */ - framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func() { + framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") apiExtensionClient, err := clientset.NewForConfig(config) @@ -195,7 +195,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Description: Fetch /apis, /apis/apiextensions.k8s.io, and /apis/apiextensions.k8s.io/v1 discovery documents, and ensure they indicate CustomResourceDefinition apiextensions.k8s.io/v1 resources are available. */ - framework.ConformanceIt("should include custom resource definition resources in discovery documents", func() { + framework.ConformanceIt("should include custom resource definition resources in discovery documents", func(ctx context.Context) { { ginkgo.By("fetching the /apis discovery document") apiGroupList := &metav1.APIGroupList{} @@ -266,7 +266,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin the default is applied. Create another CR. Remove default, add default for another field and read CR until new field is defaulted, but old default stays. 
*/ - framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func() { + framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") apiExtensionClient, err := clientset.NewForConfig(config) diff --git a/test/e2e/apimachinery/discovery.go b/test/e2e/apimachinery/discovery.go index 414666dccf9..4b70607179b 100644 --- a/test/e2e/apimachinery/discovery.go +++ b/test/e2e/apimachinery/discovery.go @@ -49,7 +49,7 @@ var _ = SIGDescribe("Discovery", func() { setupServerCert(namespaceName, serviceName) }) - ginkgo.It("should accurately determine present and missing resources", func() { + ginkgo.It("should accurately determine present and missing resources", func(ctx context.Context) { // checks that legacy api group resources function ok, err := clientdiscovery.IsResourceEnabled(f.ClientSet.Discovery(), schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}) framework.ExpectNoError(err) @@ -76,7 +76,7 @@ var _ = SIGDescribe("Discovery", func() { } }) - ginkgo.It("Custom resource should have storage version hash", func() { + ginkgo.It("Custom resource should have storage version hash", func(ctx context.Context) { testcrd, err := crd.CreateTestCRD(f) if err != nil { return @@ -119,7 +119,7 @@ var _ = SIGDescribe("Discovery", func() { Description: Ensure that a list of apis is retrieved. Each api group found MUST return a valid PreferredVersion unless the group suffix is example.com. */ - framework.ConformanceIt("should validate PreferredVersion for each APIGroup", func() { + framework.ConformanceIt("should validate PreferredVersion for each APIGroup", func(ctx context.Context) { // get list of APIGroup endpoints list := &metav1.APIGroupList{} diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index bed9dd52d59..f587b1fe01c 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -60,7 +60,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should recover from network partition with master", func() { + ginkgo.It("should recover from network partition with master", func(ctx context.Context) { etcdFailTest( f, "sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP", @@ -68,7 +68,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() { ) }) - ginkgo.It("should recover from SIGKILL", func() { + ginkgo.It("should recover from SIGKILL", func(ctx context.Context) { etcdFailTest( f, "pgrep etcd | xargs -I {} sudo kill -9 {}", diff --git a/test/e2e/apimachinery/flowcontrol.go b/test/e2e/apimachinery/flowcontrol.go index 7ef5ecb8c96..b50f62179d5 100644 --- a/test/e2e/apimachinery/flowcontrol.go +++ b/test/e2e/apimachinery/flowcontrol.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("API priority and fairness", func() { f := framework.NewDefaultFramework("apf") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration", func() { + ginkgo.It("should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration", func(ctx context.Context) { testingFlowSchemaName := "e2e-testing-flowschema" testingPriorityLevelName := "e2e-testing-prioritylevel" matchingUsername := "noxu" @@ -97,7 +97,7 @@ var _ = SIGDescribe("API priority and 
fairness", func() { // clients making requests at different rates, we test to make sure that the // higher QPS client cannot drown out the other one despite having higher // priority. - ginkgo.It("should ensure that requests can't be drowned out (priority)", func() { + ginkgo.It("should ensure that requests can't be drowned out (priority)", func(ctx context.Context) { // See https://github.com/kubernetes/kubernetes/issues/96710 ginkgo.Skip("skipping test until flakiness is resolved") @@ -184,7 +184,7 @@ var _ = SIGDescribe("API priority and fairness", func() { // and priority level. We expect APF's "ByUser" flow distinguisher to isolate // the two clients and not allow one client to drown out the other despite // having a higher QPS. - ginkgo.It("should ensure that requests can't be drowned out (fairness)", func() { + ginkgo.It("should ensure that requests can't be drowned out (fairness)", func(ctx context.Context) { // See https://github.com/kubernetes/kubernetes/issues/96710 ginkgo.Skip("skipping test until flakiness is resolved") diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index a6970ea5303..3b917e0cbfd 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -309,7 +309,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, delete replication controller, propagation policy background Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted. */ - framework.ConformanceIt("should delete pods created by rc when not orphaning", func() { + framework.ConformanceIt("should delete pods created by rc when not orphaning", func(ctx context.Context) { clientSet := f.ClientSet rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) @@ -367,7 +367,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, delete replication controller, propagation policy orphan Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned. */ - framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() { + framework.ConformanceIt("should orphan pods created by rc if delete options say so", func(ctx context.Context) { clientSet := f.ClientSet rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) @@ -436,7 +436,7 @@ var _ = SIGDescribe("Garbage collector", func() { // deleteOptions.OrphanDependents is deprecated in 1.7 and preferred to use the PropagationPolicy. // Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote for conformance in future. 
- ginkgo.It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() { + ginkgo.It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func(ctx context.Context) { clientSet := f.ClientSet rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) @@ -488,7 +488,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, delete deployment, propagation policy background Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment and also the Pods that belong to the deployments MUST be deleted. */ - framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() { + framework.ConformanceIt("should delete RS created by deployment when not orphaning", func(ctx context.Context) { clientSet := f.ClientSet deployClient := clientSet.AppsV1().Deployments(f.Namespace.Name) rsClient := clientSet.AppsV1().ReplicaSets(f.Namespace.Name) @@ -547,7 +547,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, delete deployment, propagation policy orphan Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, also the Pods created by the deployments MUST be orphaned. */ - framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() { + framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func(ctx context.Context) { clientSet := f.ClientSet deployClient := clientSet.AppsV1().Deployments(f.Namespace.Name) rsClient := clientSet.AppsV1().ReplicaSets(f.Namespace.Name) @@ -647,7 +647,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, delete replication controller, after owned pods Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted. */ - framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() { + framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func(ctx context.Context) { clientSet := f.ClientSet rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) @@ -732,7 +732,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, multiple owners Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create second replication controller RC2 and set RC2 as owner for half of those replicas. Once RC1 is created and the all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods that has RC2 as owner MUST not be deleted or have a deletion timestamp. 
Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers. */ - framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() { + framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func(ctx context.Context) { clientSet := f.ClientSet rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) podClient := clientSet.CoreV1().Pods(f.Namespace.Name) @@ -846,7 +846,7 @@ var _ = SIGDescribe("Garbage collector", func() { Testname: Garbage Collector, dependency cycle Description: Create three pods, patch them with Owner references such that pod1 has pod3, pod2 has pod1 and pod3 has pod2 as owner references respectively. Delete pod1 MUST delete all pods. The dependency cycle MUST not block the garbage collection. */ - framework.ConformanceIt("should not be blocked by dependency circle", func() { + framework.ConformanceIt("should not be blocked by dependency circle", func(ctx context.Context) { clientSet := f.ClientSet podClient := clientSet.CoreV1().Pods(f.Namespace.Name) pod1Name := "pod1" @@ -902,7 +902,7 @@ var _ = SIGDescribe("Garbage collector", func() { } }) - ginkgo.It("should support cascading deletion of custom resources", func() { + ginkgo.It("should support cascading deletion of custom resources", func(ctx context.Context) { config, err := framework.LoadConfig() if err != nil { framework.Failf("failed to load config: %v", err) @@ -1037,7 +1037,7 @@ var _ = SIGDescribe("Garbage collector", func() { } }) - ginkgo.It("should support orphan deletion of custom resources", func() { + ginkgo.It("should support orphan deletion of custom resources", func(ctx context.Context) { config, err := framework.LoadConfig() if err != nil { framework.Failf("failed to load config: %v", err) @@ -1142,7 +1142,7 @@ var _ = SIGDescribe("Garbage collector", func() { } }) - ginkgo.It("should delete jobs and pods created by cronjob", func() { + ginkgo.It("should delete jobs and pods created by cronjob", func(ctx context.Context) { ginkgo.By("Create the cronjob") cronJob := newCronJob("simple", "*/1 * * * ?") diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index 1e566e0872b..7b5726c42b6 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ b/test/e2e/apimachinery/generated_clientset.go @@ -102,7 +102,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) var _ = SIGDescribe("Generated clientset", func() { f := framework.NewDefaultFramework("clientset") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() { + ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func(ctx context.Context) { podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) ginkgo.By("constructing the pod") name := "pod" + string(uuid.NewUUID()) @@ -216,7 +216,7 @@ var _ = SIGDescribe("Generated clientset", func() { f := framework.NewDefaultFramework("clientset") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should create v1 cronJobs, delete cronJobs, watch cronJobs", func() { + ginkgo.It("should create v1 cronJobs, delete cronJobs, watch cronJobs", func(ctx context.Context) { cronJobClient := 
f.ClientSet.BatchV1().CronJobs(f.Namespace.Name) ginkgo.By("constructing the cronJob") name := "cronjob" + string(uuid.NewUUID()) diff --git a/test/e2e/apimachinery/health_handlers.go b/test/e2e/apimachinery/health_handlers.go index fbc772696fe..2cb1bce7f14 100644 --- a/test/e2e/apimachinery/health_handlers.go +++ b/test/e2e/apimachinery/health_handlers.go @@ -119,7 +119,7 @@ var _ = SIGDescribe("health handlers", func() { f := framework.NewDefaultFramework("health") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should contain necessary checks", func() { + ginkgo.It("should contain necessary checks", func(ctx context.Context) { ginkgo.By("/health") err := testPath(f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks) framework.ExpectNoError(err) diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 5c767b7c1a7..cea4b13419b 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -265,7 +265,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { The Namespace is patched. The Namespace MUST now include the new Label. */ - framework.ConformanceIt("should patch a Namespace", func() { + framework.ConformanceIt("should patch a Namespace", func(ctx context.Context) { ginkgo.By("creating a Namespace") namespaceName := "nspatchtest-" + string(uuid.NewUUID()) ns, err := f.CreateNamespace(namespaceName, nil) @@ -296,7 +296,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { equal the new values. Given the updating of the namespace status, the fields MUST equal the new values. */ - framework.ConformanceIt("should apply changes to a namespace status", func() { + framework.ConformanceIt("should apply changes to a namespace status", func(ctx context.Context) { ns := f.Namespace.Name dc := f.DynamicClient nsResource := v1.SchemeGroupVersion.WithResource("namespaces") @@ -363,7 +363,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { Description: When updating the namespace, it MUST succeed and the field MUST equal the new value. */ - framework.ConformanceIt("should apply an update to a Namespace", func() { + framework.ConformanceIt("should apply an update to a Namespace", func(ctx context.Context) { var err error var updatedNamespace *v1.Namespace ns := f.Namespace.Name @@ -391,7 +391,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { fake finalizer MUST be found. Removing the fake finalizer from the namespace MUST succeed and the fake finalizer MUST NOT be found. */ - framework.ConformanceIt("should apply a finalizer to a Namespace", func() { + framework.ConformanceIt("should apply a finalizer to a Namespace", func(ctx context.Context) { fakeFinalizer := v1.FinalizerName("e2e.example.com/fakeFinalizer") var updatedNamespace *v1.Namespace diff --git a/test/e2e/apimachinery/request_timeout.go b/test/e2e/apimachinery/request_timeout.go index fa0878d7900..4a0963d3c6a 100644 --- a/test/e2e/apimachinery/request_timeout.go +++ b/test/e2e/apimachinery/request_timeout.go @@ -17,6 +17,7 @@ limitations under the License.
package apimachinery import ( + "context" "io" "net/http" "strings" @@ -35,7 +36,7 @@ var _ = SIGDescribe("Server request timeout", func() { f := framework.NewDefaultFramework("request-timeout") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should return HTTP status code 400 if the user specifies an invalid timeout in the request URL", func() { + ginkgo.It("should return HTTP status code 400 if the user specifies an invalid timeout in the request URL", func(ctx context.Context) { rt := getRoundTripper(f) req := newRequest(f, "invalid") @@ -53,7 +54,7 @@ var _ = SIGDescribe("Server request timeout", func() { } }) - ginkgo.It("the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed", func() { + ginkgo.It("the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed", func(ctx context.Context) { rt := getRoundTripper(f) // Choose a timeout that exceeds the default timeout (60s) enforced by the apiserver req := newRequest(f, "3m") @@ -67,7 +68,7 @@ var _ = SIGDescribe("Server request timeout", func() { } }) - ginkgo.It("default timeout should be used if the specified timeout in the request URL is 0s", func() { + ginkgo.It("default timeout should be used if the specified timeout in the request URL is 0s", func(ctx context.Context) { rt := getRoundTripper(f) req := newRequest(f, "0s") diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index a80d0402e5c..9a8d9acfe2f 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -72,7 +72,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Testname: ResourceQuota, object count quota, resourcequotas Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace. */ - framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func() { + framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -97,7 +97,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a Service. Its creation MUST be successful and resource usage count against the Service object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota. Delete the Service. Deletion MUST succeed and resource usage count against the Service object MUST be released from ResourceQuotaStatus of the ResourceQuota. */ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -157,7 +157,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a Secret. Its creation MUST be successful and resource usage count against the Secret object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota. Delete the Secret. Deletion MUST succeed and resource usage count against the Secret object MUST be released from ResourceQuotaStatus of the ResourceQuota. 
*/ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func(ctx context.Context) { ginkgo.By("Discovering how many secrets are in namespace by default") found, unchanged := 0, 0 // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. @@ -227,7 +227,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Update the successfully created pod's resource requests. The update MUST fail as a Pod cannot dynamically update its resource requirements. Delete the successfully created Pod. Pod deletion MUST be successful and it MUST release the allocated resource counts from ResourceQuotaStatus of the ResourceQuota. */ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -323,7 +323,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a ConfigMap. Its creation MUST be successful and resource usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus of the ResourceQuota. Delete the ConfigMap. Deletion MUST succeed and resource usage count against the ConfigMap object MUST be released from ResourceQuotaStatus of the ResourceQuota. */ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func(ctx context.Context) { found, unchanged := 0, 0 // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. @@ -389,7 +389,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a ReplicationController. Its creation MUST be successful and resource usage count against the ReplicationController object MUST be captured in ResourceQuotaStatus of the ResourceQuota. Delete the ReplicationController. Deletion MUST succeed and resource usage count against the ReplicationController object MUST be released from ResourceQuotaStatus of the ResourceQuota. */ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -445,7 +445,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a ReplicaSet. Its creation MUST be successful and resource usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus of the ResourceQuota. Delete the ReplicaSet. Deletion MUST succeed and resource usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus of the ResourceQuota. 
*/ - framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func() { + framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -492,7 +492,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Delete the PVC. Deletion MUST succeed and resource usage count against its PVC and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota. [NotConformancePromotable] as the test suite does not have any e2e tests at this moment which explicitly verify PV and PVC behaviour. */ - ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func() { + ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -542,7 +542,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Delete the PVC. Deletion MUST succeed and resource usage count against PVC, storageClass and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota. [NotConformancePromotable] as the test suite does not have any e2e tests at this moment which explicitly verify PV and PVC behaviour. */ - ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func() { + ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func(ctx context.Context) { ginkgo.By("Counting existing ResourceQuota") c, err := countResourceQuota(f.ClientSet, f.Namespace.Name) framework.ExpectNoError(err) @@ -594,7 +594,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) }) - ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func() { + ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func(ctx context.Context) { ginkgo.By("Creating a Custom Resource Definition") testcrd, err := crd.CreateTestCRD(f) framework.ExpectNoError(err) @@ -687,7 +687,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a pod with specified activeDeadlineSeconds and resourceRequirements for CPU and Memory that fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'Terminating' scoped ResourceQuota but MUST NOT be captured in 'NotTerminating' scoped ResourceQuota. Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'Terminating' scoped ResourceQuota. */ - framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func() { + framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with terminating scope") quotaTerminatingName := "quota-terminating" resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) @@ -800,7 +800,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Create a 'NotBestEffort' Pod by explicitly specifying resource limits and requests. 
Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota but MUST NOT be captured in 'BestEffort' scoped ResourceQuota. Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota. */ - framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func() { + framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with best effort scope") resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) framework.ExpectNoError(err) @@ -881,7 +881,7 @@ var _ = SIGDescribe("ResourceQuota", func() { When ResourceQuota is updated to modify CPU and Memory quota limits, update MUST succeed with updated values for CPU and Memory limits. When ResourceQuota is deleted, it MUST not be available in the namespace. */ - framework.ConformanceIt("should be able to update and delete ResourceQuota.", func() { + framework.ConformanceIt("should be able to update and delete ResourceQuota.", func(ctx context.Context) { client := f.ClientSet ns := f.Namespace.Name @@ -940,7 +940,7 @@ var _ = SIGDescribe("ResourceQuota", func() { the new values. It MUST succeed at deleting a collection of ResourceQuota via a label selector. */ - framework.ConformanceIt("should manage the lifecycle of a ResourceQuota", func() { + framework.ConformanceIt("should manage the lifecycle of a ResourceQuota", func(ctx context.Context) { client := f.ClientSet ns := f.Namespace.Name @@ -1007,7 +1007,7 @@ var _ = SIGDescribe("ResourceQuota", func() { values MUST succeed. The spec MUST NOT be changed when patching /status. 
*/ - framework.ConformanceIt("should apply changes to a resourcequota status", func() { + framework.ConformanceIt("should apply changes to a resourcequota status", func(ctx context.Context) { ns := f.Namespace.Name rqClient := f.ClientSet.CoreV1().ResourceQuotas(ns) rqName := "e2e-rq-status-" + utilrand.String(5) @@ -1063,7 +1063,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err, "Failed to update resourceQuota") ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName)) - ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort) + ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort) defer cancel() _, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) { @@ -1202,7 +1202,7 @@ var _ = SIGDescribe("ResourceQuota", func() { var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { f := framework.NewDefaultFramework("scope-selectors") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func() { + ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with best effort scope") resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort)) framework.ExpectNoError(err) @@ -1275,7 +1275,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func() { + ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with terminating scope") quotaTerminatingName := "quota-terminating" resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating)) @@ -1384,7 +1384,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { f := framework.NewDefaultFramework("resourcequota-priorityclass") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1425,7 +1425,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), 
&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1472,7 +1472,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1521,7 +1521,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() { + ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { framework.Failf("unexpected error while creating priority class: %v", err) @@ -1579,7 +1579,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1615,7 +1615,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func(ctx context.Context) { _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1656,7 +1656,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() { + ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func(ctx context.Context) { _, err := 
f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -1725,7 +1725,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { var _ = SIGDescribe("ResourceQuota", func() { f := framework.NewDefaultFramework("cross-namespace-pod-affinity") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func() { + ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func(ctx context.Context) { ginkgo.By("Creating a ResourceQuota with cross namespace pod affinity scope") quota, err := createResourceQuota( f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-cross-namespace-pod-affinity", v1.ResourceQuotaScopeCrossNamespacePodAffinity)) diff --git a/test/e2e/apimachinery/server_version.go b/test/e2e/apimachinery/server_version.go index f01a41ef99b..9df53bc8ade 100644 --- a/test/e2e/apimachinery/server_version.go +++ b/test/e2e/apimachinery/server_version.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "regexp" "k8s.io/apimachinery/pkg/version" @@ -36,7 +37,7 @@ var _ = SIGDescribe("server version", func() { Description: Ensure that an API server version can be retrieved. Both the major and minor versions MUST only be an integer. */ - framework.ConformanceIt("should find the server version", func() { + framework.ConformanceIt("should find the server version", func(ctx context.Context) { ginkgo.By("Request ServerVersion") diff --git a/test/e2e/apimachinery/storage_version.go b/test/e2e/apimachinery/storage_version.go index b797bc9dc54..57ba66fd4ef 100644 --- a/test/e2e/apimachinery/storage_version.go +++ b/test/e2e/apimachinery/storage_version.go @@ -40,7 +40,7 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func f := framework.NewDefaultFramework("storage-version") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("storage version with non-existing id should be GC'ed", func() { + ginkgo.It("storage version with non-existing id should be GC'ed", func(ctx context.Context) { client := f.ClientSet sv := &apiserverinternalv1alpha1.StorageVersion{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index d5b8fed4fd2..53c2c46dce4 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { e2eskipper.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery()) }) - ginkgo.It("should return pod details", func() { + ginkgo.It("should return pod details", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet @@ -77,7 +77,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { framework.Logf("Table:\n%s", out) }) - ginkgo.It("should return chunks of table results for list calls", func() { + ginkgo.It("should return chunks of table results for list calls", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet client := c.CoreV1().PodTemplates(ns) @@ -126,7 +126,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { 
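// Editor's aside (illustrative only, not part of the patch): once an It body takes
// func(ctx context.Context), the file has to import "context", exactly as the
// server_version.go hunk above adds it. A minimal sketch of the resulting shape,
// assuming the real github.com/onsi/ginkgo/v2 package; the spec name is invented:
//
//	import (
//		"context"
//
//		"github.com/onsi/ginkgo/v2"
//	)
//
//	var _ = ginkgo.It("uses the injected context", func(ctx context.Context) {
//		_ = ctx // cancelled by Ginkgo when the spec times out or is interrupted
//	})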
framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0002") }) - ginkgo.It("should return generic metadata details across all namespaces for nodes", func() { + ginkgo.It("should return generic metadata details across all namespaces for nodes", func(ctx context.Context) { c := f.ClientSet table := &metav1beta1.Table{} @@ -151,7 +151,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { Description: Issue a HTTP request to the API. HTTP request MUST return a HTTP status code of 406. */ - framework.ConformanceIt("should return a 406 for a backend which does not implement metadata", func() { + framework.ConformanceIt("should return a 406 for a backend which does not implement metadata", func(ctx context.Context) { c := f.ClientSet table := &metav1beta1.Table{} diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index ca055a94ca0..15ba8fc9841 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -57,7 +57,7 @@ var _ = SIGDescribe("Watchers", func() { update, and delete notifications on configmaps that match a label selector and do not receive notifications for configmaps which do not match that label selector. */ - framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func() { + framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func(ctx context.Context) { c := f.ClientSet ns := f.Namespace.Name @@ -139,7 +139,7 @@ var _ = SIGDescribe("Watchers", func() { Description: Ensure that a watch can be opened from a particular resource version in the past and only notifications happening after that resource version are observed. */ - framework.ConformanceIt("should be able to start watching from a specific resource version", func() { + framework.ConformanceIt("should be able to start watching from a specific resource version", func(ctx context.Context) { c := f.ClientSet ns := f.Namespace.Name @@ -188,7 +188,7 @@ var _ = SIGDescribe("Watchers", func() { observed by the previous watch, and it will continue delivering notifications from that point in time. */ - framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func() { + framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func(ctx context.Context) { c := f.ClientSet ns := f.Namespace.Name @@ -254,7 +254,7 @@ var _ = SIGDescribe("Watchers", func() { a watch's selector, the watch will observe a delete, and will not observe notifications for that object until it meets the selector's requirements again. */ - framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func() { + framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func(ctx context.Context) { c := f.ClientSet ns := f.Namespace.Name @@ -331,7 +331,7 @@ var _ = SIGDescribe("Watchers", func() { for events received from the first watch, initiated at the resource version of the event, and checking that all resource versions of all events match. Events are produced from writes on a background goroutine. 
*/ - framework.ConformanceIt("should receive events on concurrent watches in same order", func() { + framework.ConformanceIt("should receive events on concurrent watches in same order", func(ctx context.Context) { c := f.ClientSet ns := f.Namespace.Name diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index a93bd01dcc8..a66fca97423 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -114,7 +114,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { The mutatingwebhookconfigurations and validatingwebhookconfigurations resources MUST exist in the /apis/admissionregistration.k8s.io/v1 discovery document. */ - framework.ConformanceIt("should include webhook resources in discovery documents", func() { + framework.ConformanceIt("should include webhook resources in discovery documents", func(ctx context.Context) { { ginkgo.By("fetching the /apis discovery document") apiGroupList := &metav1.APIGroupList{} @@ -194,7 +194,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { and the pod creation MUST be denied. An attempt to create a non-compliant configmap in a whitelisted namespace based on the webhook namespace selector MUST be allowed. */ - framework.ConformanceIt("should be able to deny pod and configmap creation", func() { + framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) { webhookCleanup := registerWebhook(f, f.UniqueName, certCtx, servicePort) defer webhookCleanup() testWebhook(f) @@ -206,7 +206,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register an admission webhook configuration that denies connecting to a pod's attach sub-resource. Attempts to attach MUST be denied. */ - framework.ConformanceIt("should be able to deny attaching pod", func() { + framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) { webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort) defer webhookCleanup() testAttachingPodWebhook(f) @@ -218,7 +218,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register an admission webhook configuration that denies creation, update and deletion of custom resources. Attempts to create, update and delete custom resources MUST be denied. */ - framework.ConformanceIt("should be able to deny custom resource creation, update and deletion", func() { + framework.ConformanceIt("should be able to deny custom resource creation, update and deletion", func(ctx context.Context) { testcrd, err := crd.CreateTestCRD(f) if err != nil { return @@ -236,7 +236,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register a webhook with a fail closed policy and without CA bundle so that it cannot be called. Attempt operations that require the admission webhook; all MUST be denied. 
*/ - framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func() { + framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) { webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort) defer webhookCleanup() testFailClosedWebhook(f) @@ -249,7 +249,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { adds a data key if the configmap already has a specific key, and another that adds a key if the key added by the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map. */ - framework.ConformanceIt("should mutate configmap", func() { + framework.ConformanceIt("should mutate configmap", func(ctx context.Context) { webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort) defer webhookCleanup() testMutatingConfigMapWebhook(f) @@ -261,7 +261,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register a mutating webhook that adds an InitContainer to pods. Attempt to create a pod; the InitContainer MUST be added the TerminationMessagePolicy MUST be defaulted. */ - framework.ConformanceIt("should mutate pod and apply defaults after mutation", func() { + framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) { webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort) defer webhookCleanup() testMutatingPodWebhook(f) @@ -274,7 +274,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { and delete a webhook configuration object; both operations MUST be allowed and the webhook configuration object MUST NOT be mutated the webhooks. */ - framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func() { + framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) { validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort) defer validatingWebhookCleanup() mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort) @@ -288,7 +288,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register a webhook that mutates a custom resource. Attempt to create custom resource object; the custom resource MUST be mutated. */ - framework.ConformanceIt("should mutate custom resource", func() { + framework.ConformanceIt("should mutate custom resource", func(ctx context.Context) { testcrd, err := crd.CreateTestCRD(f) if err != nil { return @@ -305,7 +305,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { Description: Register a webhook that denies custom resource definition create. Attempt to create a custom resource definition; the create request MUST be denied. */ - framework.ConformanceIt("should deny crd creation", func() { + framework.ConformanceIt("should deny crd creation", func(ctx context.Context) { crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort) defer crdWebhookCleanup() @@ -320,7 +320,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { the stored version. 
	   Attempt to patch the custom resource with a new field and value; the patch MUST be applied successfully.
	*/
-	framework.ConformanceIt("should mutate custom resource with different stored version", func() {
+	framework.ConformanceIt("should mutate custom resource with different stored version", func(ctx context.Context) {
		testcrd, err := createAdmissionWebhookMultiVersionTestCRDWithV1Storage(f)
		if err != nil {
			return
@@ -338,7 +338,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   with a schema that includes only one of the data keys added by the webhooks. Attempt to create a custom resource;
	   the fields included in the schema MUST be present and fields not included in the schema MUST NOT be present.
	*/
-	framework.ConformanceIt("should mutate custom resource with pruning", func() {
+	framework.ConformanceIt("should mutate custom resource with pruning", func(ctx context.Context) {
		const prune = true
		testcrd, err := createAdmissionWebhookMultiVersionTestCRDWithV1Storage(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
			crd.Spec.PreserveUnknownFields = false
@@ -378,7 +378,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   the failure policy is ignore. Requests MUST NOT time out if the configured webhook timeout is 10 seconds
	   (much longer than the webhook wait duration).
	*/
-	framework.ConformanceIt("should honor timeout", func() {
+	framework.ConformanceIt("should honor timeout", func(ctx context.Context) {
		policyFail := admissionregistrationv1.Fail
		policyIgnore := admissionregistrationv1.Ignore
@@ -410,7 +410,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   operation and attempt to create an object; the webhook MUST NOT deny the create. Patch the webhook to apply to the create
	   operation again and attempt to create an object; the webhook MUST deny the create.
	*/
-	framework.ConformanceIt("patching/updating a validating webhook should work", func() {
+	framework.ConformanceIt("patching/updating a validating webhook should work", func(ctx context.Context) {
		client := f.ClientSet
		admissionClient := client.AdmissionregistrationV1()
@@ -505,7 +505,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   operation and attempt to create an object; the webhook MUST NOT mutate the object. Patch the webhook to apply to the create
	   operation again and attempt to create an object; the webhook MUST mutate the object.
	*/
-	framework.ConformanceIt("patching/updating a mutating webhook should work", func() {
+	framework.ConformanceIt("patching/updating a mutating webhook should work", func(ctx context.Context) {
		client := f.ClientSet
		admissionClient := client.AdmissionregistrationV1()
@@ -579,7 +579,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   object; the create MUST be denied. Attempt to remove the webhook configurations matching the label with deletecollection;
	   all webhook configurations MUST be deleted. Attempt to create an object; the create MUST NOT be denied.
	*/
-	framework.ConformanceIt("listing validating webhooks should work", func() {
+	framework.ConformanceIt("listing validating webhooks should work", func(ctx context.Context) {
		testListSize := 10
		testUUID := string(uuid.NewUUID())
@@ -653,7 +653,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
	   object; the object MUST be mutated. Attempt to remove the webhook configurations matching the label with deletecollection;
	   all webhook configurations MUST be deleted.
	   Attempt to create an object; the object MUST NOT be mutated.
	*/
-	framework.ConformanceIt("listing mutating webhooks should work", func() {
+	framework.ConformanceIt("listing mutating webhooks should work", func(ctx context.Context) {
		testListSize := 10
		testUUID := string(uuid.NewUUID())
diff --git a/test/e2e/apps/controller_revision.go b/test/e2e/apps/controller_revision.go
index 2b275847f71..87a499e9056 100644
--- a/test/e2e/apps/controller_revision.go
+++ b/test/e2e/apps/controller_revision.go
@@ -121,7 +121,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
	   Listing the ControllerRevisions by label selector MUST find only one.
	   The current ControllerRevision revision MUST be 3.
	*/
-	framework.ConformanceIt("should manage the lifecycle of a ControllerRevision", func() {
+	framework.ConformanceIt("should manage the lifecycle of a ControllerRevision", func(ctx context.Context) {
		csAppsV1 := f.ClientSet.AppsV1()
		dsLabel := map[string]string{"daemonset-name": dsName}
diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go
index 102b704e2e5..ef98644da0e 100644
--- a/test/e2e/apps/cronjob.go
+++ b/test/e2e/apps/cronjob.go
@@ -66,7 +66,7 @@ var _ = SIGDescribe("CronJob", func() {
	   Testname: CronJob AllowConcurrent
	   Description: CronJob MUST support AllowConcurrent policy, allowing multiple jobs to run at the same time.
	*/
-	framework.ConformanceIt("should schedule multiple jobs concurrently", func() {
+	framework.ConformanceIt("should schedule multiple jobs concurrently", func(ctx context.Context) {
		ginkgo.By("Creating a cronjob")
		cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
			sleepCommand, nil, nil)
@@ -93,7 +93,7 @@ var _ = SIGDescribe("CronJob", func() {
	   Testname: CronJob Suspend
	   Description: CronJob MUST support suspension, which suppresses creation of new jobs.
	*/
-	framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func() {
+	framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func(ctx context.Context) {
		ginkgo.By("Creating a suspended cronjob")
		cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1.AllowConcurrent,
			sleepCommand, nil, nil)
@@ -121,7 +121,7 @@ var _ = SIGDescribe("CronJob", func() {
	   Testname: CronJob ForbidConcurrent
	   Description: CronJob MUST support ForbidConcurrent policy, allowing only a single, previous job to run at a time.
	*/
-	framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
+	framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func(ctx context.Context) {
		ginkgo.By("Creating a ForbidConcurrent cronjob")
		cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
			sleepCommand, nil, nil)
@@ -157,7 +157,7 @@ var _ = SIGDescribe("CronJob", func() {
	   Testname: CronJob ReplaceConcurrent
	   Description: CronJob MUST support ReplaceConcurrent policy, allowing only a single, newer job to run at a time.
*/ - framework.ConformanceIt("should replace jobs when ReplaceConcurrent", func() { + framework.ConformanceIt("should replace jobs when ReplaceConcurrent", func(ctx context.Context) { ginkgo.By("Creating a ReplaceConcurrent cronjob") cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent, sleepCommand, nil, nil) @@ -188,7 +188,7 @@ var _ = SIGDescribe("CronJob", func() { framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) }) - ginkgo.It("should be able to schedule after more than 100 missed schedule", func() { + ginkgo.It("should be able to schedule after more than 100 missed schedule", func(ctx context.Context) { ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.ForbidConcurrent, sleepCommand, nil, nil) @@ -215,7 +215,7 @@ var _ = SIGDescribe("CronJob", func() { }) // shouldn't give us unexpected warnings - ginkgo.It("should not emit unexpected warnings", func() { + ginkgo.It("should not emit unexpected warnings", func(ctx context.Context) { ginkgo.By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent, nil, nil, nil) @@ -238,7 +238,7 @@ var _ = SIGDescribe("CronJob", func() { }) // deleted jobs should be removed from the active list - ginkgo.It("should remove from active list jobs that have been deleted", func() { + ginkgo.It("should remove from active list jobs that have been deleted", func(ctx context.Context) { ginkgo.By("Creating a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, sleepCommand, nil, nil) @@ -277,7 +277,7 @@ var _ = SIGDescribe("CronJob", func() { }) // cleanup of successful finished jobs, with limit of one successful job - ginkgo.It("should delete successful finished jobs with limit of one successful job", func() { + ginkgo.It("should delete successful finished jobs with limit of one successful job", func(ctx context.Context) { ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit") successLimit := int32(1) failedLimit := int32(0) @@ -288,7 +288,7 @@ var _ = SIGDescribe("CronJob", func() { }) // cleanup of failed finished jobs, with limit of one failed job - ginkgo.It("should delete failed finished jobs with limit of one job", func() { + ginkgo.It("should delete failed finished jobs with limit of one job", func(ctx context.Context) { ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit") successLimit := int32(0) failedLimit := int32(1) @@ -298,7 +298,7 @@ var _ = SIGDescribe("CronJob", func() { ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob) }) - ginkgo.It("should support timezone", func() { + ginkgo.It("should support timezone", func(ctx context.Context) { ginkgo.By("Creating a cronjob with TimeZone") cronJob := newTestCronJob("cronjob-with-timezone", "*/1 * * * ?", batchv1.AllowConcurrent, failureCommand, nil, nil) @@ -316,7 +316,7 @@ var _ = SIGDescribe("CronJob", func() { CronJob MUST support create, get, list, watch, update, patch, delete, and deletecollection. CronJob/status MUST support get, update and patch. 
*/ - framework.ConformanceIt("should support CronJob API operations", func() { + framework.ConformanceIt("should support CronJob API operations", func(ctx context.Context) { ginkgo.By("Creating a cronjob") successLimit := int32(1) failedLimit := int32(0) diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index ea21f24f8b5..40ed078757e 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -270,7 +270,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { close(stopCh) }) - ginkgo.It("Controller Manager should not create/delete replicas across restart", func() { + ginkgo.It("Controller Manager should not create/delete replicas across restart", func(ctx context.Context) { // Requires master ssh access. e2eskipper.SkipUnlessProviderIs("gce", "aws") @@ -301,7 +301,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { } }) - ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func() { + ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func(ctx context.Context) { // Requires master ssh access. e2eskipper.SkipUnlessProviderIs("gce", "aws") @@ -319,7 +319,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) }) - ginkgo.It("Kubelet should not restart containers across restart", func() { + ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) { nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) if err != nil { framework.Logf("Unexpected error occurred: %v", err) @@ -341,7 +341,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { } }) - ginkgo.It("Kube-proxy should recover after being killed accidentally", func() { + ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) { nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) if err != nil { framework.Logf("Unexpected error occurred: %v", err) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 35a430cfa50..2e7227bc79c 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -163,7 +163,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Description: A conformant Kubernetes distribution MUST support the creation of DaemonSets. When a DaemonSet Pod is deleted, the DaemonSet controller MUST create a replacement Pod. */ - framework.ConformanceIt("should run and stop simple daemon", func() { + framework.ConformanceIt("should run and stop simple daemon", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) @@ -191,7 +191,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Description: A conformant Kubernetes distribution MUST support DaemonSet Pod node selection via label selectors. */ - framework.ConformanceIt("should run and stop complex daemon", func() { + framework.ConformanceIt("should run and stop complex daemon", func(ctx context.Context) { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} framework.Logf("Creating daemon %q with a node selector", dsName) @@ -238,7 +238,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the // default scheduler. 
- ginkgo.It("should run and stop complex daemon with node affinity", func() { + ginkgo.It("should run and stop complex daemon with node affinity", func(ctx context.Context) { complexLabel := map[string]string{daemonsetNameLabel: dsName} nodeSelector := map[string]string{daemonsetColorLabel: "blue"} framework.Logf("Creating daemon %q with a node affinity", dsName) @@ -291,7 +291,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Testname: DaemonSet-FailedPodCreation Description: A conformant Kubernetes distribution MUST create new DaemonSet Pods when they fail. */ - framework.ConformanceIt("should retry creating failed daemon pods", func() { + framework.ConformanceIt("should retry creating failed daemon pods", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) @@ -321,7 +321,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { // This test should not be added to conformance. We will consider deprecating OnDelete when the // extensions/v1beta1 and apps/v1beta1 are removed. - ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() { + ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating simple daemon set %s", dsName) @@ -371,7 +371,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Testname: DaemonSet-RollingUpdate Description: A conformant Kubernetes distribution MUST support DaemonSet RollingUpdates. */ - framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() { + framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating simple daemon set %s", dsName) @@ -429,7 +429,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Description: A conformant Kubernetes distribution MUST support automated, minimally disruptive rollback of updates to a DaemonSet. */ - framework.ConformanceIt("should rollback without unnecessary restarts", func() { + framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) { schedulableNodes, err := e2enode.GetReadySchedulableNodes(c) framework.ExpectNoError(err) gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") @@ -501,7 +501,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }) // TODO: This test is expected to be promoted to conformance after the feature is promoted - ginkgo.It("should surge pods onto nodes when spec was updated and update strategy is RollingUpdate", func() { + ginkgo.It("should surge pods onto nodes when spec was updated and update strategy is RollingUpdate", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating surge daemon set %s", dsName) @@ -820,7 +820,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { MUST succeed when listing DaemonSets via a label selector. It MUST succeed when deleting the DaemonSet via deleteCollection. 
*/ - framework.ConformanceIt("should list and delete a collection of DaemonSets", func() { + framework.ConformanceIt("should list and delete a collection of DaemonSets", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} labelSelector := labels.SelectorFromSet(label).String() @@ -859,7 +859,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. */ - framework.ConformanceIt("should verify changes to a daemon set status", func() { + framework.ConformanceIt("should verify changes to a daemon set status", func(ctx context.Context) { label := map[string]string{daemonsetNameLabel: dsName} labelSelector := labels.SelectorFromSet(label).String() @@ -919,7 +919,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the daemon set status to be updated") - ctx, cancel := context.WithTimeout(context.Background(), dsRetryTimeout) + ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout) defer cancel() _, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if ds, ok := event.Object.(*appsv1.DaemonSet); ok { diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 4ac2e3d461c..cb987e185a5 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -94,7 +94,7 @@ var _ = SIGDescribe("Deployment", func() { dc = f.DynamicClient }) - ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() { + ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) { testDeleteDeployment(f) }) /* @@ -102,7 +102,7 @@ var _ = SIGDescribe("Deployment", func() { Testname: Deployment RollingUpdate Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy. */ - framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() { + framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) { testRollingUpdateDeployment(f) }) /* @@ -110,7 +110,7 @@ var _ = SIGDescribe("Deployment", func() { Testname: Deployment Recreate Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy. */ - framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() { + framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) { testRecreateDeployment(f) }) /* @@ -119,7 +119,7 @@ var _ = SIGDescribe("Deployment", func() { Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on the Deployment's `.spec.revisionHistoryLimit`. */ - framework.ConformanceIt("deployment should delete old replica sets", func() { + framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) { testDeploymentCleanUpPolicy(f) }) /* @@ -129,13 +129,13 @@ var _ = SIGDescribe("Deployment", func() { i.e. allow arbitrary number of changes to desired state during rolling update before the rollout finishes. 
*/ - framework.ConformanceIt("deployment should support rollover", func() { + framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) { testRolloverDeployment(f) }) - ginkgo.It("iterative rollouts should eventually progress", func() { + ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) { testIterativeDeployments(f) }) - ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() { + ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) { testDeploymentsControllerRef(f) }) @@ -147,7 +147,7 @@ var _ = SIGDescribe("Deployment", func() { The Deployment MUST update and verify the scale subresource. The Deployment MUST patch and verify a scale subresource. */ - framework.ConformanceIt("Deployment should have a working scale subresource", func() { + framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) { testDeploymentSubresources(f) }) /* @@ -157,10 +157,10 @@ var _ = SIGDescribe("Deployment", func() { proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets when a Deployment is scaled. */ - framework.ConformanceIt("deployment should support proportional scaling", func() { + framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) { testProportionalScalingDeployment(f) }) - ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func() { + ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke") e2eskipper.SkipIfIPv6("aws") nodes, err := e2enode.GetReadySchedulableNodes(c) @@ -182,7 +182,7 @@ var _ = SIGDescribe("Deployment", func() { When fetching and patching the DeploymentStatus it MUST succeed. It MUST succeed when deleting the Deployment. */ - framework.ConformanceIt("should run the lifecycle of a Deployment", func() { + framework.ConformanceIt("should run the lifecycle of a Deployment", func(ctx context.Context) { one := int64(1) deploymentResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} testNamespaceName := f.Namespace.Name @@ -215,7 +215,7 @@ var _ = SIGDescribe("Deployment", func() { framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName) ginkgo.By("waiting for Deployment to be created") - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { @@ -476,7 +476,7 @@ var _ = SIGDescribe("Deployment", func() { Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. 
*/ - framework.ConformanceIt("should validate Deployment Status endpoints", func() { + framework.ConformanceIt("should validate Deployment Status endpoints", func(ctx context.Context) { dClient := c.AppsV1().Deployments(ns) dName := "test-deployment-" + utilrand.String(5) labelSelector := "e2e=testing" @@ -542,7 +542,7 @@ var _ = SIGDescribe("Deployment", func() { framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) ginkgo.By("watching for the Deployment status to be updated") - ctx, cancel := context.WithTimeout(context.Background(), dRetryTimeout) + ctx, cancel := context.WithTimeout(ctx, dRetryTimeout) defer cancel() _, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 082380b87d9..902c7939a11 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -84,7 +84,7 @@ var _ = SIGDescribe("DisruptionController", func() { Testname: PodDisruptionBudget: list and delete collection Description: PodDisruptionBudget API must support list and deletecollection operations. */ - framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func() { + framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) { specialLabels := map[string]string{"foo_pdb": "bar_pdb"} labelSelector := labels.SelectorFromSet(specialLabels).String() createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels) @@ -105,7 +105,7 @@ var _ = SIGDescribe("DisruptionController", func() { Testname: PodDisruptionBudget: create, update, patch, and delete object Description: PodDisruptionBudget API must support create, update, patch, and delete operations. */ - framework.ConformanceIt("should create a PodDisruptionBudget", func() { + framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) { ginkgo.By("creating the pdb") createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels) @@ -138,7 +138,7 @@ var _ = SIGDescribe("DisruptionController", func() { Description: Disruption controller MUST update the PDB status with how many disruptions are allowed. */ - framework.ConformanceIt("should observe PodDisruptionBudget status updated", func() { + framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) { createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) createPodsOrDie(cs, ns, 3) @@ -161,7 +161,7 @@ var _ = SIGDescribe("DisruptionController", func() { Testname: PodDisruptionBudget: update and patch status Description: PodDisruptionBudget API must support update and patch operations on status subresource. 
*/ - framework.ConformanceIt("should update/patch PodDisruptionBudget status", func() { + framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) { createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels) ginkgo.By("Updating PodDisruptionBudget status") @@ -287,7 +287,7 @@ var _ = SIGDescribe("DisruptionController", func() { if c.exclusive { serial = " [Serial]" } - ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func() { + ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func(ctx context.Context) { if c.skipForBigClusters { e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1) } @@ -344,7 +344,7 @@ var _ = SIGDescribe("DisruptionController", func() { Testname: PodDisruptionBudget: block an eviction until the PDB is updated to allow it Description: Eviction API must block an eviction until the PDB is updated to allow it */ - framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func() { + framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) { ginkgo.By("Creating a pdb that targets all three pods in a test replica set") createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels) createReplicaSetOrDie(cs, ns, 3, false) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 8e25dc433b9..2944f45be62 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -79,7 +79,7 @@ var _ = SIGDescribe("Job", func() { backoffLimit := int32(6) // default value // Simplest case: N pods succeed - ginkgo.It("should run a job to completion when tasks succeed", func() { + ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) @@ -101,7 +101,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes) }) - ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func() { + ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func(ctx context.Context) { // We fail the Job's pod only once to ensure the backoffLimit is not // reached and thus the job is failed due to the pod failure policy @@ -134,7 +134,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to ensure job failure in namespace: %s", f.Namespace.Name) }) - ginkgo.It("should allow to use the pod failure policy to not count the failure towards the backoffLimit", func() { + ginkgo.It("should allow to use the pod failure policy to not count the failure towards the backoffLimit", func(ctx context.Context) { // We set the backoffLimit to 0 so that any pod failure would trigger // job failure if not for the pod failure policy to ignore the failed @@ -272,7 +272,7 @@ var _ = SIGDescribe("Job", func() { }), ) - ginkgo.It("should not create pods when created in suspend state", func() { + ginkgo.It("should not create pods when created in suspend state", func(ctx context.Context) { ginkgo.By("Creating a job with suspend=true") job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) 
job.Spec.Suspend = pointer.BoolPtr(true) @@ -310,7 +310,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) - ginkgo.It("should delete pods when suspended", func() { + ginkgo.It("should delete pods when suspended", func(ctx context.Context) { ginkgo.By("Creating a job with suspend=false") job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job.Spec.Suspend = pointer.Bool(false) @@ -363,7 +363,7 @@ var _ = SIGDescribe("Job", func() { Description: Create an Indexed job. Job MUST complete successfully. Ensure that created pods have completion index annotation and environment variable. */ - framework.ConformanceIt("should create pods for an Indexed job with completion indexes and specified hostname", func() { + framework.ConformanceIt("should create pods for an Indexed job with completion indexes and specified hostname", func(ctx context.Context) { ginkgo.By("Creating Indexed job") job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) mode := batchv1.IndexedCompletion @@ -398,7 +398,7 @@ var _ = SIGDescribe("Job", func() { Description: Create a job and ensure the associated pod count is equal to parallelism count. Delete the job and ensure if the pods associated with the job have been removed */ - ginkgo.It("should remove pods when job is deleted", func() { + ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) @@ -423,7 +423,7 @@ var _ = SIGDescribe("Job", func() { Description: Explicitly cause the tasks to fail once initially. After restarting, the Job MUST execute to completion. */ - framework.ConformanceIt("should run a job to completion when tasks sometimes fail and are locally restarted", func() { + framework.ConformanceIt("should run a job to completion when tasks sometimes fail and are locally restarted", func(ctx context.Context) { ginkgo.By("Creating a job") // One failure, then a success, local restarts. // We can't use the random failure approach, because kubelet will @@ -440,7 +440,7 @@ var _ = SIGDescribe("Job", func() { }) // Pods sometimes fail, but eventually succeed, after pod restarts - ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() { + ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func(ctx context.Context) { // One failure, then a success, no local restarts. 
// We can't use the random failure approach, because JobController // will throttle frequently failing Pods of a given Job, ramping @@ -462,7 +462,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) }) - ginkgo.It("should fail when exceeds active deadline", func() { + ginkgo.It("should fail when exceeds active deadline", func(ctx context.Context) { ginkgo.By("Creating a job") var activeDeadlineSeconds int64 = 1 job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit) @@ -478,7 +478,7 @@ var _ = SIGDescribe("Job", func() { Testname: Jobs, active pods, graceful termination Description: Create a job. Ensure the active pods reflect parallelism in the namespace and delete the job. Job MUST be deleted successfully. */ - framework.ConformanceIt("should delete a job", func() { + framework.ConformanceIt("should delete a job", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) @@ -504,7 +504,7 @@ var _ = SIGDescribe("Job", func() { Orphan a Pod by modifying its owner reference. The Job MUST re-adopt the orphan pod. Modify the labels of one of the Job's Pods. The Job MUST release the Pod. */ - framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func() { + framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func(ctx context.Context) { ginkgo.By("Creating a job") job := e2ejob.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) // Replace job with the one returned from Create() so it has the UID. @@ -558,7 +558,7 @@ var _ = SIGDescribe("Job", func() { )).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name) }) - ginkgo.It("should fail to exceed backoffLimit", func() { + ginkgo.It("should fail to exceed backoffLimit", func(ctx context.Context) { ginkgo.By("Creating a job") backoff := 1 job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff)) @@ -578,7 +578,7 @@ var _ = SIGDescribe("Job", func() { } }) - ginkgo.It("should run a job to completion with CPU requests [Serial]", func() { + ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) { ginkgo.By("Creating a job that with CPU requests") testNodeName := scheduling.GetNodeThatCanRunPod(f) @@ -633,7 +633,7 @@ var _ = SIGDescribe("Job", func() { Attempt to replace the job status with a new start time which MUST succeed. Attempt to read its status sub-resource which MUST succeed */ - framework.ConformanceIt("should apply changes to a job status", func() { + framework.ConformanceIt("should apply changes to a job status", func(ctx context.Context) { ns := f.Namespace.Name jClient := f.ClientSet.BatchV1().Jobs(ns) @@ -700,7 +700,7 @@ var _ = SIGDescribe("Job", func() { succeed. One list MUST be found. It MUST succeed at deleting a collection of jobs via a label selector. 
*/ - framework.ConformanceIt("should manage the lifecycle of a job", func() { + framework.ConformanceIt("should manage the lifecycle of a job", func(ctx context.Context) { jobName := "e2e-" + utilrand.String(5) label := map[string]string{"e2e-job-label": jobName} labelSelector := labels.SelectorFromSet(label).String() diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index c351cb2e8ca..b56d63a69a2 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -64,11 +64,11 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, run basic image Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP. */ - framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() { + framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage) }) - ginkgo.It("should serve a basic image on each replica with a private image", func() { + ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { // requires private images e2eskipper.SkipUnlessProviderIs("gce", "gke") privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) @@ -80,7 +80,7 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, check for issues like exceeding allocated quota Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail */ - framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func() { + framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { testReplicationControllerConditionCheck(f) }) @@ -89,7 +89,7 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, adopt matching pods Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod */ - framework.ConformanceIt("should adopt matching pods on creation", func() { + framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) { testRCAdoptMatchingOrphans(f) }) @@ -98,7 +98,7 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, release pods Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references. */ - framework.ConformanceIt("should release no longer matching pods", func() { + framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) { testRCReleaseControlledNotMatching(f) }) @@ -107,7 +107,7 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, lifecycle Description: A Replication Controller (RC) is created, read, patched, and deleted with verification. 
*/ - framework.ConformanceIt("should test the lifecycle of a ReplicationController", func() { + framework.ConformanceIt("should test the lifecycle of a ReplicationController", func(ctx context.Context) { testRcName := "rc-test" testRcNamespace := ns testRcInitialReplicaCount := int32(1) @@ -399,7 +399,7 @@ var _ = SIGDescribe("ReplicationController", func() { succeed when reading the ReplicationController scale. When updating the ReplicationController scale it MUST succeed and the field MUST equal the new value. */ - framework.ConformanceIt("should get and update a ReplicationController scale", func() { + framework.ConformanceIt("should get and update a ReplicationController scale", func(ctx context.Context) { rcClient := f.ClientSet.CoreV1().ReplicationControllers(ns) rcName := "e2e-rc-" + utilrand.String(5) initialRCReplicaCount := int32(1) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index c16ac899f72..1f59d0e0c38 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -108,18 +108,18 @@ var _ = SIGDescribe("ReplicaSet", func() { Testname: Replica Set, run basic image Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. */ - framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() { + framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage) }) - ginkgo.It("should serve a basic image on each replica with a private image", func() { + ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { // requires private images e2eskipper.SkipUnlessProviderIs("gce", "gke") privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage()) }) - ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func() { + ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { testReplicaSetConditionCheck(f) }) @@ -128,7 +128,7 @@ var _ = SIGDescribe("ReplicaSet", func() { Testname: Replica Set, adopt matching pods and release non matching pods Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references */ - framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() { + framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) { testRSAdoptMatchingAndReleaseNotMatching(f) }) @@ -140,7 +140,7 @@ var _ = SIGDescribe("ReplicaSet", func() { The RS MUST update and verify the scale subresource. The RS MUST patch and verify a scale subresource. */ - framework.ConformanceIt("Replicaset should have a working scale subresource", func() { + framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) { testRSScaleSubresources(f) }) @@ -151,7 +151,7 @@ var _ = SIGDescribe("ReplicaSet", func() { that it is running. 
The RS MUST scale to two replicas and verify the scale count The RS MUST be patched and verify that patch succeeded. */ - framework.ConformanceIt("Replace and Patch tests", func() { + framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) { testRSLifeCycle(f) }) @@ -162,7 +162,7 @@ var _ = SIGDescribe("ReplicaSet", func() { MUST succeed when listing ReplicaSets via a label selector. It MUST succeed when deleting the ReplicaSet via deleteCollection. */ - framework.ConformanceIt("should list and delete a collection of ReplicaSets", func() { + framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) { listRSDeleteCollection(f) }) @@ -173,7 +173,7 @@ var _ = SIGDescribe("ReplicaSet", func() { Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. */ - framework.ConformanceIt("should validate Replicaset Status endpoints", func() { + framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) { testRSStatus(f) }) }) diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 29dfb423d93..ed83fa6d11e 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -131,7 +131,7 @@ var _ = SIGDescribe("StatefulSet", func() { // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - ginkgo.It("should provide basic identity", func() { + ginkgo.It("should provide basic identity", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 3 @@ -170,7 +170,7 @@ var _ = SIGDescribe("StatefulSet", func() { // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - ginkgo.It("should adopt matching orphans and release non-matching pods", func() { + ginkgo.It("should adopt matching orphans and release non-matching pods", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 1 @@ -255,7 +255,7 @@ var _ = SIGDescribe("StatefulSet", func() { // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - ginkgo.It("should not deadlock when a pod's predecessor fails", func() { + ginkgo.It("should not deadlock when a pod's predecessor fails", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 2 @@ -291,7 +291,7 @@ var _ = SIGDescribe("StatefulSet", func() { // This can't be Conformance yet because it depends on a default // StorageClass and a dynamic provisioner. - ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func() { + ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet with PVCs") e2epv.SkipIfNoDefaultStorageClass(c) *(ss.Spec.Replicas) = 3 @@ -303,7 +303,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Rolling Update Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. 
If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. */ - framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() { + framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet") ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) rollbackTest(c, ns, ss) @@ -314,7 +314,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Rolling Update with Partition Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. */ - framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() { + framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet") ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) setHTTPProbe(ss) @@ -506,7 +506,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. // The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs. - ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() { + ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func(ctx context.Context) { ginkgo.By("Creating a new StatefulSet") ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) setHTTPProbe(ss) @@ -584,7 +584,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Scaling Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. */ - framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func() { + framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func(ctx context.Context) { psLabels := klabels.Set(labels) w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { @@ -694,7 +694,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Burst Scaling Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. 
*/ - framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func() { + framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func(ctx context.Context) { psLabels := klabels.Set(labels) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) @@ -736,7 +736,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Recreate Failed Pod Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state, such as when they are rejected or evicted by a Node. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. */ - framework.ConformanceIt("Should recreate evicted statefulset", func() { + framework.ConformanceIt("Should recreate evicted statefulset", func(ctx context.Context) { podName := "test-pod" statefulPodName := ssName + "-0" ginkgo.By("Looking for a node to schedule stateful set and pod") @@ -796,7 +796,7 @@ var _ = SIGDescribe("StatefulSet", func() { return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options) }, } - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout) + ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulPodTimeout) defer cancel() // we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once _, err = watchtools.Until(ctx, pl.ResourceVersion, lw, func(event watch.Event) (bool, error) { @@ -845,7 +845,7 @@ var _ = SIGDescribe("StatefulSet", func() { Newly created StatefulSet resource MUST have a scale of one. Bring the scale of the StatefulSet resource up to two. StatefulSet scale MUST be at two replicas. */ - framework.ConformanceIt("should have a working scale subresource", func() { + framework.ConformanceIt("should have a working scale subresource", func(ctx context.Context) { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) @@ -905,7 +905,7 @@ var _ = SIGDescribe("StatefulSet", func() { MUST succeed when patching a StatefulSet. It MUST succeed when deleting the StatefulSet via deleteCollection. */ - framework.ConformanceIt("should list, patch and delete a collection of StatefulSets", func() { + framework.ConformanceIt("should list, patch and delete a collection of StatefulSets", func(ctx context.Context) { ssPatchReplicas := int32(2) ssPatchImage := imageutils.GetE2EImage(imageutils.Pause) @@ -974,7 +974,7 @@ var _ = SIGDescribe("StatefulSet", func() { Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. */ - framework.ConformanceIt("should validate Statefulset Status endpoints", func() { + framework.ConformanceIt("should validate Statefulset Status endpoints", func(ctx context.Context) { ssClient := c.AppsV1().StatefulSets(ns) labelSelector := "e2e=testing" @@ -1034,7 +1034,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("watching for the statefulset status to be updated") - ctx, cancel := context.WithTimeout(context.Background(), statefulSetTimeout) + ctx, cancel := context.WithTimeout(ctx, statefulSetTimeout) defer cancel() _, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) { @@ -1118,7 +1118,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. 
// StatefulSet Conformance should not be dependent on specific applications. - ginkgo.It("should creating a working zookeeper cluster", func() { + ginkgo.It("should creating a working zookeeper cluster", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) appTester.statefulPod = &zookeeperTester{client: c} appTester.run() @@ -1126,7 +1126,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - ginkgo.It("should creating a working redis cluster", func() { + ginkgo.It("should creating a working redis cluster", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) appTester.statefulPod = &redisTester{client: c} appTester.run() @@ -1134,7 +1134,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - ginkgo.It("should creating a working mysql cluster", func() { + ginkgo.It("should creating a working mysql cluster", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) appTester.statefulPod = &mysqlGaleraTester{client: c} appTester.run() @@ -1142,7 +1142,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Do not mark this as Conformance. // StatefulSet Conformance should not be dependent on specific applications. - ginkgo.It("should creating a working CockroachDB cluster", func() { + ginkgo.It("should creating a working CockroachDB cluster", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) appTester.statefulPod = &cockroachDBTester{client: c} appTester.run() @@ -1151,7 +1151,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Make sure minReadySeconds is honored // Don't mark it as conformance yet - ginkgo.It("MinReadySeconds should be honored when enabled", func() { + ginkgo.It("MinReadySeconds should be honored when enabled", func(ctx context.Context) { ssName := "test-ss" headlessSvcName := "test" // Define StatefulSet Labels @@ -1166,7 +1166,7 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 1) }) - ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func() { + ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func(ctx context.Context) { ssName := "test-ss" headlessSvcName := "test" // Define StatefulSet Labels @@ -1239,7 +1239,7 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.DeleteAllStatefulSets(c, ns) }) - ginkgo.It("should delete PVCs with a WhenDeleted policy", func() { + ginkgo.It("should delete PVCs with a WhenDeleted policy", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 @@ -1262,7 +1262,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) }) - ginkgo.It("should delete PVCs with a OnScaledown policy", func() { + ginkgo.It("should delete PVCs with a OnScaledown policy", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 @@ -1285,7 +1285,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) }) - ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func() { + ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) 
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 @@ -1316,7 +1316,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) }) - ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func() { + ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(c) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 diff --git a/test/e2e/apps/ttl_after_finished.go b/test/e2e/apps/ttl_after_finished.go index 807744255b3..ac3e991d0ad 100644 --- a/test/e2e/apps/ttl_after_finished.go +++ b/test/e2e/apps/ttl_after_finished.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("TTLAfterFinished", func() { f := framework.NewDefaultFramework("ttlafterfinished") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("job should be deleted once it finishes after TTL seconds", func() { + ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) { testFinishedJob(f) }) }) diff --git a/test/e2e/architecture/conformance.go b/test/e2e/architecture/conformance.go index 5bfdaaefb30..ad371755dc8 100644 --- a/test/e2e/architecture/conformance.go +++ b/test/e2e/architecture/conformance.go @@ -17,6 +17,7 @@ limitations under the License. package architecture import ( + "context" "time" "github.com/onsi/ginkgo/v2" @@ -35,7 +36,7 @@ var _ = SIGDescribe("Conformance Tests", func() { Testname: Conformance tests minimum number of nodes. Description: Conformance tests requires at least two untainted nodes where pods can be scheduled. */ - framework.ConformanceIt("should have at least two untainted nodes", func() { + framework.ConformanceIt("should have at least two untainted nodes", func(ctx context.Context) { ginkgo.By("Getting node addresses") framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute)) nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index d9506a04f7c..61af59027c5 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { The certificatesigningrequests resource must accept a request for a certificate signed by kubernetes.io/kube-apiserver-client. The issued certificate must be valid as a client certificate used to authenticate to the kube-apiserver. */ - ginkgo.It("should support building a client with a CSR", func() { + ginkgo.It("should support building a client with a CSR", func(ctx context.Context) { const commonName = "tester-csr" csrClient := f.ClientSet.CertificatesV1().CertificateSigningRequests() @@ -197,7 +197,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() { The certificatesigningrequests/approval resource must support get, update, patch. The certificatesigningrequests/status resource must support get, update, patch. 
*/ - framework.ConformanceIt("should support CSR API operations", func() { + framework.ConformanceIt("should support CSR API operations", func(ctx context.Context) { // Setup csrVersion := "v1" diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index f9a84e08732..d5708dd32a5 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { framework.ExpectNotEqual(len(nodeIPs), 0) }) - ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func() { + ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func(ctx context.Context) { pod := createNodeAuthTestPod(f) for _, nodeIP := range nodeIPs { // Anonymous authentication is disabled by default @@ -66,7 +66,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { } }) - ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func() { + ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func(ctx context.Context) { ginkgo.By("create a new ServiceAccount for authentication") trueValue := true newSA := &v1.ServiceAccount{ diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index 8336ba9bd0c..4587685d701 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -68,14 +68,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config) }) - ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() { + ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { _, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } }) - ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() { + ginkgo.It("Getting an existing secret should exit with the Forbidden error", func(ctx context.Context) { ginkgo.By("Create a secret for testing") secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -92,14 +92,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { } }) - ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() { + ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) { _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if !apierrors.IsForbidden(err) { framework.Failf("should be a forbidden error, got %#v", err) } }) - ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() { + ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func(ctx context.Context) { ginkgo.By("Create a configmap for testing") configmap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -118,7 +118,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { } }) - ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() { + ginkgo.It("Getting a secret for a workload the node has access to should succeed", func(ctx context.Context) { ginkgo.By("Create a secret for testing") secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -181,7 +181,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { 
framework.ExpectNoError(err, "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name) }) - ginkgo.It("A node shouldn't be able to create another node", func() { + ginkgo.It("A node shouldn't be able to create another node", func(ctx context.Context) { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, TypeMeta: metav1.TypeMeta{ @@ -203,7 +203,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { } }) - ginkgo.It("A node shouldn't be able to delete another node", func() { + ginkgo.It("A node shouldn't be able to delete another node", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", metav1.DeleteOptions{}) if !apierrors.IsForbidden(err) { diff --git a/test/e2e/auth/selfsubjectreviews.go b/test/e2e/auth/selfsubjectreviews.go index 9b8de3df66a..9419345e97a 100644 --- a/test/e2e/auth/selfsubjectreviews.go +++ b/test/e2e/auth/selfsubjectreviews.go @@ -41,7 +41,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() { The selfsubjectreviews resource MUST exist in the /apis/authentication.k8s.io/v1alpha1 discovery document. The selfsubjectreviews resource must support create. */ - ginkgo.It("should support SelfSubjectReview API operations", func() { + ginkgo.It("should support SelfSubjectReview API operations", func(ctx context.Context) { // Setup ssarAPIVersion := "v1alpha1" diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index f7bd947eaa3..5dc8f6327ec 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { f := framework.NewDefaultFramework("svcaccounts") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("no secret-based service account token should be auto-generated", func() { + ginkgo.It("no secret-based service account token should be auto-generated", func(ctx context.Context) { { ginkgo.By("ensuring no secret-based service account token exists") time.Sleep(10 * time.Second) @@ -75,7 +75,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Token Mount path. All these three files MUST exist and the Service Account mount path MUST be auto mounted to the Container. */ - framework.ConformanceIt("should mount an API token into pods ", func() { + framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) { sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -158,7 +158,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { include test cases 1a,1b,2a,2b and 2c. In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted. */ - framework.ConformanceIt("should allow opting out of API token automount ", func() { + framework.ConformanceIt("should allow opting out of API token automount ", func(ctx context.Context) { var err error trueValue := true @@ -272,7 +272,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Testname: TokenRequestProjection should mount a projected volume with token using TokenRequest API. Description: Ensure that projected service account token is mounted. 
*/ - framework.ConformanceIt("should mount projected service account token", func() { + framework.ConformanceIt("should mount projected service account token", func(ctx context.Context) { var ( podName = "test-pod-" + string(uuid.NewUUID()) @@ -333,7 +333,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Containers MUST verify that the projected service account token can be read and has correct file mode set including ownership and permission. */ - ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { e2eskipper.SkipIfNodeOSDistroIs("windows") var ( @@ -429,7 +429,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } }) - ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() { + ginkgo.It("should support InClusterConfig with token rotation [Slow]", func(ctx context.Context) { tenMin := int64(10 * 60) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"}, @@ -528,7 +528,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { endpoints by deploying a Pod that verifies its own token against these endpoints. */ - framework.ConformanceIt("ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer", func() { + framework.ConformanceIt("ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer", func(ctx context.Context) { // Allow the test pod access to the OIDC discovery non-resource URLs. // The role should have already been automatically created as part of the @@ -646,7 +646,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Listing the ServiceAccounts MUST return the test ServiceAccount with it's patched values. ServiceAccount will be deleted and MUST find a deleted watch event. */ - framework.ConformanceIt("should run through the lifecycle of a ServiceAccount", func() { + framework.ConformanceIt("should run through the lifecycle of a ServiceAccount", func(ctx context.Context) { testNamespaceName := f.Namespace.Name testServiceAccountName := "testserviceaccount" testServiceAccountStaticLabels := map[string]string{"test-serviceaccount-static": "true"} @@ -739,7 +739,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { 2. Recreated if deleted 3. Reconciled if modified */ - framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func() { + framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func(ctx context.Context) { framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{}) if err == nil { @@ -807,7 +807,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { updating the ServiceAccount it MUST succeed and the field MUST equal the new value. 
*/ - framework.ConformanceIt("should update a ServiceAccount", func() { + framework.ConformanceIt("should update a ServiceAccount", func(ctx context.Context) { saClient := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name) saName := "e2e-sa-" + utilrand.String(5) diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go index f3793a08fe2..5e42c456b42 100644 --- a/test/e2e/autoscaling/autoscaling_timer.go +++ b/test/e2e/autoscaling/autoscaling_timer.go @@ -86,7 +86,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" } }) - ginkgo.It("takes less than 15 minutes", func() { + ginkgo.It("takes less than 15 minutes", func(ctx context.Context) { // Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient. const timeToWait = 15 * time.Minute diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index fce86e13040..8875f511f6f 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -137,7 +137,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) - ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() { + ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.95) replicasPerNode := 10 @@ -160,7 +160,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { defer testCleanup() }) - ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() { + ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.95) replicasPerNode := 10 additionalNodes1 := int(math.Ceil(0.7 * maxNodes)) @@ -209,7 +209,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { klog.Infof("Scaled up twice") }) - ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() { + ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.7) replicas := int(math.Ceil(maxNodes * 0.7)) totalNodes := maxNodes @@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { }, scaleDownTimeout)) }) - ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() { + ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func(ctx context.Context) { perPodReservation := int(float64(memCapacityMb) * 0.01) // underutilizedNodes are 10% full underutilizedPerNodeReplicas := 10 @@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { }, timeout)) }) - ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() { + ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func(ctx context.Context) { fullReservation := int(float64(memCapacityMb) * 0.9) hostPortPodReservation := int(float64(memCapacityMb) * 0.3) totalNodes := maxNodes diff --git 
a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 71252cb86fe..5d4904915f1 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -165,7 +165,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) - ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.By("Creating unschedulable pod") ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") @@ -210,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gpuType := os.Getenv("TESTED_GPU_TYPE") - ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1) }) - ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -267,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2) }) - ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0) }) - ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -331,7 +331,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) }) }) - ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { // Wait for the situation to stabilize - CA should be 
running and have up-to-date node readiness info. status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { return s.ready == s.target && s.ready <= nodeCount @@ -371,7 +371,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes) }) - ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -405,7 +405,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) - ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -418,7 +418,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2)) }) - ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false) defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port") @@ -427,7 +427,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }) - ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { pods := nodeCount newPods := 2 labels := map[string]string{ @@ -446,7 +446,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.By("creating pods") pods := nodeCount newPods := 1 @@ -467,7 +467,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") volumeLabels := labels.Set{ @@ -539,7 +539,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { 
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { labelKey := "cluster-autoscaling-test.special-node" labelValue := "true" @@ -639,7 +639,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector")) }) - ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -694,7 +694,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) }) }) - ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") increasedSize := manuallyIncreaseClusterSize(f, originalSizes) @@ -716,7 +716,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute)) }) - ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -724,7 +724,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) { ginkgo.By("No nodes should be removed") time.Sleep(scaleDownTimeout) @@ -734,7 +734,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -742,7 +742,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("should be able to scale down by draining system pods with 
pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -750,7 +750,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { // Provider-specific setup if framework.ProviderIs("gke") { // GKE-specific setup @@ -874,7 +874,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(newSize, 0) } - ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { if framework.ProviderIs("gke") { // In GKE, we can just add a node pool gkeScaleToZero() } else if len(originalSizes) >= 2 { @@ -884,7 +884,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } }) - ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() clusterSize := nodeCount @@ -948,7 +948,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout)) }) - ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { defer createPriorityClasses(f)() // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName) @@ -960,7 +960,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, time.Second)) }) - ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { defer createPriorityClasses(f)() // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. 
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) @@ -970,7 +970,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size > nodeCount }, time.Second)) }) - ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() { + ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { defer createPriorityClasses(f)() // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node. cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName) @@ -982,7 +982,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, time.Second)) }) - ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { defer createPriorityClasses(f)() increasedSize := manuallyIncreaseClusterSize(f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. @@ -993,7 +993,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, scaleDownTimeout)) }) - ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() { + ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { defer createPriorityClasses(f)() increasedSize := manuallyIncreaseClusterSize(f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. 
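Every hunk in this section makes the same mechanical change: the Ginkgo spec body gains a context.Context parameter, and the few call sites that previously built their own context from context.Background() or context.TODO() (the context.WithTimeout and watchtools hunks in statefulset.go above) now derive it from the injected ctx. A minimal, self-contained sketch of that pattern follows; it is illustrative only — the package name, spec text, and exampleTimeout are not taken from the patch.

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
)

const exampleTimeout = 5 * time.Minute

var _ = ginkgo.Describe("example spec", func() {
	// Ginkgo v2 injects a per-spec context into bodies declared as
	// func(ctx context.Context). Deriving timeouts from that ctx instead of
	// context.Background()/context.TODO() lets the framework cancel
	// in-flight API calls and watches when the spec times out or is aborted.
	ginkgo.It("threads the spec context through", func(ctx context.Context) {
		ctx, cancel := context.WithTimeout(ctx, exampleTimeout)
		defer cancel()
		_ = ctx // real specs pass this ctx to client-go calls and watch helpers
	})
})

Most hunks in this patch only perform the signature half of this change; the two statefulset.go hunks that replace context.Background() with ctx show the second half, threading the spec context into the calls the test actually makes.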
diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index 00e9063d9ca..ef934e7b388 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -61,7 +61,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("with Custom Metric of type Pod from Stackdriver", func() { - ginkgo.It("should scale down", func() { + ginkgo.It("should scale down", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := int64(100) @@ -80,7 +80,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale up with two metrics", func() { + ginkgo.It("should scale up with two metrics", func(ctx context.Context) { initialReplicas := 1 // metric 1 would cause a scale down, if not for metric 2 metric1Value := int64(100) @@ -115,7 +115,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale down with Prometheus", func() { + ginkgo.It("should scale down with Prometheus", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := int64(100) @@ -136,7 +136,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut }) ginkgo.Describe("with Custom Metric of type Object from Stackdriver", func() { - ginkgo.It("should scale down", func() { + ginkgo.It("should scale down", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := int64(100) @@ -157,7 +157,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale down to 0", func() { + ginkgo.It("should scale down to 0", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := int64(0) @@ -180,7 +180,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut }) ginkgo.Describe("with External Metric from Stackdriver", func() { - ginkgo.It("should scale down with target value", func() { + ginkgo.It("should scale down with target value", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := externalMetricValue @@ -204,7 +204,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale down with target average value", func() { + ginkgo.It("should scale down with target average value", func(ctx context.Context) { initialReplicas := 2 // metric should cause scale down metricValue := externalMetricValue @@ -228,7 +228,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale up with two metrics", func() { + ginkgo.It("should scale up with two metrics", func(ctx context.Context) { initialReplicas := 1 // metric 1 would cause a scale down, if not for metric 2 metric1Value := externalMetricValue @@ -271,7 +271,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut }) ginkgo.Describe("with multiple metrics of different types", func() { - ginkgo.It("should scale up when one metric is missing (Pod and External metrics)", func() { + ginkgo.It("should scale up when one metric is missing (Pod and External 
metrics)", func(ctx context.Context) { initialReplicas := 1 // First metric a pod metric which is missing. // Second metric is external metric which is present, it should cause scale up. @@ -300,7 +300,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func() { + ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func(ctx context.Context) { initialReplicas := 1 metricValue := int64(100) // First metric a resource metric which is missing (no consumption). @@ -320,7 +320,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func() { + ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func(ctx context.Context) { initialReplicas := 2 // First metric a container resource metric which is missing. // Second metric is external metric which is present, it should cause scale down if the first metric wasn't missing. @@ -350,7 +350,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut tc.Run() }) - ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func() { + ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func(ctx context.Context) { initialReplicas := 2 metricValue := int64(100) // First metric an object metric which is missing. diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 3682fec3665..bbe789de626 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -104,7 +104,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { // This test is separated because it is slow and need to run serially. // Will take around 5 minutes to run on a 4 nodes cluster. - ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() { + ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) { numNodes, err := e2enode.TotalRegistered(c) framework.ExpectNoError(err) @@ -168,7 +168,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { framework.ExpectNoError(err) }) - ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() { + ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) { ginkgo.By("Replace the dns autoscaling parameters with testing parameters") err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1))) diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go index 1afe8d53b5f..41e0878cc4f 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "time" "github.com/onsi/ginkgo/v2" @@ -46,47 +47,47 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C f.NamespacePodSecurityEnforceLevel = api.LevelBaseline ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { - ginkgo.It(titleUp+titleAverageUtilization, func() { + ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) }) - ginkgo.It(titleDown+titleAverageUtilization, func() { + ginkgo.It(titleDown+titleAverageUtilization, func(ctx context.Context) { scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) }) - ginkgo.It(titleUp+titleAverageValue, func() { + ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f) }) }) ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { - ginkgo.It(titleUp+titleAverageUtilization, func() { + ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f) }) - ginkgo.It(titleUp+titleAverageValue, func() { + ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f) }) }) ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() { - ginkgo.It(titleUp, func() { + ginkgo.It(titleUp, func(ctx context.Context) { scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) - ginkgo.It(titleDown, func() { + ginkgo.It(titleDown, func(ctx context.Context) { scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) }) // These tests take ~20 minutes each. 
ginkgo.Describe("[Serial] [Slow] ReplicationController", func() { - ginkgo.It(titleUp+" and verify decision stability", func() { + ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) { scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) }) - ginkgo.It(titleDown+" and verify decision stability", func() { + ginkgo.It(titleDown+" and verify decision stability", func(ctx context.Context) { scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) }) }) ginkgo.Describe("ReplicationController light", func() { - ginkgo.It("Should scale from 1 pod to 2 pods", func() { + ginkgo.It("Should scale from 1 pod to 2 pods", func(ctx context.Context) { st := &HPAScaleTest{ initPods: 1, initCPUTotal: 150, @@ -100,7 +101,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C } st.run("rc-light", e2eautoscaling.KindRC, f) }) - ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func() { + ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) { st := &HPAScaleTest{ initPods: 2, initCPUTotal: 50, @@ -118,18 +119,18 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() { // ContainerResource CPU autoscaling on idle sidecar - ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() { + ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) { scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) // ContainerResource CPU autoscaling on busy sidecar - ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() { + ginkgo.It("Should not scale up on a busy sidecar with an idle application", func(ctx context.Context) { doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f) }) }) ginkgo.Describe("CustomResourceDefinition", func() { - ginkgo.It("Should scale with a CRD targetRef", func() { + ginkgo.It("Should scale with a CRD targetRef", func(ctx context.Context) { scaleTest := &HPAScaleTest{ initPods: 1, initCPUTotal: 150, @@ -151,19 +152,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: M f.NamespacePodSecurityEnforceLevel = api.LevelBaseline ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { - ginkgo.It(titleUp+titleAverageUtilization, func() { + ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f) }) - ginkgo.It(titleUp+titleAverageValue, func() { + ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f) }) }) ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { - ginkgo.It(titleUp+titleAverageUtilization, func() { + ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f) }) - ginkgo.It(titleUp+titleAverageValue, func() { + ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) { scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f) }) }) diff --git 
a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go index 882a4ef6811..7447c16a50f 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "time" autoscalingv2 "k8s.io/api/autoscaling/v2" @@ -53,7 +54,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n waitBuffer := 1 * time.Minute ginkgo.Describe("with short downscale stabilization window", func() { - ginkgo.It("should scale down soon after the stabilization period", func() { + ginkgo.It("should scale down soon after the stabilization period", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 1 initCPUUsageTotal := initPods * usageForSingleReplica @@ -94,7 +95,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n }) ginkgo.Describe("with long upscale stabilization window", func() { - ginkgo.It("should scale up only after the stabilization period", func() { + ginkgo.It("should scale up only after the stabilization period", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 2 initCPUUsageTotal := initPods * usageForSingleReplica @@ -135,7 +136,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n }) ginkgo.Describe("with autoscaling disabled", func() { - ginkgo.It("shouldn't scale up", func() { + ginkgo.It("shouldn't scale up", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 1 initCPUUsageTotal := initPods * usageForSingleReplica @@ -170,7 +171,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas) }) - ginkgo.It("shouldn't scale down", func() { + ginkgo.It("shouldn't scale down", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 3 initCPUUsageTotal := initPods * usageForSingleReplica @@ -213,7 +214,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n targetCPUUtilizationPercent := 25 usageForSingleReplica := 45 - ginkgo.It("should scale up no more than given number of Pods per minute", func() { + ginkgo.It("should scale up no more than given number of Pods per minute", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 1 initCPUUsageTotal := initPods * usageForSingleReplica @@ -255,7 +256,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline) }) - ginkgo.It("should scale down no more than given number of Pods per minute", func() { + ginkgo.It("should scale down no more than given number of Pods per minute", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 3 initCPUUsageTotal := initPods * usageForSingleReplica @@ -303,7 +304,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n targetCPUUtilizationPercent := 25 usageForSingleReplica := 45 - ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() { + ginkgo.It("should scale up no more than given percentage of 
current Pods per minute", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 2 initCPUUsageTotal := initPods * usageForSingleReplica @@ -346,7 +347,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline) }) - ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() { + ginkgo.It("should scale down no more than given percentage of current Pods per minute", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 7 initCPUUsageTotal := initPods * usageForSingleReplica @@ -393,7 +394,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n ginkgo.Describe("with both scale up and down controls configured", func() { waitBuffer := 2 * time.Minute - ginkgo.It("should keep recommendation within the range over two stabilization windows", func() { + ginkgo.It("should keep recommendation within the range over two stabilization windows", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 2 initCPUUsageTotal := initPods * usageForSingleReplica @@ -444,7 +445,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline) }) - ginkgo.It("should keep recommendation within the range with stabilization window and pod limit rate", func() { + ginkgo.It("should keep recommendation within the range with stabilization window and pod limit rate", func(ctx context.Context) { ginkgo.By("setting up resource consumer and HPA") initPods := 2 initCPUUsageTotal := initPods * usageForSingleReplica diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go index 540d332c222..38c3ed15136 100644 --- a/test/e2e/cloud/gcp/addon_update.go +++ b/test/e2e/cloud/gcp/addon_update.go @@ -241,7 +241,7 @@ var _ = SIGDescribe("Addon update", func() { }) // WARNING: the test is not parallel-friendly! - ginkgo.It("should propagate add-on file changes [Slow]", func() { + ginkgo.It("should propagate add-on file changes [Slow]", func(ctx context.Context) { // This test requires: // - SSH // - master access diff --git a/test/e2e/cloud/gcp/apps/stateful_apps.go b/test/e2e/cloud/gcp/apps/stateful_apps.go index 9c31b20a407..69b625a7047 100644 --- a/test/e2e/cloud/gcp/apps/stateful_apps.go +++ b/test/e2e/cloud/gcp/apps/stateful_apps.go @@ -17,6 +17,8 @@ limitations under the License. 
package apps import ( + "context" + "k8s.io/kubernetes/test/e2e/cloud/gcp/common" "k8s.io/kubernetes/test/e2e/framework" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -40,7 +42,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() { testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("stateful upgrade", func() { - ginkgo.It("should maintain a functioning cluster", func() { + ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(f.ClientSet) upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go index ba3f5b155ab..dea4951f0b3 100644 --- a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go +++ b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go @@ -17,6 +17,8 @@ limitations under the License. package auth import ( + "context" + "k8s.io/kubernetes/test/e2e/cloud/gcp/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" @@ -37,7 +39,7 @@ var _ = SIGDescribe("ServiceAccount admission controller migration [Feature:Boun testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("master upgrade", func() { - ginkgo.It("should maintain a functioning cluster", func() { + ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index eeed66f1dbb..0cda019fc37 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -17,6 +17,8 @@ limitations under the License. package gcp import ( + "context" + "k8s.io/kubernetes/test/e2e/cloud/gcp/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" @@ -58,7 +60,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { // Create the frameworks here because we can only create them // in a "Describe". 
ginkgo.Describe("master upgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() { + ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -75,7 +77,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { }) ginkgo.Describe("cluster upgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() { + ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -95,7 +97,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() { testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("cluster downgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() { + ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/gke_node_pools.go b/test/e2e/cloud/gcp/gke_node_pools.go index 0b040bb29c9..e9a81005d7e 100644 --- a/test/e2e/cloud/gcp/gke_node_pools.go +++ b/test/e2e/cloud/gcp/gke_node_pools.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "fmt" "os/exec" @@ -37,7 +38,7 @@ var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() { e2eskipper.SkipUnlessProviderIs("gke") }) - ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() { + ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) { framework.Logf("Start create node pool test") testCreateDeleteNodePool(f, "test-pool") }) diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go index 299715a635b..987bc7409ee 100644 --- a/test/e2e/cloud/gcp/ha_master.go +++ b/test/e2e/cloud/gcp/ha_master.go @@ -227,7 +227,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { verifyRCs(c, ns, existingRCs) } - ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func() { + ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone step(None, "") numAdditionalReplicas := 2 @@ -239,7 +239,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { } }) - ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func() { + ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone region := findRegionForZone(zone) zones := findZonesForRegion(region) @@ -257,7 +257,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { } }) - ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() { + ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone region := findRegionForZone(zone) zones := findZonesForRegion(region) diff --git a/test/e2e/cloud/gcp/kubelet_security.go b/test/e2e/cloud/gcp/kubelet_security.go index be9a20d9f11..e88de971dc6 100644 --- a/test/e2e/cloud/gcp/kubelet_security.go 
+++ b/test/e2e/cloud/gcp/kubelet_security.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "fmt" "net" "net/http" @@ -47,7 +48,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { }) // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy - ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() { + ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func(ctx context.Context) { result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort) framework.ExpectNoError(err) @@ -55,7 +56,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { result.StatusCode(&statusCode) framework.ExpectNotEqual(statusCode, http.StatusOK) }) - ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() { + ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func(ctx context.Context) { result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194) framework.ExpectNoError(err) @@ -68,7 +69,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { disabledPorts := []int{ports.KubeletReadOnlyPort, 4194} for _, port := range disabledPorts { port := port - ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() { + ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func(ctx context.Context) { portClosedTest(f, node, port) }) } diff --git a/test/e2e/cloud/gcp/network/kube_proxy_migration.go b/test/e2e/cloud/gcp/network/kube_proxy_migration.go index e1cf81db303..2e51911b790 100644 --- a/test/e2e/cloud/gcp/network/kube_proxy_migration.go +++ b/test/e2e/cloud/gcp/network/kube_proxy_migration.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" @@ -55,7 +56,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]" }) ginkgo.Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() { - ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() { + ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -73,7 +74,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]" }) ginkgo.Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() { - ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() { + ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/node/gpu.go b/test/e2e/cloud/gcp/node/gpu.go index 0d7a37f8eae..4c668f794e5 100644 --- a/test/e2e/cloud/gcp/node/gpu.go +++ b/test/e2e/cloud/gcp/node/gpu.go @@ -17,6 +17,8 @@ limitations under the License. 
package node import ( + "context" + "k8s.io/kubernetes/test/e2e/cloud/gcp/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" @@ -37,7 +39,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("master upgrade", func() { - ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() { + ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -50,7 +52,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { }) }) ginkgo.Describe("cluster upgrade", func() { - ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() { + ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -63,7 +65,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { }) }) ginkgo.Describe("cluster downgrade", func() { - ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() { + ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go index dc48456441e..336f8ff21e9 100644 --- a/test/e2e/cloud/gcp/node_lease.go +++ b/test/e2e/cloud/gcp/node_lease.go @@ -101,7 +101,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { framework.ExpectNoError(err) }) - ginkgo.It("node lease should be deleted when corresponding node is deleted", func() { + ginkgo.It("node lease should be deleted when corresponding node is deleted", func(ctx context.Context) { leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease) err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go index 57e50e1d3db..152120ab2c9 100644 --- a/test/e2e/cloud/gcp/reboot.go +++ b/test/e2e/cloud/gcp/reboot.go @@ -94,25 +94,25 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { f = framework.NewDefaultFramework("reboot") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func() { + ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func(ctx context.Context) { // clean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted. testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil) }) - ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func() { + ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func(ctx context.Context) { // unclean shutdown and restart // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown. 
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) }) - ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func() { + ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func(ctx context.Context) { // kernel panic // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered. testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil) }) - ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func() { + ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func(ctx context.Context) { // switch the network interface off for a while to simulate a network outage // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down. cmd := "nohup sh -c '" + @@ -133,7 +133,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { testReboot(f.ClientSet, cmd, nil) }) - ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() { + ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func(ctx context.Context) { // tell the firewall to drop all inbound packets for a while // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets. // We still accept packages send from localhost to prevent monit from restarting kubelet. @@ -141,7 +141,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath)) }) - ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() { + ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func(ctx context.Context) { // tell the firewall to drop all outbound packets for a while // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets. // We still accept packages send to localhost to prevent monit from restarting kubelet. diff --git a/test/e2e/cloud/gcp/recreate_node.go b/test/e2e/cloud/gcp/recreate_node.go index 265e6dabd9d..75f33429f68 100644 --- a/test/e2e/cloud/gcp/recreate_node.go +++ b/test/e2e/cloud/gcp/recreate_node.go @@ -91,7 +91,7 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() { } }) - ginkgo.It("recreate nodes and ensure they function upon restart", func() { + ginkgo.It("recreate nodes and ensure they function upon restart", func(ctx context.Context) { testRecreate(f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames) }) }) diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index dbbc70074fd..b49c2e5ab34 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -112,7 +112,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should be able to delete nodes", func() { + ginkgo.It("should be able to delete nodes", func(ctx context.Context) { // Create a replication controller for a service that serves its hostname. 
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-delete-node" @@ -142,7 +142,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { }) // TODO: Bug here - testName is not correct - ginkgo.It("should be able to add nodes", func() { + ginkgo.It("should be able to add nodes", func(ctx context.Context) { // Create a replication controller for a service that serves its hostname. // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname name := "my-hostname-add-node" diff --git a/test/e2e/cloud/gcp/restart.go b/test/e2e/cloud/gcp/restart.go index 1b76e907b30..3d23ebe3635 100644 --- a/test/e2e/cloud/gcp/restart.go +++ b/test/e2e/cloud/gcp/restart.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -87,7 +88,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { } }) - ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() { + ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func(ctx context.Context) { ginkgo.By("restarting all of the nodes") err := common.RestartNodes(f.ClientSet, originalNodes) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 76e77de7bba..8086c27974a 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { c = f.ClientSet }) - ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() { + ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func(ctx context.Context) { ginkgo.By("deleting a node on the cloud provider") nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c) diff --git a/test/e2e/common/network/networking.go b/test/e2e/common/network/networking.go index 6164f37c390..3f8d463399a 100644 --- a/test/e2e/common/network/networking.go +++ b/test/e2e/common/network/networking.go @@ -17,6 +17,8 @@ limitations under the License. package network import ( + "context" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -79,7 +81,7 @@ var _ = SIGDescribe("Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() { + framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(f, false) checkPodToPodConnectivity(config, "http", e2enetwork.EndpointHTTPPort) }) @@ -90,7 +92,7 @@ var _ = SIGDescribe("Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. 
The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() { + framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(f, false) checkPodToPodConnectivity(config, "udp", e2enetwork.EndpointUDPPort) }) @@ -102,7 +104,7 @@ var _ = SIGDescribe("Networking", func() { The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. This test is marked LinuxOnly it breaks when using Overlay networking with Windows. */ - framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(f, true) for _, endpointPod := range config.EndpointPods { err := config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -119,7 +121,7 @@ var _ = SIGDescribe("Networking", func() { The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. This test is marked LinuxOnly it breaks when using Overlay networking with Windows. 
*/ - framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(f, true) for _, endpointPod := range config.EndpointPods { err := config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -130,13 +132,13 @@ var _ = SIGDescribe("Networking", func() { }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort) }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482") config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) for _, endpointPod := range config.EndpointPods { diff --git a/test/e2e/common/node/configmap.go b/test/e2e/common/node/configmap.go index 975e72f73c9..868e762a5f0 100644 --- a/test/e2e/common/node/configmap.go +++ b/test/e2e/common/node/configmap.go @@ -42,7 +42,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap, from environment field Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment. */ - framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() { + framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func(ctx context.Context) { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) @@ -90,7 +90,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap, from environment variables Description: Create a Pod with a environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container. */ - framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() { + framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) @@ -135,12 +135,12 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap, with empty-key Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail. 
*/ - framework.ConformanceIt("should fail to create ConfigMap with empty key", func() { + framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) { configMap, err := newConfigMapWithEmptyKey(f) framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name) }) - ginkgo.It("should update ConfigMap successfully", func() { + ginkgo.It("should update ConfigMap successfully", func(ctx context.Context) { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) @@ -166,7 +166,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Attempt to create a ConfigMap. Patch the created ConfigMap. Fetching the ConfigMap MUST reflect changes. By fetching all the ConfigMaps via a Label selector it MUST find the ConfigMap by its static label and updated value. The ConfigMap must be deleted by Collection. */ - framework.ConformanceIt("should run through a ConfigMap lifecycle", func() { + framework.ConformanceIt("should run through a ConfigMap lifecycle", func(ctx context.Context) { testNamespaceName := f.Namespace.Name testConfigMapName := "test-configmap" + string(uuid.NewUUID()) diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go index f0844b96bba..7e3342fc7f4 100644 --- a/test/e2e/common/node/container_probe.go +++ b/test/e2e/common/node/container_probe.go @@ -69,7 +69,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod readiness probe, with initial delay Description: Create a Pod that is configured with an initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay. */ - framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() { + framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) { containerName := "test-webserver" p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80)) e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout) @@ -105,7 +105,7 @@ var _ = SIGDescribe("Probing container", func() { Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created, then the Pod MUST never be ready, never be running and restart count MUST be zero. */ - framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() { + framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) { p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80)) gomega.Consistently(func() (bool, error) { p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) @@ -132,7 +132,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using local file, restart Description: Create a Pod with liveness probe that uses ExecAction handler to cat /tmp/health file. The Container deletes the file /tmp/health after 10 seconds, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/ - framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { + framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"cat", "/tmp/health"}), @@ -149,7 +149,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using local file, no restart Description: Pod is created with liveness probe that uses 'exec' command to cat /tmp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0. */ - framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { + framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"cat", "/tmp/health"}), @@ -166,7 +166,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, restart Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return an http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. */ - framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() { + framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/healthz", 8080), InitialDelaySeconds: 15, @@ -181,7 +181,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using tcp socket, no restart Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but the socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0. */ - framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() { + framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: tcpSocketHandler(8080), InitialDelaySeconds: 15, @@ -196,7 +196,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, multiple restarts (slow) Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return an http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz endpoint on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment every time health check fails, measured up to 5 restarts.
*/ - framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func() { + framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/healthz", 8080), InitialDelaySeconds: 5, @@ -212,7 +212,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, failure Description: A Pod is created with liveness probe on http endpoint '/'. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero. */ - framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() { + framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/", 80), InitialDelaySeconds: 15, @@ -228,7 +228,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, container exec timeout, restart Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod. */ - ginkgo.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func() { + ginkgo.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}), @@ -245,7 +245,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod readiness probe, container exec timeout, not ready Description: A Pod is created with readiness probe with a Exec action on the Pod. If the readiness probe call does not return within the timeout specified, readiness probe MUST not be Ready. */ - ginkgo.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func() { + ginkgo.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} readinessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}), @@ -262,7 +262,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, container exec timeout, restart Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod. When ExecProbeTimeout feature gate is disabled and cluster is using dockershim, the timeout is ignored BUT a failing liveness probe MUST restart the Pod. 
*/ - ginkgo.It("should be restarted with a failing exec liveness probe that took longer than the timeout", func() { + ginkgo.It("should be restarted with a failing exec liveness probe that took longer than the timeout", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10 & exit 1"}), @@ -279,7 +279,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod http liveness probe, redirected to a local address Description: A Pod is created with liveness probe on http endpoint /redirect?loc=healthz. The http handler on the /redirect will redirect to the /healthz endpoint, which will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. */ - ginkgo.It("should be restarted with a local redirect http liveness probe", func() { + ginkgo.It("should be restarted with a local redirect http liveness probe", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/redirect?loc="+url.QueryEscape("/healthz"), 8080), InitialDelaySeconds: 15, @@ -294,7 +294,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod http liveness probe, redirected to a non-local address Description: A Pod is created with liveness probe on http endpoint /redirect with a redirect to http://0.0.0.0/. The http handler on the /redirect should not follow the redirect, but instead treat it as a success and generate an event. */ - ginkgo.It("should *not* be restarted with a non-local redirect http liveness probe", func() { + ginkgo.It("should *not* be restarted with a non-local redirect http liveness probe", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/redirect?loc="+url.QueryEscape("http://0.0.0.0/"), 8080), InitialDelaySeconds: 15, @@ -318,7 +318,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod startup probe restart Description: A Pod is created with a failing startup probe. The Pod MUST be killed and restarted incrementing restart count to 1, even if liveness would succeed. */ - ginkgo.It("should be restarted startup probe fails", func() { + ginkgo.It("should be restarted startup probe fails", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ @@ -347,7 +347,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe delayed (long) by startup probe Description: A Pod is created with failing liveness and startup probes. Liveness probe MUST NOT fail until startup probe expires. */ - ginkgo.It("should *not* be restarted by liveness probe because startup probe delays it", func() { + ginkgo.It("should *not* be restarted by liveness probe because startup probe delays it", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ @@ -376,7 +376,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe fails after startup success Description: A Pod is created with failing liveness probe and delayed startup probe that uses 'exec' command to cat /temp/health file. The Container is started by creating /tmp/startup after 10 seconds, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1. 
*/ - ginkgo.It("should be restarted by liveness probe after startup probe enables it", func() { + ginkgo.It("should be restarted by liveness probe after startup probe enables it", func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 10; echo ok >/tmp/startup; sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ @@ -405,7 +405,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod readiness probe, delayed by startup probe Description: A Pod is created with startup and readiness probes. The Container is started by creating /tmp/startup after 45 seconds, delaying the ready state by this amount of time. This is similar to the "Pod readiness probe, with initial delay" test. */ - ginkgo.It("should be ready immediately after startupProbe succeeds", func() { + ginkgo.It("should be ready immediately after startupProbe succeeds", func(ctx context.Context) { // Probe workers sleep at Kubelet start for a random time which is at most PeriodSeconds // this test requires both readiness and startup workers running before updating statuses // to avoid flakes, ensure sleep before startup (32s) > readinessProbe.PeriodSeconds @@ -460,7 +460,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Set terminationGracePeriodSeconds for livenessProbe Description: A pod with a long terminationGracePeriod is created with a shorter livenessProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used. */ - ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod]", func() { + ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "liveness-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000") longGracePeriod := int64(500) pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod @@ -488,7 +488,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Set terminationGracePeriodSeconds for startupProbe Description: A pod with a long terminationGracePeriod is created with a shorter startupProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used. */ - ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod]", func() { + ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) { pod := e2epod.NewAgnhostPod(f.Namespace.Name, "startup-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000") longGracePeriod := int64(500) pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod @@ -521,7 +521,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using grpc call, success Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero. 
*/ - ginkgo.It("should *not* be restarted with a GRPC liveness probe [NodeConformance]", func() { + ginkgo.It("should *not* be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ GRPC: &v1.GRPCAction{ @@ -544,7 +544,7 @@ var _ = SIGDescribe("Probing container", func() { Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint should fail because of wrong probe port. When liveness probe does fail then the restart count should +1. */ - ginkgo.It("should be restarted with a GRPC liveness probe [NodeConformance]", func() { + ginkgo.It("should be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ GRPC: &v1.GRPCAction{ @@ -559,7 +559,7 @@ var _ = SIGDescribe("Probing container", func() { RunLivenessTest(f, pod, 1, defaultObservationTimeout) }) - ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func() { + ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) { podName := "probe-test-" + string(uuid.NewUUID()) podClient := e2epod.NewPodClient(f) terminationGracePeriod := int64(30) @@ -623,7 +623,7 @@ done framework.ExpectNoError(err) }) - ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func() { + ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func(ctx context.Context) { podName := "probe-test-" + string(uuid.NewUUID()) podClient := e2epod.NewPodClient(f) terminationGracePeriod := int64(30) diff --git a/test/e2e/common/node/containers.go b/test/e2e/common/node/containers.go index 0aa07fc25e9..6b3370ef2a9 100644 --- a/test/e2e/common/node/containers.go +++ b/test/e2e/common/node/containers.go @@ -17,6 +17,8 @@ limitations under the License. package node import ( + "context" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" @@ -36,7 +38,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, without command and arguments Description: Default command and arguments from the container image entrypoint MUST be used when Pod does not specify the container command */ - framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() { + framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name) pod.Spec.Containers[0].Args = nil pod = e2epod.NewPodClient(f).Create(pod) @@ -56,7 +58,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with arguments Description: Default command and from the container image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified. 
*/ - framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func() { + framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{ "[/agnhost entrypoint-tester override arguments]", @@ -70,7 +72,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with command Description: Default command from the container image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image. */ - framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func() { + framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} @@ -84,7 +86,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with command and arguments Description: Default command and arguments from the container image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image. */ - framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() { + framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} diff --git a/test/e2e/common/node/downwardapi.go b/test/e2e/common/node/downwardapi.go index 5b16fbe02d2..c43c8c9a0e3 100644 --- a/test/e2e/common/node/downwardapi.go +++ b/test/e2e/common/node/downwardapi.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -41,7 +42,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for name, namespace and ip Description: Downward API MUST expose Pod and Container fields as environment variables. Specify Pod Name, namespace and IP as environment variable in the Pod Spec are visible at runtime in the container. */ - framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() { + framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -87,7 +88,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for host ip Description: Downward API MUST expose Pod and Container fields as environment variables. Specify host IP as environment variable in the Pod Spec are visible at runtime in the container. 
*/ - framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() { + framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -108,7 +109,7 @@ var _ = SIGDescribe("Downward API", func() { testDownwardAPI(f, podName, env, expectations) }) - ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func() { + ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -163,7 +164,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for CPU and memory limits and requests Description: Downward API MUST expose CPU request and Memory request set through environment variables at runtime in the container. */ - framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() { + framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -214,7 +215,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for default CPU and memory limits and requests Description: Downward API MUST expose CPU request and Memory limits set through environment variables at runtime in the container. */ - framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() { + framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -264,7 +265,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for Pod UID Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container. 
*/ - framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() { + framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -291,7 +292,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("Downward API tests for hugepages", func() { - ginkgo.It("should provide container's limits.hugepages- and requests.hugepages- as env vars", func() { + ginkgo.It("should provide container's limits.hugepages- and requests.hugepages- as env vars", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -346,7 +347,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI testDownwardAPIUsingPod(f, pod, env, expectations) }) - ginkgo.It("should provide default limits.hugepages- from node allocatable", func() { + ginkgo.It("should provide default limits.hugepages- from node allocatable", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { diff --git a/test/e2e/common/node/ephemeral_containers.go b/test/e2e/common/node/ephemeral_containers.go index affe1cb12e7..09c3edf2f2a 100644 --- a/test/e2e/common/node/ephemeral_containers.go +++ b/test/e2e/common/node/ephemeral_containers.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -42,7 +43,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() { // Release: 1.25 // Testname: Ephemeral Container Creation // Description: Adding an ephemeral container to pod.spec MUST result in the container running. - framework.ConformanceIt("will start an ephemeral container in an existing pod", func() { + framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) { ginkgo.By("creating a target pod") pod := podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"}, diff --git a/test/e2e/common/node/expansion.go b/test/e2e/common/node/expansion.go index 28ddc1e60a8..c927b48a410 100644 --- a/test/e2e/common/node/expansion.go +++ b/test/e2e/common/node/expansion.go @@ -17,6 +17,8 @@ limitations under the License. package node import ( + "context" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -41,7 +43,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: Environment variables, expansion Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values. */ - framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() { + framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func(ctx context.Context) { envVars := []v1.EnvVar{ { Name: "FOO", @@ -70,7 +72,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: Environment variables, command expansion Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values. 
 	*/
-	framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
+	framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func(ctx context.Context) {
 		envVars := []v1.EnvVar{
 			{
 				Name: "TEST_VAR",
@@ -89,7 +91,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   Testname: Environment variables, command argument expansion
 	   Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
 	*/
-	framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
+	framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func(ctx context.Context) {
 		envVars := []v1.EnvVar{
 			{
 				Name: "TEST_VAR",
@@ -109,7 +111,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   Testname: VolumeSubpathEnvExpansion, subpath expansion
 	   Description: Make sure a container's subpath can be set using an expansion of environment variables.
 	*/
-	framework.ConformanceIt("should allow substituting values in a volume subpath", func() {
+	framework.ConformanceIt("should allow substituting values in a volume subpath", func(ctx context.Context) {
 		envVars := []v1.EnvVar{
 			{
 				Name: "POD_NAME",
@@ -149,7 +151,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   Testname: VolumeSubpathEnvExpansion, subpath with backticks
 	   Description: Make sure a container's subpath can not be set using an expansion of environment variables when backticks are supplied.
 	*/
-	framework.ConformanceIt("should fail substituting values in a volume subpath with backticks [Slow]", func() {
+	framework.ConformanceIt("should fail substituting values in a volume subpath with backticks [Slow]", func(ctx context.Context) {
 
 		envVars := []v1.EnvVar{
 			{
@@ -183,7 +185,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   Testname: VolumeSubpathEnvExpansion, subpath with absolute path
 	   Description: Make sure a container's subpath can not be set using an expansion of environment variables when absolute path is supplied.
 	*/
-	framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path [Slow]", func() {
+	framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path [Slow]", func(ctx context.Context) {
 		absolutePath := "/tmp"
 		if framework.NodeOSDistroIs("windows") {
 			// Windows does not typically have a C:\tmp folder.
@@ -222,7 +224,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   Testname: VolumeSubpathEnvExpansion, subpath ready from failed state
 	   Description: Verify that a failing subpath expansion can be modified during the lifecycle of a container.
 	*/
-	framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow]", func() {
+	framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow]", func(ctx context.Context) {
 
 		envVars := []v1.EnvVar{
 			{
@@ -294,7 +296,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
 	   3. successful expansion of the subpathexpr isn't required for volume cleanup
 	*/
-	framework.ConformanceIt("should succeed in writing subpaths in container [Slow]", func() {
+	framework.ConformanceIt("should succeed in writing subpaths in container [Slow]", func(ctx context.Context) {
 
 		envVars := []v1.EnvVar{
 			{
diff --git a/test/e2e/common/node/init_container.go b/test/e2e/common/node/init_container.go
index fd623a84a53..548d25d2b25 100644
--- a/test/e2e/common/node/init_container.go
+++ b/test/e2e/common/node/init_container.go
@@ -174,7 +174,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 	   and the system is not going to restart any of these containers when Pod has restart policy as RestartNever.
 	*/
-	framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
+	framework.ConformanceIt("should invoke init containers on a RestartNever pod", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -220,7 +220,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 			},
 		}
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodCompleted),
@@ -252,7 +252,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 	   and at least one container is still running or is in the process of being restarted when Pod has restart policy as RestartAlways.
 	*/
-	framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
+	framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -301,7 +301,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 			},
 		}
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodRunning))
 		framework.ExpectNoError(err)
@@ -331,7 +331,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 	   and Pod has restarted for few occurrences and pod has restart policy as RestartAlways.
 	*/
-	framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
+	framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -382,7 +382,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		}
 
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(
 			ctx,
@@ -455,7 +455,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 	   Description: Ensure that app container is not started when at least one InitContainer fails to start and Pod has restart policy as RestartNever.
 	*/
-	framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
+	framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-init-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -507,7 +507,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		}
 
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(
 			ctx, startedPod.ResourceVersion, w,
diff --git a/test/e2e/common/node/kubelet.go b/test/e2e/common/node/kubelet.go
index 3848ff9592c..31184f4f4ac 100644
--- a/test/e2e/common/node/kubelet.go
+++ b/test/e2e/common/node/kubelet.go
@@ -49,7 +49,7 @@ var _ = SIGDescribe("Kubelet", func() {
 	   Testname: Kubelet, log output, default
 	   Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
 	*/
-	framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
+	framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) {
 		podClient.CreateSync(&v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: podName,
@@ -107,7 +107,7 @@ var _ = SIGDescribe("Kubelet", func() {
 	   Testname: Kubelet, failed pod, terminated reason
 	   Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason.
 	*/
-	framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
+	framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) {
 		gomega.Eventually(func() error {
 			podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
 			if err != nil {
@@ -132,7 +132,7 @@ var _ = SIGDescribe("Kubelet", func() {
 	   Testname: Kubelet, failed pod, delete
 	   Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
 	*/
-	framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
+	framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) {
 		err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{})
 		gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
 	})
@@ -145,7 +145,7 @@ var _ = SIGDescribe("Kubelet", func() {
 	   Testname: Kubelet, hostAliases
 	   Description: Create a Pod with hostAliases and a container with command to output /etc/hosts entries. Pod's logs MUST have matching entries of specified hostAliases to the output of /etc/hosts entries.
 	*/
-	framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func() {
+	framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func(ctx context.Context) {
 		pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil, "etc-hosts")
 		// Don't restart the Pod since it is expected to exit
 		pod.Spec.RestartPolicy = v1.RestartPolicyNever
@@ -181,7 +181,7 @@ var _ = SIGDescribe("Kubelet", func() {
 	   Description: Create a Pod with security context set with ReadOnlyRootFileSystem set to true. The Pod then tries to write to the /file on the root, write operation to the root filesystem MUST fail as expected. This test is marked LinuxOnly since Windows does not support creating containers with read-only access.
 	*/
-	framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func() {
+	framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
 		isReadOnly := true
 		podClient.CreateSync(&v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
diff --git a/test/e2e/common/node/kubelet_etc_hosts.go b/test/e2e/common/node/kubelet_etc_hosts.go
index 2350304067f..3e01cf557b8 100644
--- a/test/e2e/common/node/kubelet_etc_hosts.go
+++ b/test/e2e/common/node/kubelet_etc_hosts.go
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+	"context"
 	"strings"
 	"time"
 
@@ -60,7 +61,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() {
 	   3. The Pod with hostNetwork=true , /etc/hosts file MUST not be managed by the Kubelet. This test is marked LinuxOnly since Windows cannot mount individual files in Containers.
 	*/
-	framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func() {
+	framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
 		ginkgo.By("Setting up the test")
 		config.setup()
 
diff --git a/test/e2e/common/node/lease.go b/test/e2e/common/node/lease.go
index 4d090e71ce2..7bcb7d1cd3a 100644
--- a/test/e2e/common/node/lease.go
+++ b/test/e2e/common/node/lease.go
@@ -69,7 +69,7 @@ var _ = SIGDescribe("Lease", func() {
 	   return just the remaining lease. Delete the lease; delete MUST be successful. Get the lease; get MUST return not found error.
 	*/
-	framework.ConformanceIt("lease API should be available", func() {
+	framework.ConformanceIt("lease API should be available", func(ctx context.Context) {
 		leaseClient := f.ClientSet.CoordinationV1().Leases(f.Namespace.Name)
 		name := "lease"
 
diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go
index 6acc72b90a7..716d363760a 100644
--- a/test/e2e/common/node/lifecycle_hook.go
+++ b/test/e2e/common/node/lifecycle_hook.go
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -131,7 +132,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, post start exec hook
 	   Description: When a post start handler is specified in the container lifecycle using a 'Exec' action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed.
 	*/
-	framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
+	framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PostStart: &v1.LifecycleHandler{
 				Exec: &v1.ExecAction{
@@ -148,7 +149,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, prestop exec hook
 	   Description: When a pre-stop handler is specified in the container lifecycle using a 'Exec' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed.
 	*/
-	framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
+	framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PreStop: &v1.LifecycleHandler{
 				Exec: &v1.ExecAction{
@@ -164,7 +165,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, post start http hook
 	   Description: When a post start handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed.
 	*/
-	framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
+	framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PostStart: &v1.LifecycleHandler{
 				HTTPGet: &v1.HTTPGetAction{
@@ -186,7 +187,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, poststart https hook
 	   Description: When a post-start handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a post-start that invokes the server pod to validate that the post-start is executed.
 	*/
-	ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func() {
+	ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PostStart: &v1.LifecycleHandler{
 				HTTPGet: &v1.HTTPGetAction{
@@ -209,7 +210,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, prestop http hook
 	   Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
 	*/
-	framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
+	framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PreStop: &v1.LifecycleHandler{
 				HTTPGet: &v1.HTTPGetAction{
@@ -231,7 +232,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 	   Testname: Pod Lifecycle, prestop https hook
 	   Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
 	*/
-	ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func() {
+	ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) {
 		lifecycle := &v1.Lifecycle{
 			PreStop: &v1.LifecycleHandler{
 				HTTPGet: &v1.HTTPGetAction{
diff --git a/test/e2e/common/node/node_lease.go b/test/e2e/common/node/node_lease.go
index 43ddefa50b8..7cd0fdb385f 100644
--- a/test/e2e/common/node/node_lease.go
+++ b/test/e2e/common/node/node_lease.go
@@ -49,7 +49,7 @@ var _ = SIGDescribe("NodeLease", func() {
 	})
 
 	ginkgo.Context("NodeLease", func() {
-		ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
+		ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func(ctx context.Context) {
 			leaseClient := f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease)
 			var (
 				err error
@@ -87,7 +87,7 @@ var _ = SIGDescribe("NodeLease", func() {
 				time.Duration(*lease.Spec.LeaseDurationSeconds/4)*time.Second)
 		})
 
-		ginkgo.It("should have OwnerReferences set", func() {
+		ginkgo.It("should have OwnerReferences set", func(ctx context.Context) {
 			leaseClient := f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease)
 			var (
 				err error
@@ -111,7 +111,7 @@ var _ = SIGDescribe("NodeLease", func() {
 			}
 		})
 
-		ginkgo.It("the kubelet should report node status infrequently", func() {
+		ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) {
 			ginkgo.By("wait until node is ready")
 			e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
 
diff --git a/test/e2e/common/node/pod_admission.go b/test/e2e/common/node/pod_admission.go
index 899db70c9c8..5ec022528f8 100644
--- a/test/e2e/common/node/pod_admission.go
+++ b/test/e2e/common/node/pod_admission.go
@@ -36,7 +36,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
 	f := framework.NewDefaultFramework("pod-os-rejection")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
 	ginkgo.Context("Kubelet", func() {
-		ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func() {
+		ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
 			linuxNode, err := findLinuxNode(f)
 			framework.ExpectNoError(err)
 			pod := &v1.Pod{
diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go
index 80ea8661158..b017a2a80b6 100644
--- a/test/e2e/common/node/pods.go
+++ b/test/e2e/common/node/pods.go
@@ -201,7 +201,7 @@ var _ = SIGDescribe("Pods", func() {
 	   Testname: Pods, assigned hostip
 	   Description: Create a Pod. Pod status MUST return successfully and contains a valid IP address.
 	*/
-	framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
+	framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) {
 		name := "pod-hostip-" + string(uuid.NewUUID())
 		testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Pods", func() {
 	   Testname: Pods, lifecycle
 	   Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. Pod then deleted, The pod deletion timestamp is observed. The watch MUST return the pod deleted event. Query with the original selector for the Pod MUST return empty list.
 	*/
-	framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
+	framework.ConformanceIt("should be submitted and removed [NodeConformance]", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-submit-remove-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -266,7 +266,7 @@ var _ = SIGDescribe("Pods", func() {
 		_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
 		defer w.Stop()
 
-		ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
+		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 		defer cancelCtx()
 		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
 			framework.Failf("Timeout while waiting to Pod informer to sync")
@@ -341,7 +341,7 @@ var _ = SIGDescribe("Pods", func() {
 	   Testname: Pods, update
 	   Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
 	*/
-	framework.ConformanceIt("should be updated [NodeConformance]", func() {
+	framework.ConformanceIt("should be updated [NodeConformance]", func(ctx context.Context) {
 		ginkgo.By("creating the pod")
 		name := "pod-update-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
@@ -395,7 +395,7 @@ var _ = SIGDescribe("Pods", func() {
 	   Testname: Pods, ActiveDeadlineSeconds
 	   Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate of the specified time elapses.
*/ - framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() { + framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -441,7 +441,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, service environment variables Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then the Pod MUST have the fooservice environment variables available from this new Pod. The new create Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values. */ - framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() { + framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func(ctx context.Context) { // Make a pod that will be a service. // This pod serves its hostname via HTTP. serverName := "server-envvars-" + string(uuid.NewUUID()) @@ -533,7 +533,7 @@ var _ = SIGDescribe("Pods", func() { Description: A Pod is created. Websocket is created to retrieve exec command output from this pod. Message retrieved form Websocket MUST match with expected exec command output. */ - framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() { + framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "unable to get base config") @@ -615,7 +615,7 @@ var _ = SIGDescribe("Pods", func() { Description: A Pod is created. Websocket is created to retrieve log of a container from this pod. Message retrieved form Websocket MUST match with container's output. 
*/ - framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() { + framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "unable to get base config") @@ -673,7 +673,7 @@ var _ = SIGDescribe("Pods", func() { }) // Slow (~7 mins) - ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() { + ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func(ctx context.Context) { podName := "pod-back-off-image" containerName := "back-off" pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ @@ -714,7 +714,7 @@ var _ = SIGDescribe("Pods", func() { }) // Slow by design (~27 mins) issue #19027 - ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() { + ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func(ctx context.Context) { podName := "back-off-cap" containerName := "back-off-cap" pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ @@ -768,7 +768,7 @@ var _ = SIGDescribe("Pods", func() { } }) - ginkgo.It("should support pod readiness gates [NodeConformance]", func() { + ginkgo.It("should support pod readiness gates [NodeConformance]", func(ctx context.Context) { podName := "pod-ready" readinessGate1 := "k8s.io/test-condition1" readinessGate2 := "k8s.io/test-condition2" @@ -842,7 +842,7 @@ var _ = SIGDescribe("Pods", func() { Description: A set of pods is created with a label selector which MUST be found when listed. The set of pods is deleted and MUST NOT show up when listed by its label selector. */ - framework.ConformanceIt("should delete a collection of pods", func() { + framework.ConformanceIt("should delete a collection of pods", func(ctx context.Context) { podTestNames := []string{"test-pod-1", "test-pod-2", "test-pod-3"} one := int64(1) @@ -893,7 +893,7 @@ var _ = SIGDescribe("Pods", func() { patching the label and the pod data. When checking and replacing the PodStatus it MUST succeed. It MUST succeed when deleting the Pod. */ - framework.ConformanceIt("should run through the lifecycle of Pods and PodStatus", func() { + framework.ConformanceIt("should run through the lifecycle of Pods and PodStatus", func(ctx context.Context) { podResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} testNamespaceName := f.Namespace.Name testPodName := "pod-test" @@ -932,7 +932,7 @@ var _ = SIGDescribe("Pods", func() { framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName) ginkgo.By("watching for Pod to be ready") - ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart) + ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) { if pod, ok := event.Object.(*v1.Pod); ok { @@ -1080,7 +1080,7 @@ var _ = SIGDescribe("Pods", func() { MUST succeed. Given the patching of the pod status, the fields MUST equal the new values. 
*/ - framework.ConformanceIt("should patch a pod status", func() { + framework.ConformanceIt("should patch a pod status", func(ctx context.Context) { ns := f.Namespace.Name podClient := f.ClientSet.CoreV1().Pods(ns) podName := "pod-" + utilrand.String(5) diff --git a/test/e2e/common/node/podtemplates.go b/test/e2e/common/node/podtemplates.go index 5ba6a2e092c..bf11b7148ac 100644 --- a/test/e2e/common/node/podtemplates.go +++ b/test/e2e/common/node/podtemplates.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("PodTemplates", func() { Description: Attempt to create a PodTemplate. Patch the created PodTemplate. Fetching the PodTemplate MUST reflect changes. By fetching all the PodTemplates via a Label selector it MUST find the PodTemplate by it's static label and updated value. The PodTemplate must be deleted. */ - framework.ConformanceIt("should run the lifecycle of PodTemplates", func() { + framework.ConformanceIt("should run the lifecycle of PodTemplates", func(ctx context.Context) { testNamespaceName := f.Namespace.Name podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID()) @@ -119,7 +119,7 @@ var _ = SIGDescribe("PodTemplates", func() { Description: A set of Pod Templates is created with a label selector which MUST be found when listed. The set of Pod Templates is deleted and MUST NOT show up when listed by its label selector. */ - framework.ConformanceIt("should delete a collection of pod templates", func() { + framework.ConformanceIt("should delete a collection of pod templates", func(ctx context.Context) { podTemplateNames := []string{"test-podtemplate-1", "test-podtemplate-2", "test-podtemplate-3"} ginkgo.By("Create set of pod templates") @@ -173,7 +173,7 @@ var _ = SIGDescribe("PodTemplates", func() { Attempt to replace the PodTemplate to include a new annotation which MUST succeed. The annotation MUST be found in the new PodTemplate. */ - framework.ConformanceIt("should replace a pod template", func() { + framework.ConformanceIt("should replace a pod template", func(ctx context.Context) { ptClient := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name) ptName := "podtemplate-" + utilrand.String(5) diff --git a/test/e2e/common/node/privileged.go b/test/e2e/common/node/privileged.go index 7d7d7b9e032..5d0db7718a2 100644 --- a/test/e2e/common/node/privileged.go +++ b/test/e2e/common/node/privileged.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "github.com/onsi/ginkgo/v2" @@ -50,7 +51,7 @@ var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() { notPrivilegedContainer: "not-privileged-container", } - ginkgo.It("should enable privileged commands [LinuxOnly]", func() { + ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) { // Windows does not support privileged containers. ginkgo.By("Creating a pod with a privileged container") config.createPods() diff --git a/test/e2e/common/node/runtime.go b/test/e2e/common/node/runtime.go index 116e6148e9e..e8c2f106fc1 100644 --- a/test/e2e/common/node/runtime.go +++ b/test/e2e/common/node/runtime.go @@ -49,7 +49,7 @@ var _ = SIGDescribe("Container Runtime", func() { Testname: Container Runtime, Restart Policy, Pod Phases Description: If the restart policy is set to 'Always', Pod MUST be restarted when terminated, If restart policy is 'OnFailure', Pod MUST be started only if it is terminated with non-zero exit code. If the restart policy is 'Never', Pod MUST never be restarted. All these three test cases MUST verify the restart counts accordingly. 
*/ - framework.ConformanceIt("should run with the expected status [NodeConformance]", func() { + framework.ConformanceIt("should run with the expected status [NodeConformance]", func(ctx context.Context) { restartCountVolumeName := "restart-count" restartCountVolumePath := "/restart-count" testContainer := v1.Container{ @@ -171,7 +171,7 @@ while true; do sleep 1; done gomega.Expect(c.Delete()).To(gomega.Succeed()) } - ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func() { + ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -192,7 +192,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessagePath, non-root user and non-default path Description: Create a pod with a container to run it as a non-root user with a custom TerminationMessagePath set. Pod redirects the output to the provided path successfully. When the container is terminated, the termination message MUST match the expected output logged in the provided custom path. */ - framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]", func() { + framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]", func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -213,7 +213,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from container's log output of failing container Description: Create a pod with an container. Container's output is recorded in log and container exits with an error. When container is terminated, termination message MUST match the expected output recorded from container's log. */ - framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() { + framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -229,7 +229,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from log output of succeeding container Description: Create a pod with an container. Container's output is recorded in log and container exits successfully without an error. When container is terminated, terminationMessage MUST have no content as container succeed. */ - framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() { + framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -245,7 +245,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from file of succeeding container Description: Create a pod with an container. 
Container's output is recorded in a file and the container exits successfully without an error. When container is terminated, terminationMessage MUST match with the content from file. */ - framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() { + framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -368,23 +368,23 @@ while true; do sleep 1; done } } - ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func() { + ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage) imagePullTest(image, false, v1.PodPending, true, false) }) - ginkgo.It("should be able to pull image [NodeConformance]", func() { + ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) { // NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows. image := imageutils.GetE2EImage(imageutils.Agnhost) imagePullTest(image, false, v1.PodRunning, false, false) }) - ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func() { + ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) imagePullTest(image, false, v1.PodPending, true, false) }) - ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func() { + ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) isWindows := false if framework.NodeOSDistroIs("windows") { diff --git a/test/e2e/common/node/runtimeclass.go b/test/e2e/common/node/runtimeclass.go index e3c05bdadad..d8b4ca93345 100644 --- a/test/e2e/common/node/runtimeclass.go +++ b/test/e2e/common/node/runtimeclass.go @@ -52,13 +52,13 @@ var _ = SIGDescribe("RuntimeClass", func() { Testname: Pod with the non-existing RuntimeClass is rejected. Description: The Pod requesting the non-existing RuntimeClass must be rejected. */ - framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func() { + framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) { rcName := f.Namespace.Name + "-nonexistent" expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName)) }) // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed. 
- ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func() { + ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { handler := f.Namespace.Name + "-handler" rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil) defer deleteRuntimeClass(f, rcName) @@ -82,7 +82,7 @@ var _ = SIGDescribe("RuntimeClass", func() { // This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes. // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working. - ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() { + ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { // Requires special setup of test-handler which is only done in GCE kube-up environment // see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076 e2eskipper.SkipUnlessProviderIs("gce") @@ -101,7 +101,7 @@ var _ = SIGDescribe("RuntimeClass", func() { depends on container runtime and preconfigured handler. Runtime-specific functionality is not being tested here. */ - framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func() { + framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) { rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil) defer deleteRuntimeClass(f, rcName) pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName)) @@ -126,7 +126,7 @@ var _ = SIGDescribe("RuntimeClass", func() { depends on container runtime and preconfigured handler. Runtime-specific functionality is not being tested here. */ - framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func() { + framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) { rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{ PodFixed: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"), @@ -153,7 +153,7 @@ var _ = SIGDescribe("RuntimeClass", func() { Testname: Pod with the deleted RuntimeClass is rejected. Description: Pod requesting the deleted RuntimeClass must be rejected. */ - framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func() { + framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) { rcName := createRuntimeClass(f, "delete-me", "runc", nil) rcClient := f.ClientSet.NodeV1().RuntimeClasses() @@ -186,7 +186,7 @@ var _ = SIGDescribe("RuntimeClass", func() { The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery document. The runtimeclasses resource must support create, get, list, watch, update, patch, delete, and deletecollection. 
*/ - framework.ConformanceIt(" should support RuntimeClasses API operations", func() { + framework.ConformanceIt(" should support RuntimeClasses API operations", func(ctx context.Context) { // Setup rcVersion := "v1" rcClient := f.ClientSet.NodeV1().RuntimeClasses() diff --git a/test/e2e/common/node/secrets.go b/test/e2e/common/node/secrets.go index 330b9fa4b05..8305aedc1b3 100644 --- a/test/e2e/common/node/secrets.go +++ b/test/e2e/common/node/secrets.go @@ -43,7 +43,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets, pod environment field Description: Create a secret. Create a Pod with Container that declares a environment variable which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret. */ - framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func(ctx context.Context) { name := "secret-test-" + string(uuid.NewUUID()) secret := secretForTest(f.Namespace.Name, name) @@ -92,7 +92,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets, pod environment from source Description: Create a secret. Create a Pod with Container that declares a environment variable using 'EnvFrom' which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret. */ - framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() { + framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) { name := "secret-test-" + string(uuid.NewUUID()) secret := secretForTest(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) @@ -137,7 +137,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets, with empty-key Description: Attempt to create a Secret with an empty key. The creation MUST fail. */ - framework.ConformanceIt("should fail to create secret due to empty secret key", func() { + framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) { secret, err := createEmptyKeySecretForTest(f) framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name) }) @@ -151,7 +151,7 @@ var _ = SIGDescribe("Secrets", func() { The Secret is deleted by it's static Label. Secrets are listed finally, the list MUST NOT include the originally created Secret. 
*/ - framework.ConformanceIt("should patch a secret", func() { + framework.ConformanceIt("should patch a secret", func(ctx context.Context) { ginkgo.By("creating a secret") secretTestName := "test-secret-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/node/security_context.go b/test/e2e/common/node/security_context.go index f8b9aa215d0..99c390edc96 100644 --- a/test/e2e/common/node/security_context.go +++ b/test/e2e/common/node/security_context.go @@ -72,7 +72,7 @@ var _ = SIGDescribe("Security Context", func() { } } - ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() { + ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) { // with hostUsers=false the pod must use a new user namespace podClient := e2epod.PodClientNS(f, f.Namespace.Name) @@ -110,7 +110,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - ginkgo.It("must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() { + ginkgo.It("must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) { // with hostUsers=true the pod must use the host user namespace pod := makePod(true) // When running in the host's user namespace, the /proc/self/uid_map file content looks like: @@ -121,7 +121,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() { + ginkgo.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) { // Create all volume types supported: configmap, secret, downwardAPI, projected. // Create configmap. @@ -245,7 +245,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() { + ginkgo.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) { // Create configmap. name := "userns-volumes-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) @@ -344,7 +344,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is created with runAsUser option by passing uid 65534 to run as unpriviledged user. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. */ - framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { createAndWaitUserPod(65534) }) @@ -355,7 +355,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e can not be promoted to Conformance because a Conformant platform may not allow to run containers with 'uid 0' or running privileged operations. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. 
*/ - ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func() { + ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { createAndWaitUserPod(0) }) }) @@ -385,7 +385,7 @@ var _ = SIGDescribe("Security Context", func() { } } - ginkgo.It("should run with an explicit non-root user ID [LinuxOnly]", func() { + ginkgo.It("should run with an explicit non-root user ID [LinuxOnly]", func(ctx context.Context) { // creates a pod with RunAsUser, which is not supported on Windows. e2eskipper.SkipIfNodeOSDistroIs("windows") name := "explicit-nonroot-uid" @@ -395,7 +395,7 @@ var _ = SIGDescribe("Security Context", func() { podClient.WaitForSuccess(name, framework.PodStartTimeout) framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1000")) }) - ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func() { + ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) { // creates a pod with RunAsUser, which is not supported on Windows. e2eskipper.SkipIfNodeOSDistroIs("windows") name := "explicit-root-uid" @@ -407,7 +407,7 @@ var _ = SIGDescribe("Security Context", func() { gomega.Expect(ev).NotTo(gomega.BeNil()) framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer) }) - ginkgo.It("should run with an image specified user ID", func() { + ginkgo.It("should run with an image specified user ID", func(ctx context.Context) { name := "implicit-nonroot-uid" pod := makeNonRootPod(name, nonRootImage, nil) podClient.Create(pod) @@ -415,7 +415,7 @@ var _ = SIGDescribe("Security Context", func() { podClient.WaitForSuccess(name, framework.PodStartTimeout) framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234")) }) - ginkgo.It("should not run without a specified user ID", func() { + ginkgo.It("should not run without a specified user ID", func(ctx context.Context) { name := "implicit-root-uid" pod := makeNonRootPod(name, rootImage, nil) pod = podClient.Create(pod) @@ -473,7 +473,7 @@ var _ = SIGDescribe("Security Context", func() { At this moment we are not considering this test for Conformance due to use of SecurityContext. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access. */ - ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func() { + ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { createAndWaitUserPod(true) }) @@ -483,7 +483,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is configured to run with readOnlyRootFilesystem to false. Write operation MUST be allowed and Pod MUST be in Succeeded state. */ - framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() { + framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) { createAndWaitUserPod(false) }) }) @@ -525,7 +525,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Create a container to run in unprivileged mode by setting pod's SecurityContext Privileged option as false. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command. 
*/ - framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := createAndWaitUserPod(false) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { @@ -538,7 +538,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func() { + ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) { podName := createAndWaitUserPod(true) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { @@ -591,7 +591,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e Can not be promoted to Conformance as it is Container Runtime dependent and not all conformant platforms will require this behavior. [LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation. */ - ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() { + ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) if err := createAndMatchOutput(podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) @@ -606,7 +606,7 @@ var _ = SIGDescribe("Security Context", func() { When the container is run, container's output MUST match with expected output verifying container ran with given uid i.e. uid=1000. [LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation. */ - framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "alpine-nnp-false-" + string(uuid.NewUUID()) apeFalse := false if err := createAndMatchOutput(podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil { @@ -623,7 +623,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e Can not be promoted to Conformance as it is Container Runtime dependent and runtime may not allow to run. [LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID. */ - ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func() { + ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "alpine-nnp-true-" + string(uuid.NewUUID()) apeTrue := true if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil { diff --git a/test/e2e/common/node/sysctl.go b/test/e2e/common/node/sysctl.go index 136b2661360..3cad7c6d76b 100644 --- a/test/e2e/common/node/sysctl.go +++ b/test/e2e/common/node/sysctl.go @@ -74,7 +74,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { Description: Pod is created with kernel.shm_rmid_forced sysctl. 
Kernel.shm_rmid_forced must be set to 1 [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls */ - framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", func() { + framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ @@ -120,7 +120,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { Description: Pod is created with one valid and two invalid sysctls. Pod should not apply invalid sysctls. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls */ - framework.ConformanceIt("should reject invalid sysctls [MinimumKubeletVersion:1.21]", func() { + framework.ConformanceIt("should reject invalid sysctls [MinimumKubeletVersion:1.21]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ @@ -156,7 +156,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { }) // Pod is created with kernel.msgmax, an unsafe sysctl. - ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]", func() { + ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ @@ -182,7 +182,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { Description: Pod is created with kernel/shm_rmid_forced sysctl. Support slashes as sysctl separator. The '/' separator is also accepted in place of a '.' [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls */ - ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", func() { + ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ diff --git a/test/e2e/common/storage/configmap_volume.go b/test/e2e/common/storage/configmap_volume.go index 0a61b2d6a89..610ba3f87dc 100644 --- a/test/e2e/common/storage/configmap_volume.go +++ b/test/e2e/common/storage/configmap_volume.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, without mapping Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { doConfigMapE2EWithoutMappings(f, false, 0, nil) }) @@ -54,12 +54,12 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. 
The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400' This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) }) - ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -71,11 +71,11 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, without mapping, non-root user Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { doConfigMapE2EWithoutMappings(f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithoutMappings(f, true, 1001, nil) @@ -86,7 +86,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, with mapping Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { doConfigMapE2EWithMappings(f, false, 0, nil) }) @@ -96,7 +96,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. 
The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400' This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) doConfigMapE2EWithMappings(f, false, 0, &mode) }) @@ -106,11 +106,11 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, with mapping, non-root user Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { doConfigMapE2EWithMappings(f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithMappings(f, true, 1001, nil) @@ -121,7 +121,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, update Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. */ - framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -172,7 +172,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, text data, binary data Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod. 
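The ConfigMap volume specs above vary three knobs: a defaultMode for the whole volume, per-item modes, and key-to-path mappings. A sketch of the volume source those knobs live on, assuming the standard core/v1 types (the ConfigMap name, key, and paths are placeholders):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// configMapVolume sketches a ConfigMap-backed volume: DefaultMode applies to
// every projected file, while an Items entry can remap a key to a custom path
// and override the mode for just that file.
func configMapVolume(cmName string) v1.Volume {
	defaultMode := int32(0400) // -r--------
	itemMode := int32(0400)
	return v1.Volume{
		Name: "configmap-volume",
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{Name: cmName},
				DefaultMode:          &defaultMode,
				Items: []v1.KeyToPath{
					{Key: "data-1", Path: "path/to/data-1", Mode: &itemMode},
				},
			},
		},
	}
}

func main() {
	vol := configMapVolume("configmap-test-volume-example")
	fmt.Printf("volume %q projects key %q\n", vol.Name, vol.VolumeSource.ConfigMap.Items[0].Key)
}
```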
*/ - framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -237,7 +237,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, create, update and delete Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in a error reading that item(file). */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -420,7 +420,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, multiple volume maps Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts. */ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) { var ( name = "configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "configmap-volume" @@ -501,7 +501,7 @@ var _ = SIGDescribe("ConfigMap", func() { Try to update the ConfigMap`s metadata (labels), the update must succeed. Try to delete the ConfigMap, the deletion must succeed. */ - framework.ConformanceIt("should be immutable if `immutable` field is set", func() { + framework.ConformanceIt("should be immutable if `immutable` field is set", func(ctx context.Context) { name := "immutable" configMap := newConfigMap(f, name) @@ -554,7 +554,7 @@ var _ = SIGDescribe("ConfigMap", func() { // The pod is in pending during volume creation until the configMap objects are available // or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional. // Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) @@ -563,7 +563,7 @@ var _ = SIGDescribe("ConfigMap", func() { // ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap, // the volume setup will error unless it is marked optional, during the pod creation. 
// Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) diff --git a/test/e2e/common/storage/downwardapi.go b/test/e2e/common/storage/downwardapi.go index ecb08e1e2ad..cbe295259bc 100644 --- a/test/e2e/common/storage/downwardapi.go +++ b/test/e2e/common/storage/downwardapi.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -36,7 +37,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("Downward API tests for local ephemeral storage", func() { - ginkgo.It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() { + ginkgo.It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -64,7 +65,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag testDownwardAPIForEphemeralStorage(f, podName, env, expectations) }) - ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func() { + ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { diff --git a/test/e2e/common/storage/downwardapi_volume.go b/test/e2e/common/storage/downwardapi_volume.go index bdfcd2148ed..24c818f5356 100644 --- a/test/e2e/common/storage/downwardapi_volume.go +++ b/test/e2e/common/storage/downwardapi_volume.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "time" @@ -50,7 +51,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, pod name Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide podname only [NodeConformance]", func() { + framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -65,7 +66,7 @@ var _ = SIGDescribe("Downward API volume", func() { Description: A Pod is configured with DownwardAPIVolumeSource with the volumesource mode set to -r-------- and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. This test is marked LinuxOnly since Windows does not support setting specific file permissions. 
*/ - framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -81,7 +82,7 @@ var _ = SIGDescribe("Downward API volume", func() { Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -91,7 +92,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -106,7 +107,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -127,7 +128,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, update label Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume. */ - framework.ConformanceIt("should update labels on modification [NodeConformance]", func() { + framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -159,7 +160,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, update annotations Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume. 
*/ - framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() { + framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -190,7 +191,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU limits Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() { + framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -204,7 +205,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory limits Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() { + framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -218,7 +219,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU request Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() { + framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -232,7 +233,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory request Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() { + framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -246,7 +247,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU limit, default node allocatable Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. 
The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable. */ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -258,7 +259,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory limit, default node allocatable Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable. */ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") diff --git a/test/e2e/common/storage/empty_dir.go b/test/e2e/common/storage/empty_dir.go index f78eb699aed..9224fd5bb3b 100644 --- a/test/e2e/common/storage/empty_dir.go +++ b/test/e2e/common/storage/empty_dir.go @@ -53,27 +53,27 @@ var _ = SIGDescribe("EmptyDir volumes", func() { e2eskipper.SkipIfNodeOSDistroIs("windows") }) - ginkgo.It("new files should be created with FSGroup ownership when container is root", func() { + ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) { doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory) }) - ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func() { + ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) { doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory) }) - ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() { + ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) { doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory) }) - ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func() { + ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) { doTest0644FSGroup(f, 0, v1.StorageMediumMemory) }) - ginkgo.It("volume on default medium should have the correct mode using FSGroup", func() { + ginkgo.It("volume on default medium should have the correct mode using FSGroup", func(ctx context.Context) { doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault) }) - ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func() { + ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) { doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory) }) }) @@ -84,7 +84,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 
'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'. */ - framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTestVolumeMode(f, 0, v1.StorageMediumMemory) }) @@ -94,7 +94,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0644(f, 0, v1.StorageMediumMemory) }) @@ -104,7 +104,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0666(f, 0, v1.StorageMediumMemory) }) @@ -114,7 +114,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0777(f, 0, v1.StorageMediumMemory) }) @@ -124,7 +124,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. 
*/ - framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0644(f, nonRootUID, v1.StorageMediumMemory) }) @@ -134,7 +134,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0666(f, nonRootUID, v1.StorageMediumMemory) }) @@ -144,7 +144,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0777(f, nonRootUID, v1.StorageMediumMemory) }) @@ -154,7 +154,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTestVolumeMode(f, 0, v1.StorageMediumDefault) }) @@ -164,7 +164,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0644(f, 0, v1.StorageMediumDefault) }) @@ -174,7 +174,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. 
*/ - framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0666(f, 0, v1.StorageMediumDefault) }) @@ -184,7 +184,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0777(f, 0, v1.StorageMediumDefault) }) @@ -194,7 +194,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0644(f, nonRootUID, v1.StorageMediumDefault) }) @@ -204,7 +204,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0666(f, nonRootUID, v1.StorageMediumDefault) }) @@ -214,7 +214,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { doTest0777(f, nonRootUID, v1.StorageMediumDefault) }) @@ -224,7 +224,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containeres in the pod. The two busybox image containers should share the volumes mounted to the pod. The main container should wait until the sub container drops a file, and main container access the shared data. 
*/ - framework.ConformanceIt("pod should support shared volumes between containers", func() { + framework.ConformanceIt("pod should support shared volumes between containers", func(ctx context.Context) { var ( volumeName = "shared-data" busyBoxMainVolumeMountPath = "/usr/share/volumeshare" @@ -296,7 +296,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Testname: EmptyDir, Memory backed volume is sized to specified limit Description: A Pod created with an 'emptyDir' Volume backed by memory should be sized to user provided value. */ - ginkgo.It("pod should support memory backed volumes of specified size", func() { + ginkgo.It("pod should support memory backed volumes of specified size", func(ctx context.Context) { var ( volumeName = "shared-data" busyBoxMainVolumeMountPath = "/usr/share/volumeshare" diff --git a/test/e2e/common/storage/host_path.go b/test/e2e/common/storage/host_path.go index 6ff9d8a3401..0c8de0350fe 100644 --- a/test/e2e/common/storage/host_path.go +++ b/test/e2e/common/storage/host_path.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "os" "path" @@ -48,7 +49,7 @@ var _ = SIGDescribe("HostPath", func() { Create a Pod with host volume mounted. The volume mounted MUST be a directory with permissions mode -rwxrwxrwx and that is has the sticky bit (mode flag t) set. This test is marked LinuxOnly since Windows does not support setting the sticky bit (mode flag t). */ - ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func() { + ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { source := &v1.HostPathVolumeSource{ Path: "/tmp", } @@ -65,7 +66,7 @@ var _ = SIGDescribe("HostPath", func() { }) // This test requires mounting a folder into a container with write privileges. - ginkgo.It("should support r/w [NodeConformance]", func() { + ginkgo.It("should support r/w [NodeConformance]", func(ctx context.Context) { filePath := path.Join(volumePath, "test-file") retryDuration := 180 source := &v1.HostPathVolumeSource{ @@ -93,7 +94,7 @@ var _ = SIGDescribe("HostPath", func() { }) }) - ginkgo.It("should support subPath [NodeConformance]", func() { + ginkgo.It("should support subPath [NodeConformance]", func(ctx context.Context) { subPath := "sub-path" fileName := "test-file" retryDuration := 180 diff --git a/test/e2e/common/storage/projected_combined.go b/test/e2e/common/storage/projected_combined.go index b420d75280c..c4f9522c5bd 100644 --- a/test/e2e/common/storage/projected_combined.go +++ b/test/e2e/common/storage/projected_combined.go @@ -41,7 +41,7 @@ var _ = SIGDescribe("Projected combined", func() { Testname: Projected Volume, multiple projections Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() { + framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func(ctx context.Context) { var err error podName := "projected-volume-" + string(uuid.NewUUID()) secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/storage/projected_configmap.go b/test/e2e/common/storage/projected_configmap.go index c761c5789f7..a255e9a4609 100644 --- a/test/e2e/common/storage/projected_configmap.go +++ b/test/e2e/common/storage/projected_configmap.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, volume mode default Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil) }) @@ -54,12 +54,12 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode) }) - ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -71,11 +71,11 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, non-root user Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. 
*/ - framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil) @@ -86,7 +86,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, mapped Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { doProjectedConfigMapE2EWithMappings(f, false, 0, nil) }) @@ -96,7 +96,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) doProjectedConfigMapE2EWithMappings(f, false, 0, &mode) }) @@ -106,11 +106,11 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, mapped, non-root user Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { doProjectedConfigMapE2EWithMappings(f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. 
e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithMappings(f, true, 1001, nil) @@ -121,7 +121,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, update Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the configMap to value-2. */ - framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -171,7 +171,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, create, update and delete Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -372,7 +372,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, multiple volume paths Description: A Pod is created with a projected volume source 'ConfigMap' to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts. */ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) { var ( name = "projected-configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "projected-configmap-volume" @@ -460,7 +460,7 @@ var _ = SIGDescribe("Projected configMap", func() { //The pod is in pending during volume creation until the configMap objects are available //or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
//Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/projected-configmap-volumes" pod, err := createNonOptionalConfigMapPod(f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) @@ -469,7 +469,7 @@ var _ = SIGDescribe("Projected configMap", func() { //ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap, // the volume setup will error unless it is marked optional, during the pod creation. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath) framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name) diff --git a/test/e2e/common/storage/projected_downwardapi.go b/test/e2e/common/storage/projected_downwardapi.go index 1c5902bbbac..7de8c266052 100644 --- a/test/e2e/common/storage/projected_downwardapi.go +++ b/test/e2e/common/storage/projected_downwardapi.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "time" @@ -50,7 +51,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, pod name Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide podname only [NodeConformance]", func() { + framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -65,7 +66,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -81,7 +82,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. 
Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -91,7 +92,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -106,7 +107,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() { + ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -127,7 +128,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, update labels Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels. */ - framework.ConformanceIt("should update labels on modification [NodeConformance]", func() { + framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -159,7 +160,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, update annotation Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations. */ - framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() { + framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -190,7 +191,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU limits Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() { + framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -204,7 +205,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory limits Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() { + framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -218,7 +219,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU request Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() { + framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -232,7 +233,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory request Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() { + framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -246,7 +247,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -258,7 +259,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory limit, node allocatable Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") diff --git a/test/e2e/common/storage/projected_secret.go b/test/e2e/common/storage/projected_secret.go index 9a5f9bb5ed5..62d6e3a059d 100644 --- a/test/e2e/common/storage/projected_secret.go +++ b/test/e2e/common/storage/projected_secret.go @@ -43,7 +43,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, volume mode default Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -53,7 +53,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0x400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -64,7 +64,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----. 
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) @@ -75,7 +75,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, mapped Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------on the mapped volume. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { doProjectedSecretE2EWithMapping(f, nil) }) @@ -85,12 +85,12 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) doProjectedSecretE2EWithMapping(f, &mode) }) - ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() { + ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { var ( namespace2 *v1.Namespace err error @@ -116,7 +116,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, mapped, multiple paths Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -r-------- on the mapped volumes. */ - framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() { + framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. 
@@ -212,7 +212,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, create, update delete Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST no have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -411,7 +411,7 @@ var _ = SIGDescribe("Projected secret", func() { //The secret is in pending during volume creation until the secret objects are available //or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/projected-secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) err := createNonOptionalSecretPod(f, volumeMountPath, podName) @@ -421,7 +421,7 @@ var _ = SIGDescribe("Projected secret", func() { //Secret object defined for the pod, If a key is specified which is not present in the secret, // the volume setup will error unless it is marked optional, during the pod creation. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) diff --git a/test/e2e/common/storage/secrets_volume.go b/test/e2e/common/storage/secrets_volume.go index 6e6326bb121..1713bdc7b76 100644 --- a/test/e2e/common/storage/secrets_volume.go +++ b/test/e2e/common/storage/secrets_volume.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, default Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -54,7 +54,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x400. 
Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r-------- by default. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0400) doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -65,7 +65,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r-----by default. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) @@ -76,7 +76,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, mapping Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { doSecretE2EWithMapping(f, nil) }) @@ -86,7 +86,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and file mode set to 0x400. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -r--r--r--. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { mode := int32(0400) doSecretE2EWithMapping(f, &mode) }) @@ -96,7 +96,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, volume mode default, secret with same name in different namespace Description: Create a secret with same name in two namespaces. Create a Pod with secret volume source configured into the container. 
Pod MUST be able to read the secrets from the mounted volume from the container runtime and only secrets which are associated with namespace where pod is created. The file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() { + framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { var ( namespace2 *v1.Namespace err error @@ -122,7 +122,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, mapping multiple volume paths Description: Create a secret. Create a Pod with two secret volume sources configured into the container in to two different custom paths. Pod MUST be able to read the secret from the both the mounted volumes from the two specified custom paths. */ - framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() { + framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. @@ -202,7 +202,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, create, update and delete Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -383,7 +383,7 @@ var _ = SIGDescribe("Secrets", func() { Try to update the secret`s metadata (labels), the update must succeed. Try to delete the secret, the deletion must succeed. */ - framework.ConformanceIt("should be immutable if `immutable` field is set", func() { + framework.ConformanceIt("should be immutable if `immutable` field is set", func(ctx context.Context) { name := "immutable" secret := secretForTest(f.Namespace.Name, name) @@ -436,7 +436,7 @@ var _ = SIGDescribe("Secrets", func() { // The secret is in pending during volume creation until the secret objects are available // or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional. 
// Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) err := createNonOptionalSecretPod(f, volumeMountPath, podName) @@ -446,7 +446,7 @@ var _ = SIGDescribe("Secrets", func() { // Secret object defined for the pod, If a key is specified which is not present in the secret, // the volume setup will error unless it is marked optional, during the pod creation. // Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() { + ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName) diff --git a/test/e2e/common/storage/volumes.go b/test/e2e/common/storage/volumes.go index 9e8dc0e0c89..86751bc82ab 100644 --- a/test/e2e/common/storage/volumes.go +++ b/test/e2e/common/storage/volumes.go @@ -43,6 +43,8 @@ limitations under the License. package storage import ( + "context" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -74,7 +76,7 @@ var _ = SIGDescribe("Volumes", func() { // NFS //////////////////////////////////////////////////////////////////////// ginkgo.Describe("NFSv4", func() { - ginkgo.It("should be mountable for NFSv4", func() { + ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) { config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) defer e2evolume.TestServerCleanup(f, config) @@ -98,7 +100,7 @@ var _ = SIGDescribe("Volumes", func() { }) ginkgo.Describe("NFSv3", func() { - ginkgo.It("should be mountable for NFSv3", func() { + ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) { config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{}) defer e2evolume.TestServerCleanup(f, config) diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index ef1a3ebcba2..cabebc24e9a 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -52,7 +52,6 @@ func networkResources() app.Resources { var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", func() { f := framework.NewDefaultFramework("dra") - ctx := context.Background() // The driver containers have to run with sufficient privileges to // modify /var/lib/kubelet/plugins. @@ -62,12 +61,12 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu nodes := NewNodes(f, 1, 1) driver := NewDriver(f, nodes, networkResources) // All tests get their own driver instance. b := newBuilder(f, driver) - ginkgo.It("registers plugin", func() { + ginkgo.It("registers plugin", func(ctx context.Context) { ginkgo.By("the driver is running") }) // This test does not pass at the moment because kubelet doesn't retry. - ginkgo.It("must retry NodePrepareResource", func() { + ginkgo.It("must retry NodePrepareResource", func(ctx context.Context) { // We have exactly one host. 
m := MethodInstance{driver.Nodenames()[0], NodePrepareResourceMethod} @@ -96,7 +95,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu framework.Fail("NodePrepareResource should have been called again") } }) - ginkgo.It("must not run a pod if a claim is not reserved for it", func() { + ginkgo.It("must not run a pod if a claim is not reserved for it", func(ctx context.Context) { parameters := b.parameters() claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate) pod := b.podExternal() @@ -119,7 +118,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu return nil }, 20*time.Second, 200*time.Millisecond).Should(gomega.BeNil()) }) - ginkgo.It("must unprepare resources for force-deleted pod", func() { + ginkgo.It("must unprepare resources for force-deleted pod", func(ctx context.Context) { parameters := b.parameters() claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate) pod := b.podExternal() @@ -151,7 +150,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.parametersCounter = 1 b.classParametersName = b.parametersName() - ginkgo.It("supports claim and class parameters", func() { + ginkgo.It("supports claim and class parameters", func(ctx context.Context) { classParameters := b.parameters("x", "y") claimParameters := b.parameters() pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer) @@ -170,7 +169,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu // claimTests tries out several different combinations of pods with // claims, both inline and external. claimTests := func(allocationMode resourcev1alpha1.AllocationMode) { - ginkgo.It("supports simple pod referencing inline resource claim", func() { + ginkgo.It("supports simple pod referencing inline resource claim", func(ctx context.Context) { parameters := b.parameters() pod, template := b.podInline(allocationMode) b.create(ctx, parameters, pod, template) @@ -178,7 +177,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.testPod(f.ClientSet, pod) }) - ginkgo.It("supports inline claim referenced by multiple containers", func() { + ginkgo.It("supports inline claim referenced by multiple containers", func(ctx context.Context) { parameters := b.parameters() pod, template := b.podInlineMultiple(allocationMode) b.create(ctx, parameters, pod, template) @@ -186,7 +185,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.testPod(f.ClientSet, pod) }) - ginkgo.It("supports simple pod referencing external resource claim", func() { + ginkgo.It("supports simple pod referencing external resource claim", func(ctx context.Context) { parameters := b.parameters() pod := b.podExternal() b.create(ctx, parameters, b.externalClaim(allocationMode), pod) @@ -194,7 +193,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.testPod(f.ClientSet, pod) }) - ginkgo.It("supports external claim referenced by multiple pods", func() { + ginkgo.It("supports external claim referenced by multiple pods", func(ctx context.Context) { parameters := b.parameters() pod1 := b.podExternal() pod2 := b.podExternal() @@ -207,7 +206,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu } }) - ginkgo.It("supports external claim referenced by multiple containers of multiple pods", func() { + ginkgo.It("supports external claim referenced by multiple containers of multiple 
pods", func(ctx context.Context) { parameters := b.parameters() pod1 := b.podExternalMultiple() pod2 := b.podExternalMultiple() @@ -220,7 +219,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu } }) - ginkgo.It("supports init containers", func() { + ginkgo.It("supports init containers", func(ctx context.Context) { parameters := b.parameters() pod, template := b.podInline(allocationMode) pod.Spec.InitContainers = []v1.Container{pod.Spec.Containers[0]} @@ -248,7 +247,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu driver := NewDriver(f, nodes, networkResources) b := newBuilder(f, driver) - ginkgo.It("schedules onto different nodes", func() { + ginkgo.It("schedules onto different nodes", func(ctx context.Context) { parameters := b.parameters() label := "app.kubernetes.io/instance" instance := f.UniqueName + "-test-app" @@ -295,7 +294,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b := newBuilder(f, driver) tests := func(allocationMode resourcev1alpha1.AllocationMode) { - ginkgo.It("uses all resources", func() { + ginkgo.It("uses all resources", func(ctx context.Context) { var objs = []klog.KMetadata{ b.parameters(), } @@ -360,7 +359,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }) b := newBuilder(f, driver) - ginkgo.It("works", func() { + ginkgo.It("works", func(ctx context.Context) { // A pod with two claims can run on a node, but // only if allocation of both succeeds. This // tests simulates the scenario where one claim @@ -474,7 +473,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu driver2.NameSuffix = "-other" b2 := newBuilder(f, driver2) - ginkgo.It("work", func() { + ginkgo.It("work", func(ctx context.Context) { parameters1 := b1.parameters() parameters2 := b2.parameters() claim1 := b1.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer) diff --git a/test/e2e/framework/README.md b/test/e2e/framework/README.md index 23194dec9e1..f8ed1eff260 100644 --- a/test/e2e/framework/README.md +++ b/test/e2e/framework/README.md @@ -57,7 +57,7 @@ ginkgo.AfterEach(func() { # Do something with f.ClientSet. } -ginkgo.It("test something", func() { +ginkgo.It("test something", func(ctx context.Context) { # The actual test. }) ``` diff --git a/test/e2e/framework/internal/output/output.go b/test/e2e/framework/internal/output/output.go index c778a72576e..54d28bda574 100644 --- a/test/e2e/framework/internal/output/output.go +++ b/test/e2e/framework/internal/output/output.go @@ -124,8 +124,8 @@ func stripAddresses(in string) string { // locally) or one of a few relative paths (built in the Kubernetes CI). var stackLocation = regexp.MustCompile(`(?:/|vendor/|test/|GOROOT/).*/([[:^space:]]+.go:[[:digit:]]+)( \+0x[0-9a-fA-F]+)?`) -// functionArgs matches "(...)". -var functionArgs = regexp.MustCompile(`([[:alpha:]]+)\(.*\)`) +// functionArgs matches "(...)" where may be an anonymous function (e.g. 
"pod_test.glob..func1.1") +var functionArgs = regexp.MustCompile(`([[:alpha:][:digit:].]+)\(.*\)`) // klogPrefix matches "I0822 16:10:39.343790 989127 " var klogPrefix = regexp.MustCompile(`(?m)^[IEF][[:digit:]]{4} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}\.[[:digit:]]{6}[[:space:]]+[[:digit:]]+ `) diff --git a/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go b/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go index eab2af891d9..c47fd14a568 100644 --- a/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go +++ b/test/e2e/framework/internal/unittests/cleanup/cleanup_test.go @@ -22,6 +22,7 @@ limitations under the License. package cleanup import ( + "context" "flag" "regexp" "testing" @@ -46,7 +47,6 @@ import ( // // // -// // This must be line #50. var _ = ginkgo.Describe("e2e", func() { @@ -77,7 +77,7 @@ var _ = ginkgo.Describe("e2e", func() { framework.Logf("after #2") }) - ginkgo.It("works", func() { + ginkgo.It("works", func(ctx context.Context) { // DeferCleanup invokes in first-in-last-out order ginkgo.DeferCleanup(func() { framework.Logf("cleanup last") diff --git a/test/e2e/framework/log_test.go b/test/e2e/framework/log_test.go index 8eff74c06dd..91ffe3d78d5 100644 --- a/test/e2e/framework/log_test.go +++ b/test/e2e/framework/log_test.go @@ -154,12 +154,12 @@ k8s.io/kubernetes/test/e2e/framework_test.glob..func1.5() FAIL: I'm failing. Full Stack Trace -k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3.1(...) +k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3.1() log_test.go:56 k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3() log_test.go:57` + commonOutput, Failure: "I'm failing.", - Stack: `k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3.1(...) + Stack: `k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3.1() log_test.go:56 k8s.io/kubernetes/test/e2e/framework_test.glob..func1.3() log_test.go:57`, diff --git a/test/e2e/framework/pod/wait_test.go b/test/e2e/framework/pod/wait_test.go index f3d28b3ef73..22cd8c5bb73 100644 --- a/test/e2e/framework/pod/wait_test.go +++ b/test/e2e/framework/pod/wait_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod_test import ( + "context" "strings" "testing" "time" @@ -48,15 +49,14 @@ import ( // // // -// // This must be line #52. var _ = ginkgo.Describe("pod", func() { - ginkgo.It("not found", func() { + ginkgo.It("not found", func(ctx context.Context) { framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(clientSet, "no-such-pod", "default", timeout /* no explanation here to cover that code path */)) }) - ginkgo.It("not running", func() { + ginkgo.It("not running", func(ctx context.Context) { framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */) }) }) diff --git a/test/e2e/framework/skipper/skipper_test.go b/test/e2e/framework/skipper/skipper_test.go index c0c4cd44bcf..1fb58784db5 100644 --- a/test/e2e/framework/skipper/skipper_test.go +++ b/test/e2e/framework/skipper/skipper_test.go @@ -17,6 +17,7 @@ limitations under the License. package skipper_test import ( + "context" "flag" "testing" @@ -46,11 +47,10 @@ import ( // // // -// // This must be line #50. 
var _ = ginkgo.Describe("e2e", func() { - ginkgo.It("skips", func() { + ginkgo.It("skips", func(ctx context.Context) { e2eskipper.Skipf("skipping %d, %d, %d", 1, 3, 4) }) }) diff --git a/test/e2e/instrumentation/core_events.go b/test/e2e/instrumentation/core_events.go index 9c020a50f01..a7b74275255 100644 --- a/test/e2e/instrumentation/core_events.go +++ b/test/e2e/instrumentation/core_events.go @@ -54,7 +54,7 @@ var _ = common.SIGDescribe("Events", func() { this update. The event is deleted and MUST NOT show up when listing all events. */ - framework.ConformanceIt("should manage the lifecycle of an event", func() { + framework.ConformanceIt("should manage the lifecycle of an event", func(ctx context.Context) { // As per SIG-Arch meeting 14 July 2022 this e2e test now supersede // e2e test "Event resource lifecycle", which has been removed. @@ -172,7 +172,7 @@ var _ = common.SIGDescribe("Events", func() { Description: A set of events is created with a label selector which MUST be found when listed. The set of events is deleted and MUST NOT show up when listed by its label selector. */ - framework.ConformanceIt("should delete a collection of events", func() { + framework.ConformanceIt("should delete a collection of events", func(ctx context.Context) { eventTestNames := []string{"test-event-1", "test-event-2", "test-event-3"} ginkgo.By("Create set of events") diff --git a/test/e2e/instrumentation/events.go b/test/e2e/instrumentation/events.go index 25b043dc668..0086ea9d826 100644 --- a/test/e2e/instrumentation/events.go +++ b/test/e2e/instrumentation/events.go @@ -95,7 +95,7 @@ var _ = common.SIGDescribe("Events API", func() { The event is updated with a new series, the check MUST have the update series. The event is deleted and MUST NOT show up when listing all events. */ - framework.ConformanceIt("should ensure that an event can be fetched, patched, deleted, and listed", func() { + framework.ConformanceIt("should ensure that an event can be fetched, patched, deleted, and listed", func(ctx context.Context) { eventName := "event-test" ginkgo.By("creating a test event") @@ -204,7 +204,7 @@ var _ = common.SIGDescribe("Events API", func() { Description: Create a list of events, the events MUST exist. The events are deleted and MUST NOT show up when listing all events. */ - framework.ConformanceIt("should delete a collection of events", func() { + framework.ConformanceIt("should delete a collection of events", func(ctx context.Context) { eventNames := []string{"test-event-1", "test-event-2", "test-event-3"} ginkgo.By("Create set of events") diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index 588c582c422..60096ebfe2f 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -17,6 +17,7 @@ limitations under the License. package logging import ( + "context" "fmt" "strconv" "strings" @@ -55,7 +56,7 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti // This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load // scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch. 
- ginkgo.It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func() { + ginkgo.It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func(ctx context.Context) { ginkgo.By(fmt.Sprintf("scaling up to %v pods per node", loggingSoak.Scale)) defer ginkgo.GinkgoRecover() var wg sync.WaitGroup diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index 2e95349f7e8..a74a3a82eca 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -56,7 +56,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() { + ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func(ctx context.Context) { testStackdriverAcceleratorMonitoring(f) }) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index 3e9c2b9d7fb..38923214171 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -56,7 +56,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func() { + ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { @@ -71,7 +71,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) }) - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func() { + ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { @@ -86,7 +86,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) }) - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func() { + ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index fbea23b52e4..b3bea127270 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -17,6 +17,7 @@ limitations under the License. 
package monitoring import ( + "context" "errors" "fmt" "time" @@ -51,7 +52,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { }, 5*time.Minute, 10*time.Second).Should(gomega.BeNil()) }) - ginkgo.It("should grab all metrics from API server.", func() { + ginkgo.It("should grab all metrics from API server.", func(ctx context.Context) { ginkgo.By("Connecting to /metrics endpoint") response, err := grabber.GrabFromAPIServer() if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { @@ -61,7 +62,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gomega.Expect(response).NotTo(gomega.BeEmpty()) }) - ginkgo.It("should grab all metrics from a Kubelet.", func() { + ginkgo.It("should grab all metrics from a Kubelet.", func(ctx context.Context) { ginkgo.By("Proxying to Node through the API server") node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { @@ -73,7 +74,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gomega.Expect(response).NotTo(gomega.BeEmpty()) }) - ginkgo.It("should grab all metrics from a Scheduler.", func() { + ginkgo.It("should grab all metrics from a Scheduler.", func(ctx context.Context) { ginkgo.By("Proxying to Pod through the API server") response, err := grabber.GrabFromScheduler() if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { @@ -83,7 +84,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gomega.Expect(response).NotTo(gomega.BeEmpty()) }) - ginkgo.It("should grab all metrics from a ControllerManager.", func() { + ginkgo.It("should grab all metrics from a ControllerManager.", func(ctx context.Context) { ginkgo.By("Proxying to Pod through the API server") response, err := grabber.GrabFromControllerManager() if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go index cdea5d0a692..5693d4416ff 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/test/e2e/instrumentation/monitoring/stackdriver.go @@ -68,7 +68,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func() { + ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) { testStackdriverMonitoring(f, 1, 100, 200) }) diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index bdc37e6909c..303a6c9717d 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -54,7 +54,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var kubeClient clientset.Interface - ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func() { + ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func(ctx context.Context) { kubeClient = f.ClientSet testAgent(f, kubeClient) }) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index a1f433a4868..f4ce3c0b579 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -336,7 
+336,7 @@ var _ = SIGDescribe("Kubectl client", func() { Testname: Kubectl, replication controller Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. */ - framework.ConformanceIt("should create and stop a replication controller ", func() { + framework.ConformanceIt("should create and stop a replication controller ", func(ctx context.Context) { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") @@ -349,7 +349,7 @@ var _ = SIGDescribe("Kubectl client", func() { Testname: Kubectl, scale replication controller Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2. */ - framework.ConformanceIt("should scale a replication controller ", func() { + framework.ConformanceIt("should scale a replication controller ", func(ctx context.Context) { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) ginkgo.By("creating a replication controller") @@ -391,7 +391,7 @@ var _ = SIGDescribe("Kubectl client", func() { Testname: Kubectl, guestbook application Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read. 
*/ - framework.ConformanceIt("should create and stop a working application ", func() { + framework.ConformanceIt("should create and stop a working application ", func(ctx context.Context) { defer forEachGBFile(func(contents string) { cleanupKubectlInputs(contents, ns) }) @@ -418,7 +418,7 @@ var _ = SIGDescribe("Kubectl client", func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) }) - ginkgo.It("should support exec", func() { + ginkgo.It("should support exec", func(ctx context.Context) { ginkgo.By("executing a command in the container") execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { @@ -458,7 +458,7 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - ginkgo.It("should support exec using resource/name", func() { + ginkgo.It("should support exec using resource/name", func(ctx context.Context) { ginkgo.By("executing a command in the container") execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { @@ -466,7 +466,7 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - ginkgo.It("should support exec through an HTTP proxy", func() { + ginkgo.It("should support exec through an HTTP proxy", func(ctx context.Context) { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") @@ -500,7 +500,7 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - ginkgo.It("should support exec through kubectl proxy", func() { + ginkgo.It("should support exec through kubectl proxy", func(ctx context.Context) { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") @@ -527,12 +527,12 @@ var _ = SIGDescribe("Kubectl client", func() { }) ginkgo.Context("should return command exit codes", func() { - ginkgo.It("execing into a container with a successful command", func() { + ginkgo.It("execing into a container with a successful command", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) }) - ginkgo.It("execing into a container with a failing command", func() { + ginkgo.It("execing into a container with a failing command", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) if !ok { @@ -541,12 +541,12 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ExpectEqual(ee.ExitStatus(), 42) }) - ginkgo.It("running a successful command", func() { + ginkgo.It("running a successful command", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) }) - ginkgo.It("running a failing command", func() { + ginkgo.It("running a failing command", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := 
err.(uexec.ExitError) if !ok { @@ -555,7 +555,7 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ExpectEqual(ee.ExitStatus(), 42) }) - ginkgo.It("[Slow] running a failing command without --restart=Never", func() { + ginkgo.It("[Slow] running a failing command without --restart=Never", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() @@ -568,7 +568,7 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func() { + ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() @@ -582,7 +582,7 @@ var _ = SIGDescribe("Kubectl client", func() { e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) }) - ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func() { + ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() @@ -590,7 +590,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) }) - ginkgo.It("should support inline execution and attach", func() { + ginkgo.It("should support inline execution and attach", func(ctx context.Context) { waitForStdinContent := func(pod, content string) string { var logOutput string err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { @@ -650,7 +650,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) }) - ginkgo.It("should contain last line of the log", func() { + ginkgo.It("should contain last line of the log", func(ctx context.Context) { podName := "run-log-test" ginkgo.By("executing a command with run") @@ -664,7 +664,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF")) }) - ginkgo.It("should support port-forward", func() { + ginkgo.It("should support port-forward", func(ctx context.Context) { ginkgo.By("forwarding the container port to a local port") cmd := runPortForward(ns, simplePodName, simplePodPort) defer cmd.Stop() @@ -681,7 +681,7 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - ginkgo.It("should handle in-cluster config", func() { + ginkgo.It("should handle in-cluster config", func(ctx context.Context) { // This test does not work for dynamically linked kubectl binaries; only statically linked ones. The // problem happens when the kubectl binary is copied to a pod in the cluster. For dynamically linked // binaries, the necessary libraries are not also copied. For this reason, the test can not be @@ -821,7 +821,7 @@ metadata: Testname: Kubectl, check version v1 Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed. 
*/ - framework.ConformanceIt("should check if v1 is in available api versions ", func() { + framework.ConformanceIt("should check if v1 is in available api versions ", func(ctx context.Context) { ginkgo.By("validating api versions") output := e2ekubectl.RunKubectlOrDie(ns, "api-versions") if !strings.Contains(output, "v1") { @@ -831,7 +831,7 @@ metadata: }) ginkgo.Describe("Kubectl get componentstatuses", func() { - ginkgo.It("should get componentstatuses", func() { + ginkgo.It("should get componentstatuses", func(ctx context.Context) { ginkgo.By("getting list of componentstatuses") output := e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") components := strings.Split(output, " ") @@ -844,7 +844,7 @@ metadata: }) ginkgo.Describe("Kubectl apply", func() { - ginkgo.It("should apply a new configuration to an existing RC", func() { + ginkgo.It("should apply a new configuration to an existing RC", func(ctx context.Context) { controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) ginkgo.By("creating Agnhost RC") @@ -857,7 +857,7 @@ metadata: ginkgo.By("checking the result") forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration) }) - ginkgo.It("should reuse port when apply to an existing SVC", func() { + ginkgo.It("should reuse port when apply to an existing SVC", func(ctx context.Context) { serviceJSON := readTestFileOrDie(agnhostServiceFilename) ginkgo.By("creating Agnhost SVC") @@ -878,7 +878,7 @@ metadata: } }) - ginkgo.It("apply set/view last-applied", func() { + ginkgo.It("apply set/view last-applied", func(ctx context.Context) { deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename))) deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename))) deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename))) @@ -928,7 +928,7 @@ metadata: Testname: Kubectl, diff Deployment Description: Create a Deployment with httpd image. Declare the same Deployment with a different image, busybox. Diff of live Deployment with declared Deployment MUST include the difference between live and declared image. */ - framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func() { + framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func(ctx context.Context) { ginkgo.By("create deployment with httpd image") deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename))) e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-") @@ -959,7 +959,7 @@ metadata: Testname: Kubectl, server-side dry-run Pod Description: The command 'kubectl run' must create a pod with the specified image name. After, the command 'kubectl patch pod -p {...} --dry-run=server' should update the Pod with the new image name and server-side dry-run enabled. The image name must not change. 
*/ - framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() { + framework.ConformanceIt("should check if kubectl can dry-run update Pods", func(ctx context.Context) { ginkgo.By("running the image " + httpdImage) podName := "e2e-test-httpd-pod" e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) @@ -1033,7 +1033,7 @@ metadata: } ginkgo.Describe("Kubectl validation", func() { - ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func() { + ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func(ctx context.Context) { ginkgo.By("create CRD with no validation schema") crd, err := crd.CreateTestCRD(f) if err != nil { @@ -1051,7 +1051,7 @@ metadata: } }) - ginkgo.It("should create/apply a valid CR for CRD with validation schema", func() { + ginkgo.It("should create/apply a valid CR for CRD with validation schema", func(ctx context.Context) { ginkgo.By("prepare CRD with validation schema") crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) { props := &apiextensionsv1.JSONSchemaProps{} @@ -1077,7 +1077,7 @@ metadata: } }) - ginkgo.It("should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func() { + ginkgo.It("should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func(ctx context.Context) { ginkgo.By("prepare CRD with partially-specified validation schema") crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) { props := &apiextensionsv1.JSONSchemaProps{} @@ -1118,7 +1118,7 @@ metadata: framework.ExpectNoError(err, "creating custom resource") }) - ginkgo.It("should detect unknown metadata fields in both the root and embedded object of a CR", func() { + ginkgo.It("should detect unknown metadata fields in both the root and embedded object of a CR", func(ctx context.Context) { ginkgo.By("prepare CRD with x-kubernetes-embedded-resource: true") opt := func(crd *apiextensionsv1.CustomResourceDefinition) { props := &apiextensionsv1.JSONSchemaProps{} @@ -1194,7 +1194,7 @@ metadata: } }) - ginkgo.It("should detect unknown metadata fields of a typed object", func() { + ginkgo.It("should detect unknown metadata fields of a typed object", func(ctx context.Context) { ginkgo.By("calling kubectl create deployment") invalidMetaDeployment := ` { @@ -1247,7 +1247,7 @@ metadata: Testname: Kubectl, cluster info Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running. 
*/ - framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func() { + framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func(ctx context.Context) { ginkgo.By("validating cluster-info") output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info") // Can't check exact strings due to terminal control commands (colors) @@ -1261,7 +1261,7 @@ metadata: }) ginkgo.Describe("Kubectl cluster-info dump", func() { - ginkgo.It("should check if cluster-info dump succeeds", func() { + ginkgo.It("should check if cluster-info dump succeeds", func(ctx context.Context) { ginkgo.By("running cluster-info dump") e2ekubectl.RunKubectlOrDie(ns, "cluster-info", "dump") }) @@ -1273,7 +1273,7 @@ metadata: Testname: Kubectl, describe pod or rc Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information. */ - framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() { + framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func(ctx context.Context) { controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) serviceJSON := readTestFileOrDie(agnhostServiceFilename) @@ -1370,7 +1370,7 @@ metadata: // Quota and limitrange are skipped for now. }) - ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func() { + ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func(ctx context.Context) { ginkgo.By("creating a cronjob") cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in"))) e2ekubectl.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-") @@ -1412,7 +1412,7 @@ metadata: Testname: Kubectl, create service, replication controller Description: Create a Pod running agnhost listening to port 6379. Using kubectl expose the agnhost primary replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that agnhost primary is listening. Using kubectl expose the agnhost primary as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that agnhost primary is listening. */ - framework.ConformanceIt("should create services for rc ", func() { + framework.ConformanceIt("should create services for rc ", func(ctx context.Context) { controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) agnhostPort := 6379 @@ -1506,7 +1506,7 @@ metadata: Testname: Kubectl, label update Description: When a Pod is running, update a Label using 'kubectl label' command. The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the container MUST verify that the label can be read back. Use 'kubectl label label-' to remove the label. 'kubectl get pod' with -l option SHOULD not list the deleted label as the label is removed. 
*/ - framework.ConformanceIt("should update the label on a resource ", func() { + framework.ConformanceIt("should update the label on a resource ", func(ctx context.Context) { labelName := "testing-label" labelValue := "testing-label-value" @@ -1545,7 +1545,7 @@ metadata: Testname: Kubectl, copy Description: When a Pod is running, copy a known file from it to a temporary local destination. */ - ginkgo.It("should copy a file from a running Pod", func() { + ginkgo.It("should copy a file from a running Pod", func(ctx context.Context) { remoteContents := "foobar\n" podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) tempDestination, err := os.CreateTemp(os.TempDir(), "copy-foobar") @@ -1589,7 +1589,7 @@ metadata: 'kubectl --since=1s' should output logs that are only 1 second older from now 'kubectl --since=24h' should output logs that are only 1 day older from now */ - framework.ConformanceIt("should be able to retrieve and filter logs ", func() { + framework.ConformanceIt("should be able to retrieve and filter logs ", func(ctx context.Context) { // Split("something\n", "\n") returns ["something", ""], so // strip trailing newline first lines := func(out string) []string { @@ -1649,7 +1649,7 @@ metadata: Testname: Kubectl, patch to annotate Description: Start running agnhost and a replication controller. When the pod is running, using 'kubectl patch' command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller. */ - framework.ConformanceIt("should add annotations for pods in rc ", func() { + framework.ConformanceIt("should add annotations for pods in rc ", func(ctx context.Context) { controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename))) ginkgo.By("creating Agnhost RC") e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-") @@ -1682,7 +1682,7 @@ metadata: Testname: Kubectl, version Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to. */ - framework.ConformanceIt("should check is all data is printed ", func() { + framework.ConformanceIt("should check is all data is printed ", func(ctx context.Context) { versionString := e2ekubectl.RunKubectlOrDie(ns, "version") // we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric requiredItems := []string{"Client Version: ", "Server Version: "} @@ -1710,7 +1710,7 @@ metadata: Testname: Kubectl, run pod Description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image. */ - framework.ConformanceIt("should create a pod from an image when restart is Never ", func() { + framework.ConformanceIt("should create a pod from an image when restart is Never ", func(ctx context.Context) { ginkgo.By("running the image " + httpdImage) e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage) ginkgo.By("verifying the pod " + podName + " was created") @@ -1744,7 +1744,7 @@ metadata: Testname: Kubectl, replace Description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. 
A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image. */ - framework.ConformanceIt("should update a single-container pod's image ", func() { + framework.ConformanceIt("should update a single-container pod's image ", func(ctx context.Context) { ginkgo.By("running the image " + httpdImage) e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) @@ -1784,7 +1784,7 @@ metadata: Testname: Kubectl, proxy port zero Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string. */ - framework.ConformanceIt("should support proxy with --port 0 ", func() { + framework.ConformanceIt("should support proxy with --port 0 ", func(ctx context.Context) { ginkgo.By("starting the proxy server") port, cmd, err := startProxyServer(ns) if cmd != nil { @@ -1809,7 +1809,7 @@ metadata: Testname: Kubectl, proxy socket Description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=. Call the proxy server by requesting api versions from http://locahost:0/api. The proxy server MUST provide at least one version string */ - framework.ConformanceIt("should support --unix-socket=/path ", func() { + framework.ConformanceIt("should support --unix-socket=/path ", func(ctx context.Context) { ginkgo.By("Starting the proxy") tmpdir, err := os.MkdirTemp("", "kubectl-proxy-unix") if err != nil { @@ -1842,7 +1842,7 @@ metadata: // This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on // it, which will affect anything else running in parallel. 
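// Editor's sketch (illustrative only, not part of the patch): the reason for switching the
// callback signature to func(ctx context.Context) throughout this diff is that test bodies can
// hand the Ginkgo-managed ctx to client-go calls instead of context.TODO(); Ginkgo cancels that
// ctx when a spec times out or is interrupted. The names below ("ctx-example", "some-pod") are
// hypothetical and only show the shape of the pattern under those assumptions.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ctx plumbing example", func() {
	f := framework.NewDefaultFramework("ctx-example")

	ginkgo.It("should pass the spec context to API calls", func(ctx context.Context) {
		// client-go accepts the spec's ctx directly, so the call is aborted if the spec is cancelled.
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, "some-pod", metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.Logf("got pod %s", pod.Name)
	})
})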
ginkgo.Describe("Kubectl taint [Serial]", func() { - ginkgo.It("should update the taint on a node", func() { + ginkgo.It("should update the taint on a node", func(ctx context.Context) { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1873,7 +1873,7 @@ metadata: } }) - ginkgo.It("should remove all the taints with the same key off a node", func() { + ginkgo.It("should remove all the taints with the same key off a node", func(ctx context.Context) { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1942,7 +1942,7 @@ metadata: }) ginkgo.Describe("Kubectl events", func() { - ginkgo.It("should show event when pod is created", func() { + ginkgo.It("should show event when pod is created", func(ctx context.Context) { podName := "e2e-test-httpd-pod" ginkgo.By("running the image " + httpdImage) e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName) @@ -1972,7 +1972,7 @@ metadata: }) ginkgo.Describe("Kubectl create quota", func() { - ginkgo.It("should create a quota without scopes", func() { + ginkgo.It("should create a quota without scopes", func(ctx context.Context) { quotaName := "million" ginkgo.By("calling kubectl quota") @@ -2000,7 +2000,7 @@ metadata: } }) - ginkgo.It("should create a quota with scopes", func() { + ginkgo.It("should create a quota with scopes", func(ctx context.Context) { quotaName := "scopes" ginkgo.By("calling kubectl quota") @@ -2027,7 +2027,7 @@ metadata: } }) - ginkgo.It("should reject quota with invalid scopes", func() { + ginkgo.It("should reject quota with invalid scopes", func(ctx context.Context) { quotaName := "scopes" ginkgo.By("calling kubectl quota") @@ -2039,7 +2039,7 @@ metadata: }) ginkgo.Describe("kubectl wait", func() { - ginkgo.It("should ignore not found error with --for=delete", func() { + ginkgo.It("should ignore not found error with --for=delete", func(ctx context.Context) { ginkgo.By("calling kubectl wait --for=delete") e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist") e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist") diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 11a9a5999c6..e6510df361b 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -453,21 +453,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { ginkgo.Describe("With a server listening on 0.0.0.0", func() { ginkgo.Describe("that expects a client request", func() { - ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) { doTestMustConnectSendNothing("0.0.0.0", f) }) - ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { doTestMustConnectSendDisconnect("0.0.0.0", f) }) }) ginkgo.Describe("that expects NO client request", func() { - ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { doTestConnectSendDisconnect("0.0.0.0", f) }) }) - ginkgo.It("should support forwarding 
over websockets", func() { + ginkgo.It("should support forwarding over websockets", func(ctx context.Context) { doTestOverWebSockets("0.0.0.0", f) }) }) @@ -475,21 +475,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { // kubectl port-forward may need elevated privileges to do its job. ginkgo.Describe("With a server listening on localhost", func() { ginkgo.Describe("that expects a client request", func() { - ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) { doTestMustConnectSendNothing("localhost", f) }) - ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { doTestMustConnectSendDisconnect("localhost", f) }) }) ginkgo.Describe("that expects NO client request", func() { - ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) { doTestConnectSendDisconnect("localhost", f) }) }) - ginkgo.It("should support forwarding over websockets", func() { + ginkgo.It("should support forwarding over websockets", func(ctx context.Context) { doTestOverWebSockets("localhost", f) }) }) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index bf61e83b6f3..20ba887d3ca 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -55,7 +55,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { c = f.ClientSet }) - ginkgo.It("should sign the new added bootstrap tokens", func() { + ginkgo.It("should sign the new added bootstrap tokens", func(ctx context.Context) { ginkgo.By("create a new bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) @@ -70,7 +70,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() { + ginkgo.It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func(ctx context.Context) { ginkgo.By("create a new bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) @@ -111,7 +111,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func() { + ginkgo.It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func(ctx context.Context) { ginkgo.By("create a new bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index c786ab92e4d..01aeff34eea 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -50,7 +50,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) } }) - ginkgo.It("should delete the token secret when the secret expired", func() { + 
ginkgo.It("should delete the token secret when the secret expired", func(ctx context.Context) { ginkgo.By("create a new expired bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) @@ -68,7 +68,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should not delete the token secret when the secret is not expired", func() { + ginkgo.It("should not delete the token secret when the secret is not expired", func(ctx context.Context) { ginkgo.By("create a new expired bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) diff --git a/test/e2e/network/conntrack.go b/test/e2e/network/conntrack.go index 69e1fc4d787..3d22fb714a6 100644 --- a/test/e2e/network/conntrack.go +++ b/test/e2e/network/conntrack.go @@ -129,7 +129,7 @@ var _ = common.SIGDescribe("Conntrack", func() { } }) - ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a NodePort service", func() { + ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a NodePort service", func(ctx context.Context) { // Create a NodePort service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -205,7 +205,7 @@ var _ = common.SIGDescribe("Conntrack", func() { } }) - ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service", func() { + ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service", func(ctx context.Context) { // Create a ClusterIP service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -292,7 +292,7 @@ var _ = common.SIGDescribe("Conntrack", func() { // endpoint is ready. If some traffic arrives to since kube-proxy clear the entries (see the endpoint slice) and // installs the corresponding iptables rules (the endpoint is ready), a conntrack entry will be generated blackholing // subsequent traffic. - ginkgo.It("should be able to preserve UDP traffic when initial unready endpoints get ready", func() { + ginkgo.It("should be able to preserve UDP traffic when initial unready endpoints get ready", func(ctx context.Context) { // Create a ClusterIP service udpJig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -360,7 +360,7 @@ var _ = common.SIGDescribe("Conntrack", func() { // IP could result in the connection being closed with the error "Connection reset by // peer" // xref: https://kubernetes.io/blog/2019/03/29/kube-proxy-subtleties-debugging-an-intermittent-connection-reset/ - ginkgo.It("should drop INVALID conntrack entries [Privileged]", func() { + ginkgo.It("should drop INVALID conntrack entries [Privileged]", func(ctx context.Context) { serverLabel := map[string]string{ "app": "boom-server", } diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index db314510449..b9d5b0262d1 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -47,7 +47,7 @@ var _ = common.SIGDescribe("DNS", func() { Testname: DNS, cluster Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS. */ - framework.ConformanceIt("should provide DNS for the cluster ", func() { + framework.ConformanceIt("should provide DNS for the cluster ", func(ctx context.Context) { // All the names we need to be able to resolve. // TODO: Spin up a separate test service and test that dns works for that service. 
// NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below @@ -67,7 +67,7 @@ var _ = common.SIGDescribe("DNS", func() { }) // Added due to #8512. This is critical for GCE and GKE deployments. - ginkgo.It("should provide DNS for the cluster [Provider:GCE]", func() { + ginkgo.It("should provide DNS for the cluster [Provider:GCE]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") namesToResolve := []string{"google.com"} @@ -89,7 +89,7 @@ var _ = common.SIGDescribe("DNS", func() { }) // [LinuxOnly]: As Windows currently does not support resolving PQDNs. - ginkgo.It("should resolve DNS of partial qualified names for the cluster [LinuxOnly]", func() { + ginkgo.It("should resolve DNS of partial qualified names for the cluster [LinuxOnly]", func(ctx context.Context) { // All the names we need to be able to resolve. namesToResolve := []string{ "kubernetes.default", @@ -114,7 +114,7 @@ var _ = common.SIGDescribe("DNS", func() { Testname: DNS, cluster Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via /etc/hosts. */ - framework.ConformanceIt("should provide /etc/hosts entries for the cluster", func() { + framework.ConformanceIt("should provide /etc/hosts entries for the cluster", func(ctx context.Context) { hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) hostEntries := []string{hostFQDN, dnsTestPodHostName} // TODO: Validate both IPv4 and IPv6 families for dual-stack @@ -134,7 +134,7 @@ var _ = common.SIGDescribe("DNS", func() { Testname: DNS, services Description: When a headless service is created, the service MUST be able to resolve all the required service endpoints. When the service is created, any pod in the same namespace must be able to resolve the service by all of the expected DNS names. */ - framework.ConformanceIt("should provide DNS for services ", func() { + framework.ConformanceIt("should provide DNS for services ", func(ctx context.Context) { // NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below // Create a test headless service. ginkgo.By("Creating a test headless service") @@ -190,7 +190,7 @@ var _ = common.SIGDescribe("DNS", func() { Description: Create a headless service and normal service. Both the services MUST be able to resolve partial qualified DNS entries of their service endpoints by serving A records and SRV records. [LinuxOnly]: As Windows currently does not support resolving PQDNs. */ - framework.ConformanceIt("should resolve DNS of partial qualified names for services [LinuxOnly]", func() { + framework.ConformanceIt("should resolve DNS of partial qualified names for services [LinuxOnly]", func(ctx context.Context) { // Create a test headless service. ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ @@ -245,7 +245,7 @@ var _ = common.SIGDescribe("DNS", func() { Description: Create a headless service with label. Create a Pod with label to match service's label, with hostname and a subdomain same as service name. Pod MUST be able to resolve its fully qualified domain name as well as hostname by serving an A record at that name. */ - framework.ConformanceIt("should provide DNS for pods for Hostname", func() { + framework.ConformanceIt("should provide DNS for pods for Hostname", func(ctx context.Context) { // Create a test headless service. 
ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ @@ -287,7 +287,7 @@ var _ = common.SIGDescribe("DNS", func() { Description: Create a headless service with label. Create a Pod with label to match service's label, with hostname and a subdomain same as service name. Pod MUST be able to resolve its fully qualified domain name as well as subdomain by serving an A record at that name. */ - framework.ConformanceIt("should provide DNS for pods for Subdomain", func() { + framework.ConformanceIt("should provide DNS for pods for Subdomain", func(ctx context.Context) { // Create a test headless service. ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ @@ -330,7 +330,7 @@ var _ = common.SIGDescribe("DNS", func() { Description: Create a service with externalName. Pod MUST be able to resolve the address for this service via CNAME. When externalName of this service is changed, Pod MUST resolve to new DNS entry for the service. Change the service type from externalName to ClusterIP, Pod MUST resolve DNS to the service by serving A records. */ - framework.ConformanceIt("should provide DNS for ExternalName services", func() { + framework.ConformanceIt("should provide DNS for ExternalName services", func(ctx context.Context) { // Create a test ExternalName service. ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" @@ -408,7 +408,7 @@ var _ = common.SIGDescribe("DNS", func() { Description: Create a Pod with DNSPolicy as None and custom DNS configuration, specifying nameservers and search path entries. Pod creation MUST be successful and provided DNS configuration MUST be configured in the Pod. */ - framework.ConformanceIt("should support configurable pod DNS nameservers", func() { + framework.ConformanceIt("should support configurable pod DNS nameservers", func(ctx context.Context) { ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...") testServerIP := "1.1.1.1" testSearchPath := "resolv.conf.local" @@ -457,7 +457,7 @@ var _ = common.SIGDescribe("DNS", func() { } }) - ginkgo.It("should support configurable pod resolv.conf", func() { + ginkgo.It("should support configurable pod resolv.conf", func(ctx context.Context) { ginkgo.By("Preparing a test DNS service with injected DNS names...") testInjectedIP := "1.1.1.1" testDNSNameShort := "notexistname" @@ -569,7 +569,7 @@ var _ = common.SIGDescribe("DNS", func() { // TODO: Add more test cases for other DNSPolicies. 
}) - ginkgo.It("should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters", func() { + ginkgo.It("should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters", func(ctx context.Context) { ginkgo.By("Getting the kube-dns IP") svc, err := f.ClientSet.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get kube-dns service") @@ -619,7 +619,7 @@ var _ = common.SIGDescribe("DNS HostNetwork", func() { f := framework.NewDefaultFramework("hostnetworkdns") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly]", func() { + ginkgo.It("should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly]", func(ctx context.Context) { // Create a test headless service. ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index d1a45c3f3aa..ea2802bbda3 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -320,7 +320,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Change stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func(ctx context.Context) { nsTest.c = nsTest.f.ClientSet nsTest.run(framework.TestContext.ClusterIsIPv6()) }) @@ -329,7 +329,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(framework.TestContext.ClusterIsIPv6()) }) @@ -338,7 +338,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(framework.TestContext.ClusterIsIPv6()) }) diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index 6b1d403054c..7c53ba4a04f 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -55,7 +55,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { }) // answers dns for service - creates the maximum number of services, and then check dns record for one - ginkgo.It("Should answer DNS query for maximum number of services per cluster", func() { + ginkgo.It("Should answer DNS query for maximum number of services per cluster", func(ctx context.Context) { // get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace numNs := 
(maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index 5858487e14e..b1ae6c0739c 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -56,7 +56,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { podClient = e2epod.NewPodClient(f) }) - ginkgo.It("should have ipv4 and ipv6 internal node ip", func() { + ginkgo.It("should have ipv4 and ipv6 internal node ip", func(ctx context.Context) { // TODO (aramase) can switch to new function to get all nodes nodeList, err := e2enode.GetReadySchedulableNodes(cs) framework.ExpectNoError(err) @@ -73,7 +73,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should create pod, add ipv6 and ipv4 ip to pod ips", func() { + ginkgo.It("should create pod, add ipv6 and ipv4 ip to pod ips", func(ctx context.Context) { podName := "pod-dualstack-ips" pod := &v1.Pod{ @@ -112,7 +112,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }) // takes close to 140s to complete, so doesn't need to be marked [SLOW] - ginkgo.It("should be able to reach pod on ipv4 and ipv6 ip", func() { + ginkgo.It("should be able to reach pod on ipv4 and ipv6 ip", func(ctx context.Context) { serverDeploymentName := "dualstack-server" clientDeploymentName := "dualstack-client" @@ -201,7 +201,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80") }) - ginkgo.It("should create a single stack service with cluster ip from primary service range", func() { + ginkgo.It("should create a single stack service with cluster ip from primary service range", func(ctx context.Context) { serviceName := "defaultclusterip" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -248,7 +248,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should create service with ipv4 cluster ip", func() { + ginkgo.It("should create service with ipv4 cluster ip", func(ctx context.Context) { serviceName := "ipv4clusterip" ns := f.Namespace.Name @@ -293,7 +293,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should create service with ipv6 cluster ip", func() { + ginkgo.It("should create service with ipv6 cluster ip", func(ctx context.Context) { serviceName := "ipv6clusterip" ns := f.Namespace.Name ipv6 := v1.IPv6Protocol @@ -338,7 +338,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should create service with ipv4,v6 cluster ip", func() { + ginkgo.It("should create service with ipv4,v6 cluster ip", func(ctx context.Context) { serviceName := "ipv4ipv6clusterip" ns := f.Namespace.Name @@ -383,7 +383,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should create service with ipv6,v4 cluster ip", func() { + ginkgo.It("should create service with ipv6,v4 cluster ip", func(ctx context.Context) { serviceName := "ipv6ipv4clusterip" ns := f.Namespace.Name @@ -434,7 +434,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // but using the secondary IP, so we run the same tests for each ClusterIP family ginkgo.Describe("Granular Checks: Services Secondary IP Family [LinuxOnly]", func() { - ginkgo.It("should function for pod-Service: http", func() { + ginkgo.It("should function for pod-Service: http", func(ctx context.Context) { config := 
e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -448,7 +448,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for pod-Service: udp", func() { + ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -463,7 +463,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromTestContainer("sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -478,7 +478,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for node-Service: http", func() { + ginkgo.It("should function for node-Service: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromNode("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -493,7 +493,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for node-Service: udp", func() { + ginkgo.It("should function for node-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.SecondaryNodeIP, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromNode("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -508,7 +508,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for endpoint-Service: http", func() { + ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) err := 
config.DialFromEndpointContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -522,7 +522,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for endpoint-Service: udp", func() { + ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromEndpointContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -536,7 +536,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should update endpoints: http", func() { + ginkgo.It("should update endpoints: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -552,7 +552,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should update endpoints: udp", func() { + ginkgo.It("should update endpoints: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromTestContainer("udp", config.SecondaryClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -570,7 +570,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func() { + ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterHTTPPort)) @@ -588,7 +588,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { }) // [LinuxOnly]: Windows does not support session affinity. 
- ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func() { + ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIPs[1], e2enetwork.ClusterUDPPort)) @@ -605,14 +605,14 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should be able to handle large requests: http", func() { + ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) config.DialEchoFromTestContainer("http", config.SecondaryClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message) }) - ginkgo.It("should be able to handle large requests: udp", func() { + ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterUDPPort)) message := "n" + strings.Repeat("o", 1999) @@ -622,7 +622,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { // if the endpoints pods use hostNetwork, several tests can't run in parallel // because the pods will try to acquire the same port in the host. // We run the test in serial, to avoid port conflicts. - ginkgo.It("should function for service endpoints using hostNetwork", func() { + ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) ginkgo.By("pod-Service(hostNetwork): http") diff --git a/test/e2e/network/endpointslice.go b/test/e2e/network/endpointslice.go index 8bcb4df6b4c..03f350c8094 100644 --- a/test/e2e/network/endpointslice.go +++ b/test/e2e/network/endpointslice.go @@ -63,7 +63,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { The cluster MUST have a service named "kubernetes" on the default namespace referencing the API servers. The "kubernetes.default" service MUST have Endpoints and EndpointSlices pointing to each API server instance. */ - framework.ConformanceIt("should have Endpoints and EndpointSlices pointing to API Server", func() { + framework.ConformanceIt("should have Endpoints and EndpointSlices pointing to API Server", func(ctx context.Context) { namespace := "default" name := "kubernetes" // verify "kubernetes.default" service exist @@ -99,7 +99,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document. The endpointslice controller should create and delete EndpointSlices for Pods matching a Service. 
*/ - framework.ConformanceIt("should create and delete Endpoints and EndpointSlices for a Service with a selector specified", func() { + framework.ConformanceIt("should create and delete Endpoints and EndpointSlices for a Service with a selector specified", func(ctx context.Context) { svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-empty-selector", @@ -202,7 +202,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document. The endpointslice controller must create EndpointSlices for Pods mataching a Service. */ - framework.ConformanceIt("should create Endpoints and EndpointSlices for Pods matching a Service", func() { + framework.ConformanceIt("should create Endpoints and EndpointSlices for Pods matching a Service", func(ctx context.Context) { labelPod1 := "pod1" labelPod2 := "pod2" labelPod3 := "pod3" @@ -350,7 +350,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() { The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document. The endpointslices resource must support create, get, list, watch, update, patch, delete, and deletecollection. */ - framework.ConformanceIt("should support creating EndpointSlice API operations", func() { + framework.ConformanceIt("should support creating EndpointSlice API operations", func(ctx context.Context) { // Setup ns := f.Namespace.Name epsVersion := "v1" diff --git a/test/e2e/network/endpointslicemirroring.go b/test/e2e/network/endpointslicemirroring.go index d74792e0da8..ddfbc8f345c 100644 --- a/test/e2e/network/endpointslicemirroring.go +++ b/test/e2e/network/endpointslicemirroring.go @@ -52,7 +52,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document. The endpointslices mirrorowing must mirror endpoint create, update, and delete actions. */ - framework.ConformanceIt("should mirror a custom Endpoints resource through create update and delete", func() { + framework.ConformanceIt("should mirror a custom Endpoints resource through create update and delete", func(ctx context.Context) { svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "example-custom-endpoints", @@ -202,7 +202,7 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() { }) }) - ginkgo.It("should mirror a custom Endpoint with multiple subsets and same IP address", func() { + ginkgo.It("should mirror a custom Endpoint with multiple subsets and same IP address", func(ctx context.Context) { ns := f.Namespace.Name svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index e2b4540e64d..14c73017582 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -75,7 +75,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { return string(data) } - ginkgo.It("should create pod that uses dns", func() { + ginkgo.It("should create pod that uses dns", func(ctx context.Context) { // contrary to the example, this test does not use contexts, for simplicity // namespaces are passed directly. 
// Also, for simplicity, we don't use yamls with namespaces, but we diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 137d6d99942..d8a00acedaa 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -71,7 +71,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { }) // This test takes around 6 minutes to run - ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { + ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func(ctx context.Context) { ns := f.Namespace.Name // This source ranges is just used to examine we have exact same things on LB firewall rules firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} @@ -204,7 +204,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { framework.ExpectNoError(err) }) - ginkgo.It("should have correct firewall rules for e2e cluster", func() { + ginkgo.It("should have correct firewall rules for e2e cluster", func(ctx context.Context) { ginkgo.By("Checking if e2e firewall rules are correct") for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) { fw, err := gceCloud.GetFirewall(expFw.Name) @@ -214,7 +214,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { } }) - ginkgo.It("control plane should not expose well-known ports", func() { + ginkgo.It("control plane should not expose well-known ports", func(ctx context.Context) { nodes, err := e2enode.GetReadySchedulableNodes(cs) framework.ExpectNoError(err) diff --git a/test/e2e/network/funny_ips.go b/test/e2e/network/funny_ips.go index 282c3553786..c60f9b0f896 100644 --- a/test/e2e/network/funny_ips.go +++ b/test/e2e/network/funny_ips.go @@ -90,7 +90,7 @@ var _ = common.SIGDescribe("CVE-2021-29923", func() { IMPORTANT: CoreDNS since version 1.8.5 discard IPs with leading zeros so Services are not resolvable, and is probably that most of the ecosystem has done the same, however, Kubernetes doesn't impose any restriction, users should migrate their IPs. */ - ginkgo.It("IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal", func() { + ginkgo.It("IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal", func(ctx context.Context) { serviceName := "funny-ip" // Use a very uncommon port to reduce the risk of conflicts with other tests that create services. servicePort := 7180 diff --git a/test/e2e/network/hostport.go b/test/e2e/network/hostport.go index d6e048856a7..eb5f7272a2d 100644 --- a/test/e2e/network/hostport.go +++ b/test/e2e/network/hostport.go @@ -60,7 +60,7 @@ var _ = common.SIGDescribe("HostPort", func() { Windows. 
*/ - framework.ConformanceIt("validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly]", func() { + framework.ConformanceIt("validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly]", func(ctx context.Context) { localhost := "127.0.0.1" family := v1.IPv4Protocol diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 4523e3c8643..e7f706481e2 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -117,7 +117,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) }) - ginkgo.It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func(ctx context.Context) { conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { ginkgo.By(t.EntryLog) @@ -162,7 +162,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) }) - ginkgo.It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func(ctx context.Context) { jig.PollInterval = 5 * time.Second conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{ e2eingress.NEGAnnotation: `{"ingress": true}`, @@ -177,7 +177,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { } }) - ginkgo.It("should be able to switch between IG and NEG modes", func() { + ginkgo.It("should be able to switch between IG and NEG modes", func(ctx context.Context) { var err error propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet) ginkgo.By("Create a basic HTTP ingress using NEG") @@ -223,7 +223,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { jig.WaitForIngress(true) }) - ginkgo.It("should be able to create a ClusterIP service", func() { + ginkgo.It("should be able to create a ClusterIP service", func(ctx context.Context) { ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) @@ -237,7 +237,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { } }) - ginkgo.It("should sync endpoints to NEG", func() { + ginkgo.It("should sync endpoints to NEG", func(ctx context.Context) { name := "hostname" scaleAndValidateNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) @@ -281,7 +281,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { scaleAndValidateNEG(3) }) - ginkgo.It("rolling update backend pods should not cause service disruption", func() { + ginkgo.It("rolling update backend pods should not cause service disruption", func(ctx context.Context) { name := "hostname" replicas := 8 ginkgo.By("Create a basic HTTP ingress using NEG") @@ -336,7 +336,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) }) - ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { + ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func(ctx context.Context) { name := "hostname" expectedKeys := []int32{80, 443} @@ -420,7 +420,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { scaleAndValidateExposedNEG(3) }) - ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone 
annotation otherwise", func() { + ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func(ctx context.Context) { ginkgo.By("Create a basic HTTP ingress using standalone NEG") jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) @@ -549,7 +549,7 @@ var _ = common.SIGDescribe("Ingress API", func() { The ingresses/status resource must support update and patch */ - framework.ConformanceIt("should support creating Ingress API operations", func() { + framework.ConformanceIt("should support creating Ingress API operations", func(ctx context.Context) { // Setup ns := f.Namespace.Name ingVersion := "v1" diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index 97bffc77a0f..da489f0818a 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -17,6 +17,8 @@ limitations under the License. package network import ( + "context" + "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" @@ -58,7 +60,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7 Scalability", func() { } }) - ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { + ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func(ctx context.Context) { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { framework.Failf("Unexpected error while running ingress scale test: %v", errs) } diff --git a/test/e2e/network/ingressclass.go b/test/e2e/network/ingressclass.go index 1092b6060d6..c158fac9152 100644 --- a/test/e2e/network/ingressclass.go +++ b/test/e2e/network/ingressclass.go @@ -45,7 +45,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { cs = f.ClientSet }) - ginkgo.It("should set default value on new IngressClass [Serial]", func() { + ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) { ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) defer deleteIngressClass(cs, ingressClass1.Name) @@ -82,7 +82,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should not set default value if no default IngressClass [Serial]", func() { + ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) { ingressClass1, err := createIngressClass(cs, "ingressclass1", false, f.UniqueName) framework.ExpectNoError(err) defer deleteIngressClass(cs, ingressClass1.Name) @@ -116,7 +116,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func() { + ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) { ingressClass1, err := createIngressClass(cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) defer deleteIngressClass(cs, ingressClass1.Name) @@ -164,7 +164,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should allow IngressClass to have 
Namespace-scoped parameters [Serial]", func() { + ginkgo.It("should allow IngressClass to have Namespace-scoped parameters [Serial]", func(ctx context.Context) { ingressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ Name: "ingressclass1", @@ -263,7 +263,7 @@ var _ = common.SIGDescribe("IngressClass API", func() { - The ingressclasses resource MUST exist in the /apis/networking.k8s.io/v1 discovery document. - The ingressclass resource must support create, get, list, watch, update, patch, delete, and deletecollection. */ - framework.ConformanceIt(" should support creating IngressClass API operations", func() { + framework.ConformanceIt(" should support creating IngressClass API operations", func(ctx context.Context) { // Setup icClient := f.ClientSet.NetworkingV1().IngressClasses() diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index 0e301e46a14..effedd3f76d 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" "math" "net" @@ -52,7 +53,7 @@ var _ = common.SIGDescribe("KubeProxy", func() { fr := framework.NewDefaultFramework("kube-proxy") fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should set TCP CLOSE_WAIT timeout [Privileged]", func() { + ginkgo.It("should set TCP CLOSE_WAIT timeout [Privileged]", func(ctx context.Context) { nodes, err := e2enode.GetBoundedReadySchedulableNodes(fr.ClientSet, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go index 9f8f794fcc5..21a5d5d5c36 100644 --- a/test/e2e/network/loadbalancer.go +++ b/test/e2e/network/loadbalancer.go @@ -74,7 +74,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } }) - ginkgo.It("should be able to change the type and ports of a TCP service [Slow]", func() { + ginkgo.It("should be able to change the type and ports of a TCP service [Slow]", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") @@ -284,7 +284,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) }) - ginkgo.It("should be able to change the type and ports of a UDP service [Slow]", func() { + ginkgo.It("should be able to change the type and ports of a UDP service [Slow]", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke") @@ -484,7 +484,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) }) - ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func() { + ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func(ctx context.Context) { // this feature currently supported only on GCE/GKE/AWS e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") @@ -568,7 +568,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP) }) - ginkgo.It("should be able to create an internal type load balancer [Slow]", func() { + ginkgo.It("should be able to create an internal type load balancer [Slow]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) @@ -693,7 +693,7 @@ 
var _ = common.SIGDescribe("LoadBalancers", func() { // This test creates a load balancer, make sure its health check interval // equals to gceHcCheckIntervalSeconds. Then the interval is manipulated // to be something else, see if the interval will be reconciled. - ginkgo.It("should reconcile LB health check interval [Slow][Serial][Disruptive]", func() { + ginkgo.It("should reconcile LB health check interval [Slow][Serial][Disruptive]", func(ctx context.Context) { const gceHcCheckIntervalSeconds = int64(8) // This test is for clusters on GCE. // (It restarts kube-controller-manager, which we don't support on GKE) @@ -766,7 +766,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -777,7 +777,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -788,7 +788,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -799,7 +799,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -815,7 +815,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // 2. Update service to type=ClusterIP. Finalizer should be removed. // 3. Update service to type=LoadBalancer. Finalizer should be added. // 4. Delete service with type=LoadBalancer. Finalizer should be removed. 
- ginkgo.It("should handle load balancer cleanup finalizer for service [Slow]", func() { + ginkgo.It("should handle load balancer cleanup finalizer for service [Slow]", func(ctx context.Context) { jig := e2eservice.NewTestJig(cs, f.Namespace.Name, "lb-finalizer") ginkgo.By("Create load balancer service") @@ -847,7 +847,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) }) - ginkgo.It("should be able to create LoadBalancer Service without NodePort and change it [Slow]", func() { + ginkgo.It("should be able to create LoadBalancer Service without NodePort and change it [Slow]", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") @@ -973,7 +973,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) }) - ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes", func() { + ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") ns := f.Namespace.Name @@ -1105,7 +1105,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } }) - ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes", func() { + ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes", func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") ns := f.Namespace.Name @@ -1263,7 +1263,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { } }) - ginkgo.It("should work for type=LoadBalancer", func() { + ginkgo.It("should work for type=LoadBalancer", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "external-local-lb" jig := e2eservice.NewTestJig(cs, namespace, serviceName) @@ -1311,7 +1311,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { } }) - ginkgo.It("should work for type=NodePort", func() { + ginkgo.It("should work for type=NodePort", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "external-local-nodeport" jig := e2eservice.NewTestJig(cs, namespace, serviceName) @@ -1343,7 +1343,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { } }) - ginkgo.It("should only target nodes with endpoints", func() { + ginkgo.It("should only target nodes with endpoints", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "external-local-nodes" jig := e2eservice.NewTestJig(cs, namespace, serviceName) @@ -1419,7 +1419,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { } }) - ginkgo.It("should work from pods", func() { + ginkgo.It("should work from pods", func(ctx context.Context) { var err error namespace := f.Namespace.Name serviceName := "external-local-pods" @@ -1477,7 +1477,7 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { } }) - ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() { + ginkgo.It("should handle updates to ExternalTrafficPolicy field", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "external-local-update" jig := e2eservice.NewTestJig(cs, 
namespace, serviceName) diff --git a/test/e2e/network/netpol/network_legacy.go b/test/e2e/network/netpol/network_legacy.go index 253d988e340..dd6ee5b5ffa 100644 --- a/test/e2e/network/netpol/network_legacy.go +++ b/test/e2e/network/netpol/network_legacy.go @@ -97,7 +97,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { cleanupServerPodAndService(f, podServer, service) }) - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "deny-ingress", @@ -117,7 +117,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80) }) - ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -162,7 +162,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -221,7 +221,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -267,7 +267,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, nsB, "client-b", service, 80) }) - ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -305,7 +305,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -353,7 +353,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, nsB, "client-a", service, 80) }) - ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector 
[Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -400,7 +400,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCannotConnect(f, nsA, "client-c", service, 80) }) - ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -446,7 +446,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, nsB, "client-b", service, 80) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -522,7 +522,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -552,7 +552,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, f.Namespace, "client-b", service, 81) }) - ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -606,7 +606,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, f.Namespace, "client-b", service, 81) }) - ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy which allows all traffic.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -629,7 +629,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, f.Namespace, "client-b", service, 81) }) - ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-named-port-ingress-rule", @@ -662,7 +662,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func(ctx 
context.Context) { nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ "ns-name": nsBName, @@ -705,7 +705,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, nsB, "client-b", service, allowedPort) }) - ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { clientPodName := "client-a" policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -741,7 +741,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func(ctx context.Context) { const ( clientAAllowedPort = 80 clientANotAllowedPort = 81 @@ -831,7 +831,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, f.Namespace, "client-b", service, clientBAllowedPort) }) - ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" newNsBName := nsBName + "-updated" @@ -881,7 +881,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCanConnect(f, nsB, "client-b", service, allowedPort) }) - ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { const allowedPort = 80 ginkgo.By("Creating a network policy for the server which allows traffic from client-a-updated.") policy := &networkingv1.NetworkPolicy{ @@ -929,7 +929,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { checkConnectivity(f, f.Namespace, podClient, service) }) - ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func() { + ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { const allowedPort = 80 ginkgo.By("Creating a network policy for the server which denies all traffic.") policy := &networkingv1.NetworkPolicy{ @@ -964,7 +964,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { testCannotConnect(f, f.Namespace, "client-a", service, allowedPort) }) - ginkgo.It("should work with Ingress,Egress specified together [Feature:NetworkPolicy]", func() { + ginkgo.It("should work with Ingress,Egress specified together [Feature:NetworkPolicy]", func(ctx context.Context) { const allowedPort = 80 const notAllowedPort = 81 @@ -1041,7 +1041,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { var nsBserviceA, nsBserviceB *v1.Service var nsBpodServerA, nsBpodServerB *v1.Pod @@ -1121,7 +1121,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking 
precedence [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which allows traffic only from client-b.") policyAllowOnlyFromClientB := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -1188,7 +1188,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { podServerB, serviceB := createServerPodAndService(f, f.Namespace, "server-b", []protocolPort{{80, v1.ProtocolTCP}}) defer cleanupServerPodAndService(f, podServerB, serviceB) @@ -1269,7 +1269,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func() { + ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which denies all traffic.") policyDenyAll := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -1340,7 +1340,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { var serviceB *v1.Service var podServerB *v1.Pod @@ -1411,7 +1411,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { @@ -1474,7 +1474,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func() { + ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { @@ -1586,7 +1586,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) }) - ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { var serviceA, serviceB *v1.Service var podA, podB *v1.Pod var err error @@ -1686,7 +1686,7 @@ var _ = 
common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() { }) cleanupServerPodAndService(f, podA, serviceA) }) - ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy]", func() { + ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("getting the state of the sctp module on nodes") nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err) @@ -1764,7 +1764,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][ cleanupServerPodAndService(f, podServer, service) }) - ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func(ctx context.Context) { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "deny-all", @@ -1784,7 +1784,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][ testCannotConnectProtocol(f, f.Namespace, "client-cannot-connect", service, 80, v1.ProtocolSCTP) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -1815,7 +1815,7 @@ var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][ testCanConnectProtocol(f, f.Namespace, "client-b", service, 81, v1.ProtocolSCTP) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { nsA := f.Namespace nsBName := f.BaseName + "-b" nsB, err := f.CreateNamespace(nsBName, map[string]string{ @@ -2199,7 +2199,7 @@ var _ = common.SIGDescribe("NetworkPolicy API", func() { - The NetworkPolicies resource must support create, get, list, watch, update, patch, delete, and deletecollection. 
*/ - ginkgo.It("should support creating NetworkPolicy API operations", func() { + ginkgo.It("should support creating NetworkPolicy API operations", func(ctx context.Context) { // Setup ns := f.Namespace.Name npVersion := "v1" diff --git a/test/e2e/network/netpol/network_policy.go b/test/e2e/network/netpol/network_policy.go index 82c0523ae91..d658ba4cf10 100644 --- a/test/e2e/network/netpol/network_policy.go +++ b/test/e2e/network/netpol/network_policy.go @@ -117,7 +117,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.Context("NetworkPolicy between server and client", func() { var k8s *kubeManager - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -131,7 +131,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func(ctx context.Context) { policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules()) protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -146,7 +146,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { allowedPods := metav1.LabelSelector{ MatchLabels: map[string]string{ "pod": "b", @@ -169,7 +169,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] ", func() { + ginkgo.It("should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -194,7 +194,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -208,7 +208,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func(ctx 
context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -226,7 +226,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { allowedPods := metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ Key: "pod", @@ -251,7 +251,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -276,7 +276,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -305,7 +305,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -335,7 +335,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -368,7 +368,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on any PodSelectors [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on any PodSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -390,7 +390,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector 
[Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -417,7 +417,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{81} @@ -442,7 +442,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} @@ -484,7 +484,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityALLOW}) }) - ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy which allows all traffic.") policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) protocols := []v1.Protocol{protocolTCP} @@ -499,7 +499,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { IngressRules := networkingv1.NetworkPolicyIngressRule{} IngressRules.Ports = append(IngressRules.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-81-tcp"}}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(IngressRules)) @@ -520,7 +520,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80}) }) - ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} k8s = initializeResources(f, protocols, ports) @@ -550,7 +550,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityFAIL}) }) - ginkgo.It("should allow egress access on one named port 
[Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("validating egress from port 81 to port 80") egressRule := networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"}}) @@ -571,7 +571,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81}) }) - ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Using the simplest possible mutation: start with allow all, then switch to deny all") // part 1) allow all policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all-mutate-to-deny-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) @@ -593,7 +593,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityDeny}) }) - ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -624,7 +624,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityWithLabel}) }) - ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -653,7 +653,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityWithLabel}) }) - ginkgo.It("should deny ingress from pods on other namespaces [Feature:NetworkPolicy]", func() { + ginkgo.It("should deny ingress from pods on other namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -670,7 +670,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func() { + ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -692,7 +692,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityIsolated}) }) - ginkgo.It("should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ", func() { + ginkgo.It("should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -706,7 +706,7 @@ var _ = common.SIGDescribe("Netpol", func() { 
ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should deny egress from all pods in a namespace [Feature:NetworkPolicy] ", func() { + ginkgo.It("should deny egress from all pods in a namespace [Feature:NetworkPolicy] ", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -720,7 +720,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should work with Ingress, Egress specified together [Feature:NetworkPolicy]", func() { + ginkgo.It("should work with Ingress, Egress specified together [Feature:NetworkPolicy]", func(ctx context.Context) { allowedPodLabels := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "b"}} ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: allowedPodLabels}) @@ -758,7 +758,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81}) }) - ginkgo.It("should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]", func() { + ginkgo.It("should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]", func(ctx context.Context) { // x/a --> y/a and y/b // Egress allowed to y/a only. Egress to y/b should be blocked // Ingress on y/a and y/b allow traffic from x/a @@ -854,7 +854,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -880,7 +880,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]", func() { + ginkgo.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP, protocolUDP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -898,7 +898,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolUDP, Reachability: reachabilityUDP}) }) - ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{81} k8s = initializeResources(f, protocols, ports) @@ -923,7 +923,7 @@ var _ = 
common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) }) - ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { egressRule := networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80}}) policyAllowPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress-port-80", map[string]string{}, SetSpecEgressRules(egressRule)) @@ -948,7 +948,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) }) - ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func() { + ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which denies all traffic.") // Deny all traffic into and out of "x". @@ -976,7 +976,7 @@ var _ = common.SIGDescribe("Netpol", func() { // TODO, figure out how the next 3 tests should work with dual stack : do we need a different abstraction then just "podIP"? - ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1003,7 +1003,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1037,7 +1037,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func() { + ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1086,7 +1086,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityAllow}) }) - ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { /* Test steps: 1. 
Verify every pod in every namespace can talk to each other @@ -1123,7 +1123,7 @@ var _ = common.SIGDescribe("Netpol", func() { // This test *does* apply to plugins that do not implement SCTP. It is a // security hole if you fail this test, because you are allowing TCP // traffic that is supposed to be blocked. - ginkgo.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func() { + ginkgo.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{81} k8s = initializeResources(f, protocols, ports) @@ -1149,7 +1149,7 @@ var _ = common.SIGDescribe("Netpol", func() { // This test *does* apply to plugins that do not implement SCTP. It is a // security hole if you fail this test, because you are allowing TCP // traffic that is supposed to be blocked. - ginkgo.It("should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func() { + ginkgo.It("should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 80.") ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 80}, Protocol: &protocolSCTP}) @@ -1166,7 +1166,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]", func() { + ginkgo.It("should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]", func(ctx context.Context) { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolUDP}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-udp-ingress-on-port-81", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) @@ -1185,7 +1185,7 @@ var _ = common.SIGDescribe("Netpol", func() { }) // Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyways because its an important feature. - ginkgo.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -1208,7 +1208,7 @@ var _ = common.SIGDescribe("Netpol", func() { }) // Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyways because its an important feature. 
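The "default ns label" that these specs rely on is the kubernetes.io/metadata.name label, which the apiserver sets on every namespace, so a policy can select a namespace without labelling it first. A rough sketch of such a peer selector, using the networkingv1 and metav1 types already imported in this file (the namespace name "x" is a placeholder):

    // Illustrative only: allow ingress from namespace "x" via its immutable
    // kubernetes.io/metadata.name label.
    ingressRule := networkingv1.NetworkPolicyIngressRule{
        From: []networkingv1.NetworkPolicyPeer{{
            NamespaceSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"kubernetes.io/metadata.name": "x"},
            },
        }},
    }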
- ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -1244,7 +1244,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ginkgo.Context("NetworkPolicy between server and client using UDP", func() { - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -1258,7 +1258,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolUDP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network policy allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolUDP} ports := []int32{81} @@ -1283,7 +1283,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolUDP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -1324,7 +1324,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt ginkgo.Context("NetworkPolicy between server and client using SCTP", func() { - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) @@ -1338,7 +1338,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolSCTP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolSCTP} ports := []int32{81} @@ -1362,7 +1362,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolSCTP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", 
func() { + ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} k8s = initializeResources(f, protocols, ports) diff --git a/test/e2e/network/netpol/network_policy_api.go b/test/e2e/network/netpol/network_policy_api.go index 20430ed9527..ecc0f1317a4 100644 --- a/test/e2e/network/netpol/network_policy_api.go +++ b/test/e2e/network/netpol/network_policy_api.go @@ -47,7 +47,7 @@ var _ = common.SIGDescribe("Netpol API", func() { - The NetworkPolicies resource must support create, get, list, watch, update, patch, delete, and deletecollection. */ - ginkgo.It("should support creating NetworkPolicy API operations", func() { + ginkgo.It("should support creating NetworkPolicy API operations", func(ctx context.Context) { // Setup ns := f.Namespace.Name npVersion := "v1" @@ -219,7 +219,7 @@ var _ = common.SIGDescribe("Netpol API", func() { - EndPort field cannot be defined if the Port field is defined as a named (string) port. - EndPort field must be equal or greater than port. */ - ginkgo.It("should support creating NetworkPolicy API with endport field", func() { + ginkgo.It("should support creating NetworkPolicy API with endport field", func(ctx context.Context) { ns := f.Namespace.Name npClient := f.ClientSet.NetworkingV1().NetworkPolicies(ns) @@ -278,7 +278,7 @@ var _ = common.SIGDescribe("Netpol API", func() { - Status should support conditions - Two conditions with the same type cannot exist. */ - ginkgo.It("should support creating NetworkPolicy with Status subresource [Feature:NetworkPolicyStatus]", func() { + ginkgo.It("should support creating NetworkPolicy with Status subresource [Feature:NetworkPolicyStatus]", func(ctx context.Context) { ns := f.Namespace.Name npClient := f.ClientSet.NetworkingV1().NetworkPolicies(ns) diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index ee9b5ec3eaa..a280270d1bd 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -17,6 +17,7 @@ limitations under the License. 
package network import ( + "context" "fmt" "net/http" "time" @@ -63,7 +64,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { //reset serviceLBNames serviceLBNames = []string{} }) - ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { + ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func(ctx context.Context) { lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index b20dd3b6c52..5831550fa76 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -82,13 +82,13 @@ var _ = common.SIGDescribe("Networking", func() { f := framework.NewDefaultFramework(svcname) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func(ctx context.Context) { ginkgo.By("Running container which tries to connect to 8.8.8.8") framework.ExpectNoError( checkConnectivityToHost(f, "", "connectivity-test", "8.8.8.8", 53, 30)) }) - ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]", func() { + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]", func(ctx context.Context) { // IPv6 is not supported on Windows. e2eskipper.SkipIfNodeOSDistroIs("windows") ginkgo.By("Running container which tries to connect to 2001:4860:4860::8888") @@ -96,14 +96,14 @@ var _ = common.SIGDescribe("Networking", func() { checkConnectivityToHost(f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30)) }) - ginkgo.It("should provider Internet connection for containers using DNS [Feature:Networking-DNS]", func() { + ginkgo.It("should provider Internet connection for containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { ginkgo.By("Running container which tries to connect to google.com") framework.ExpectNoError( checkConnectivityToHost(f, "", "connectivity-test", "google.com", 80, 30)) }) // First test because it has no dependencies on variables created later on. - ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func() { + ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func(ctx context.Context) { tests := []struct { path string }{ @@ -129,7 +129,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should check kube-proxy urls", func() { + ginkgo.It("should check kube-proxy urls", func(ctx context.Context) { // TODO: this is overkill we just need the host networking pod // to hit kube-proxy urls. 
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) @@ -144,7 +144,7 @@ var _ = common.SIGDescribe("Networking", func() { ginkgo.Describe("Granular Checks: Services", func() { - ginkgo.It("should function for pod-Service: http", func() { + ginkgo.It("should function for pod-Service: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -159,7 +159,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for pod-Service: udp", func() { + ginkgo.It("should function for pod-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -175,7 +175,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromTestContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -189,7 +189,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for node-Service: http", func() { + ginkgo.It("should function for node-Service: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -203,7 +203,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for node-Service: udp", func() { + ginkgo.It("should function for node-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -218,7 +218,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { ginkgo.Skip("Skipping SCTP node to service test until DialFromNode supports SCTP #96482") 
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterSCTPPort)) @@ -233,7 +233,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for endpoint-Service: http", func() { + ginkgo.It("should function for endpoint-Service: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -247,7 +247,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for endpoint-Service: udp", func() { + ginkgo.It("should function for endpoint-Service: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromEndpointContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -263,7 +263,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes - ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func() { + ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromEndpointContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -280,7 +280,7 @@ var _ = common.SIGDescribe("Networking", func() { // This test ensures that in a situation where multiple services exist with the same selector, // deleting one of the services does not affect the connectivity of the remaining service - ginkgo.It("should function for multiple endpoint-Services with same selector", func() { + ginkgo.It("should function for multiple endpoint-Services with same selector", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By("creating a second service with same selector") svc2, httpPort := createSecondNodePortService(f, config) @@ -325,7 +325,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should update endpoints: http", func() { + ginkgo.It("should update endpoints: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) err := config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -342,7 +342,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should update endpoints: udp", func() { + ginkgo.It("should update endpoints: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) 
ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) err := config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -360,7 +360,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - ginkgo.It("should update nodePort: http [Slow]", func() { + ginkgo.It("should update nodePort: http [Slow]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) err := config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -381,7 +381,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // quick validation of udp, next test confirms that this services update as well after endpoints are removed, but is slower. - ginkgo.It("should support basic nodePort: udp functionality", func() { + ginkgo.It("should support basic nodePort: udp functionality", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) err := config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -391,7 +391,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - ginkgo.It("should update nodePort: udp [Slow]", func() { + ginkgo.It("should update nodePort: udp [Slow]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) err := config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -413,7 +413,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func() { + ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort)) @@ -431,7 +431,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // [LinuxOnly]: Windows does not support session affinity. 
- ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func() { + ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort)) @@ -448,7 +448,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should be able to handle large requests: http", func() { + ginkgo.It("should be able to handle large requests: http", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort)) message := strings.Repeat("42", 1000) @@ -458,7 +458,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should be able to handle large requests: udp", func() { + ginkgo.It("should be able to handle large requests: udp", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort)) message := "n" + strings.Repeat("o", 1999) @@ -471,7 +471,7 @@ var _ = common.SIGDescribe("Networking", func() { // if the endpoints pods use hostNetwork, several tests can't run in parallel // because the pods will try to acquire the same port in the host. // We run the test in serial, to avoid port conflicts. - ginkgo.It("should function for service endpoints using hostNetwork", func() { + ginkgo.It("should function for service endpoints using hostNetwork", func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.UseHostNetwork, e2enetwork.EndpointsUseHostNetwork) ginkgo.By("pod-Service(hostNetwork): http") @@ -548,7 +548,7 @@ var _ = common.SIGDescribe("Networking", func() { }) - ginkgo.It("should recreate its iptables rules if they are deleted [Disruptive]", func() { + ginkgo.It("should recreate its iptables rules if they are deleted [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
e2eskipper.SkipUnlessSSHKeyPresent() diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index a91abb53c55..d944527f93a 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -142,7 +142,7 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", f := framework.NewDefaultFramework("network-perf") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It(fmt.Sprintf("should run iperf2"), func() { + ginkgo.It(fmt.Sprintf("should run iperf2"), func(ctx context.Context) { readySchedulableNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err) diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index f64e876f36d..6d05bd7b079 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -65,7 +65,7 @@ var ( var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { f := framework.NewDefaultFramework("no-snat-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("Should be able to send traffic between Pods without SNAT", func() { + ginkgo.It("Should be able to send traffic between Pods without SNAT", func(ctx context.Context) { cs := f.ClientSet pc := cs.CoreV1().Pods(f.Namespace.Name) diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 3c126e2d7d1..15a6149ed91 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -98,7 +98,7 @@ var _ = common.SIGDescribe("Proxy", func() { Testname: Proxy, logs service endpoint Description: Select any node in the cluster to invoke /logs endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable. */ - framework.ConformanceIt("should proxy through a service and a pod ", func() { + framework.ConformanceIt("should proxy through a service and a pod ", func(ctx context.Context) { start := time.Now() labels := map[string]string{"proxy-service-target": "true"} service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{ @@ -283,7 +283,7 @@ var _ = common.SIGDescribe("Proxy", func() { ProxyWithPath using a list of http methods. A valid response MUST be returned for each endpoint. */ - framework.ConformanceIt("A set of valid responses are returned for both pod and service ProxyWithPath", func() { + framework.ConformanceIt("A set of valid responses are returned for both pod and service ProxyWithPath", func(ctx context.Context) { ns := f.Namespace.Name msg := "foo" @@ -377,7 +377,7 @@ var _ = common.SIGDescribe("Proxy", func() { Proxy using a list of http methods. A valid response MUST be returned for each endpoint. */ - framework.ConformanceIt("A set of valid responses are returned for both pod and service Proxy", func() { + framework.ConformanceIt("A set of valid responses are returned for both pod and service Proxy", func(ctx context.Context) { ns := f.Namespace.Name msg := "foo" diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 83a9b76d8f0..05bef333d0e 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -774,7 +774,7 @@ var _ = common.SIGDescribe("Services", func() { Testname: Kubernetes Service Description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' service running in the cluster. 
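Every hunk in the networking tests above makes the same mechanical change: the `ginkgo.It`/`framework.ConformanceIt` closure gains a `ctx context.Context` parameter. A minimal sketch of the resulting shape, outside the e2e framework (the spec names and the 30s budget here are placeholders, not taken from these files):

```go
package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("Networking (sketch)", func() {
	// Ginkgo v2 passes a per-spec context to closures that declare a
	// context.Context parameter; it is cancelled when the spec times out
	// or the suite is interrupted.
	ginkgo.It("should function for pod-Service: http", func(ctx context.Context) {
		// Derive shorter deadlines from the spec context rather than from
		// context.Background() or context.TODO().
		dialCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		_ = dialCtx // hand dialCtx to whatever operation the spec exercises
	})
})
```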
*/ - framework.ConformanceIt("should provide secure master service ", func() { + framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) { _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes") }) @@ -784,7 +784,7 @@ var _ = common.SIGDescribe("Services", func() { Testname: Service, endpoints Description: Create a service with a endpoint without any Pods, the service MUST run and show empty endpoints. Add a pod to the service and the service MUST validate to show all the endpoints for the ports exposed by the Pod. Add another Pod then the list of all Ports exposed by both the Pods MUST be valid and have corresponding service endpoint. Once the second Pod is deleted then set of endpoint MUST be validated to show only ports from the first container that are exposed. Once both pods are deleted the endpoints from the service MUST be empty. */ - framework.ConformanceIt("should serve a basic endpoint from pods ", func() { + framework.ConformanceIt("should serve a basic endpoint from pods ", func(ctx context.Context) { serviceName := "endpoint-test2" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -845,7 +845,7 @@ var _ = common.SIGDescribe("Services", func() { Testname: Service, endpoints with multiple ports Description: Create a service with two ports but no Pods are added to the service yet. The service MUST run and show empty set of endpoints. Add a Pod to the first port, service MUST list one endpoint for the Pod on that port. Add another Pod to the second port, service MUST list both the endpoints. Delete the first Pod and the service MUST list only the endpoint to the second Pod. Delete the second Pod and the service must now have empty set of endpoints. */ - framework.ConformanceIt("should serve multiport endpoints from pods ", func() { + framework.ConformanceIt("should serve multiport endpoints from pods ", func(ctx context.Context) { // repacking functionality is intentionally not tested here - it's better to test it in an integration test. serviceName := "multi-endpoint-test" ns := f.Namespace.Name @@ -926,7 +926,7 @@ var _ = common.SIGDescribe("Services", func() { validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{}) }) - ginkgo.It("should be updated after adding or deleting ports ", func() { + ginkgo.It("should be updated after adding or deleting ports ", func(ctx context.Context) { serviceName := "edit-port-test" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1013,7 +1013,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) }) - ginkgo.It("should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]", func() { + ginkgo.It("should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]", func(ctx context.Context) { // this test is creating a pod with HostNetwork=true, which is not supported on Windows. 
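The "secure master service" hunk above still passes `context.TODO()` to client-go; only the closure signature changes. Illustrative only, and not something this patch itself does: with `ctx` in scope, the call could be threaded through like this (the helper name is invented, `cs` stands in for the test's clientset):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkDefaultKubernetesService mirrors the conformance check above, but
// forwards the spec context to the API call instead of context.TODO().
func checkDefaultKubernetesService(ctx context.Context, cs kubernetes.Interface) error {
	_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
	return err
}
```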
e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -1096,7 +1096,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should allow pods to hairpin back to themselves through services", func() { + ginkgo.It("should allow pods to hairpin back to themselves through services", func(ctx context.Context) { serviceName := "hairpin-test" ns := f.Namespace.Name @@ -1125,7 +1125,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) }) - ginkgo.It("should be able to up and down services", func() { + ginkgo.It("should be able to up and down services", func(ctx context.Context) { ns := f.Namespace.Name numPods, servicePort := 3, defaultServeHostnameServicePort @@ -1171,7 +1171,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames3, svc3IP, servicePort)) }) - ginkgo.It("should work after the service has been recreated", func() { + ginkgo.It("should work after the service has been recreated", func(ctx context.Context) { serviceName := "service-deletion" ns := f.Namespace.Name numPods, servicePort := 1, defaultServeHostnameServicePort @@ -1209,7 +1209,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames, svc.Spec.ClusterIP, servicePort)) }) - ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() { + ginkgo.It("should work after restarting kube-proxy [Disruptive]", func(ctx context.Context) { kubeProxyLabelSet := map[string]string{clusterAddonLabelKey: kubeProxyLabelName} e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(kubeProxyLabelName, cs, metav1.NamespaceSystem, kubeProxyLabelSet) @@ -1246,7 +1246,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, podNames2, svc2IP, servicePort)) }) - ginkgo.It("should work after restarting apiserver [Disruptive]", func() { + ginkgo.It("should work after restarting apiserver [Disruptive]", func(ctx context.Context) { if !framework.ProviderIs("gke") { e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) @@ -1299,7 +1299,7 @@ var _ = common.SIGDescribe("Services", func() { The client Pod MUST be able to access the NodePort service by service name and cluster IP on the service port, and on nodes' internal and external IPs on the NodePort. */ - framework.ConformanceIt("should be able to create a functioning NodePort service", func() { + framework.ConformanceIt("should be able to create a functioning NodePort service", func(ctx context.Context) { serviceName := "nodeport-test" ns := f.Namespace.Name @@ -1325,7 +1325,7 @@ var _ = common.SIGDescribe("Services", func() { The IP ranges here are reserved for documentation according to [RFC 5737](https://tools.ietf.org/html/rfc5737) Section 3 and should not be used by any host. */ - ginkgo.It("should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node", func() { + ginkgo.It("should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node", func(ctx context.Context) { serviceName := "externalip-test" ns := f.Namespace.Name externalIP := "203.0.113.250" @@ -1362,7 +1362,7 @@ var _ = common.SIGDescribe("Services", func() { When this NodePort service is updated to use two protocols i.e. 
TCP and UDP for same assigned service port 80, service update MUST be successful by allocating two NodePorts to the service and service MUST be able to serve both TCP and UDP requests over same service port 80. */ - ginkgo.It("should be able to update service type to NodePort listening on same port number but different protocols", func() { + ginkgo.It("should be able to update service type to NodePort listening on same port number but different protocols", func(ctx context.Context) { serviceName := "nodeport-update-service" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1435,7 +1435,7 @@ var _ = common.SIGDescribe("Services", func() { Update the service from ExternalName to ClusterIP by removing ExternalName entry, assigning port 80 as service port and TCP as protocol. Service update MUST be successful by assigning ClusterIP to the service and it MUST be reachable over serviceName and ClusterIP on provided service port. */ - framework.ConformanceIt("should be able to change the type from ExternalName to ClusterIP", func() { + framework.ConformanceIt("should be able to change the type from ExternalName to ClusterIP", func(ctx context.Context) { serviceName := "externalname-service" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1474,7 +1474,7 @@ var _ = common.SIGDescribe("Services", func() { service update MUST be successful by exposing service on every node's IP on dynamically assigned NodePort and, ClusterIP MUST be assigned to route service requests. Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort. */ - framework.ConformanceIt("should be able to change the type from ExternalName to NodePort", func() { + framework.ConformanceIt("should be able to change the type from ExternalName to NodePort", func(ctx context.Context) { serviceName := "externalname-service" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1512,7 +1512,7 @@ var _ = common.SIGDescribe("Services", func() { Update service type from ClusterIP to ExternalName by setting CNAME entry as externalName. Service update MUST be successful and service MUST not has associated ClusterIP. Service MUST be able to resolve to IP address by returning A records ensuring service is pointing to provided externalName. */ - framework.ConformanceIt("should be able to change the type from ClusterIP to ExternalName", func() { + framework.ConformanceIt("should be able to change the type from ClusterIP to ExternalName", func(ctx context.Context) { serviceName := "clusterip-service" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1554,7 +1554,7 @@ var _ = common.SIGDescribe("Services", func() { Update the service type from NodePort to ExternalName by setting CNAME entry as externalName. Service update MUST be successful and, MUST not has ClusterIP associated with the service and, allocated NodePort MUST be released. Service MUST be able to resolve to IP address by returning A records ensuring service is pointing to provided externalName. 
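The descriptions above walk a Service between ExternalName, ClusterIP and NodePort. A small sketch of what the two ends of that transition look like in API terms (the names and CNAME are placeholders, not values from these tests):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// externalNameService is an ExternalName Service: no ClusterIP is allocated
// and DNS resolves the service name to the given CNAME.
var externalNameService = &v1.Service{
	ObjectMeta: metav1.ObjectMeta{Name: "externalname-service"},
	Spec: v1.ServiceSpec{
		Type:         v1.ServiceTypeExternalName,
		ExternalName: "foo.example.com",
	},
}

// toClusterIP sketches the update exercised above: drop the ExternalName,
// switch the type, and expose a port so a ClusterIP gets assigned.
func toClusterIP(svc *v1.Service) {
	svc.Spec.Type = v1.ServiceTypeClusterIP
	svc.Spec.ExternalName = ""
	svc.Spec.Ports = []v1.ServicePort{{Port: 80, Protocol: v1.ProtocolTCP}}
}
```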
*/ - framework.ConformanceIt("should be able to change the type from NodePort to ExternalName", func() { + framework.ConformanceIt("should be able to change the type from NodePort to ExternalName", func(ctx context.Context) { serviceName := "nodeport-service" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -1591,7 +1591,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) }) - ginkgo.It("should prevent NodePort collisions", func() { + ginkgo.It("should prevent NodePort collisions", func(ctx context.Context) { // TODO: use the ServiceTestJig here baseName := "nodeport-collision-" serviceName1 := baseName + "1" @@ -1645,7 +1645,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns) }) - ginkgo.It("should check NodePort out-of-range", func() { + ginkgo.It("should check NodePort out-of-range", func(ctx context.Context) { // TODO: use the ServiceTestJig here serviceName := "nodeport-range-test" ns := f.Namespace.Name @@ -1712,7 +1712,7 @@ var _ = common.SIGDescribe("Services", func() { gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) }) - ginkgo.It("should release NodePorts on delete", func() { + ginkgo.It("should release NodePorts on delete", func(ctx context.Context) { // TODO: use the ServiceTestJig here serviceName := "nodeport-reuse" ns := f.Namespace.Name @@ -1775,7 +1775,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) }) - ginkgo.It("should create endpoints for unready pods", func() { + ginkgo.It("should create endpoints for unready pods", func(ctx context.Context) { serviceName := "tolerate-unready" ns := f.Namespace.Name @@ -1921,7 +1921,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true", func() { + ginkgo.It("should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true", func(ctx context.Context) { nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) @@ -2040,7 +2040,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false", func() { + ginkgo.It("should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false", func(ctx context.Context) { nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2) framework.ExpectNoError(err) nodeCounts := len(nodes.Items) @@ -2188,13 +2188,13 @@ var _ = common.SIGDescribe("Services", func() { Service MUST be reachable over serviceName and the ClusterIP on servicePort. [LinuxOnly]: Windows does not support session affinity. 
*/ - framework.ConformanceIt("should have session affinity work for service with type clusterIP [LinuxOnly]", func() { + framework.ConformanceIt("should have session affinity work for service with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip") svc.Spec.Type = v1.ServiceTypeClusterIP execAffinityTestForNonLBService(f, cs, svc) }) - ginkgo.It("should have session affinity timeout work for service with type clusterIP [LinuxOnly]", func() { + ginkgo.It("should have session affinity timeout work for service with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip-timeout") svc.Spec.Type = v1.ServiceTypeClusterIP execAffinityTestForSessionAffinityTimeout(f, cs, svc) @@ -2210,7 +2210,7 @@ var _ = common.SIGDescribe("Services", func() { Service MUST be reachable over serviceName and the ClusterIP on servicePort. [LinuxOnly]: Windows does not support session affinity. */ - framework.ConformanceIt("should be able to switch session affinity for service with type clusterIP [LinuxOnly]", func() { + framework.ConformanceIt("should be able to switch session affinity for service with type clusterIP [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-clusterip-transition") svc.Spec.Type = v1.ServiceTypeClusterIP execAffinityTestForNonLBServiceWithTransition(f, cs, svc) @@ -2225,13 +2225,13 @@ var _ = common.SIGDescribe("Services", func() { Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort. [LinuxOnly]: Windows does not support session affinity. */ - framework.ConformanceIt("should have session affinity work for NodePort service [LinuxOnly]", func() { + framework.ConformanceIt("should have session affinity work for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForNonLBService(f, cs, svc) }) - ginkgo.It("should have session affinity timeout work for NodePort service [LinuxOnly]", func() { + ginkgo.It("should have session affinity timeout work for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport-timeout") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForSessionAffinityTimeout(f, cs, svc) @@ -2247,13 +2247,13 @@ var _ = common.SIGDescribe("Services", func() { Service MUST be reachable over serviceName and the ClusterIP on servicePort. Service MUST also be reachable over node's IP on NodePort. [LinuxOnly]: Windows does not support session affinity. 
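The affinity specs above toggle ClientIP session affinity and, in the timeout variants, bound how long the stickiness lasts. A sketch of the Service fields involved (the timeout value is supplied by the caller, not taken from these tests):

```go
package example

import v1 "k8s.io/api/core/v1"

// withClientIPAffinity pins a client to one backend by source IP; the
// optional timeout bounds how long the stickiness lasts, which is what the
// "session affinity timeout" specs above exercise.
func withClientIPAffinity(svc *v1.Service, timeoutSeconds int32) {
	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
	svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
		ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &timeoutSeconds},
	}
}
```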
*/ - framework.ConformanceIt("should be able to switch session affinity for NodePort service [LinuxOnly]", func() { + framework.ConformanceIt("should be able to switch session affinity for NodePort service [LinuxOnly]", func(ctx context.Context) { svc := getServeHostnameService("affinity-nodeport-transition") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForNonLBServiceWithTransition(f, cs, svc) }) - ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() { + ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func(ctx context.Context) { ns := f.Namespace.Name numPods, servicePort := 3, defaultServeHostnameServicePort serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"} @@ -2304,7 +2304,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcDisabledIP, servicePort)) }) - ginkgo.It("should implement service.kubernetes.io/headless", func() { + ginkgo.It("should implement service.kubernetes.io/headless", func(ctx context.Context) { ns := f.Namespace.Name numPods, servicePort := 3, defaultServeHostnameServicePort serviceHeadlessLabels := map[string]string{v1.IsHeadlessService: ""} @@ -2356,7 +2356,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceDown(cs, ns, svcHeadlessIP, servicePort)) }) - ginkgo.It("should be rejected when no endpoints exist", func() { + ginkgo.It("should be rejected when no endpoints exist", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "no-pods" jig := e2eservice.NewTestJig(cs, namespace, serviceName) @@ -2401,7 +2401,7 @@ var _ = common.SIGDescribe("Services", func() { }) // regression test for https://issues.k8s.io/109414 and https://issues.k8s.io/109718 - ginkgo.It("should be rejected for evicted pods (no endpoints exist)", func() { + ginkgo.It("should be rejected for evicted pods (no endpoints exist)", func(ctx context.Context) { namespace := f.Namespace.Name serviceName := "evicted-pods" jig := e2eservice.NewTestJig(cs, namespace, serviceName) @@ -2491,7 +2491,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should respect internalTrafficPolicy=Local Pod to Pod [Feature:ServiceInternalTrafficPolicy]", func() { + ginkgo.It("should respect internalTrafficPolicy=Local Pod to Pod [Feature:ServiceInternalTrafficPolicy]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet // TODO: remove this skip when windows-based proxies implement internalTrafficPolicy e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2569,7 +2569,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Feature:ServiceInternalTrafficPolicy]", func() { + ginkgo.It("should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Feature:ServiceInternalTrafficPolicy]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet // TODO: remove this skip when windows-based proxies implement internalTrafficPolicy e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2649,7 +2649,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true) [Feature:ServiceInternalTrafficPolicy]", func() { + ginkgo.It("should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true) 
[Feature:ServiceInternalTrafficPolicy]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet // TODO: remove this skip when windows-based proxies implement internalTrafficPolicy e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2761,7 +2761,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fail health check node port if there are only terminating endpoints [Feature:ProxyTerminatingEndpoints]", func() { + ginkgo.It("should fail health check node port if there are only terminating endpoints [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2860,7 +2860,7 @@ var _ = common.SIGDescribe("Services", func() { execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name) }) - ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func() { + ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2943,7 +2943,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func() { + ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -3031,7 +3031,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func() { + ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -3116,7 +3116,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func() { + ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -3216,7 +3216,7 @@ var _ = common.SIGDescribe("Services", func() { Testname: Find Kubernetes Service in default Namespace Description: List all Services in all Namespaces, response MUST include a Service named Kubernetes with the Namespace of default. 
*/ - framework.ConformanceIt("should find a service from listing all namespaces", func() { + framework.ConformanceIt("should find a service from listing all namespaces", func(ctx context.Context) { ginkgo.By("fetching services") svcs, _ := f.ClientSet.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{}) @@ -3241,7 +3241,7 @@ var _ = common.SIGDescribe("Services", func() { The endpoint is then patched with a new IPv4 address and port, a check after the patch MUST the changes. The endpoint is deleted by it's label, a watch listens for the deleted watch event. */ - framework.ConformanceIt("should test the lifecycle of an Endpoint", func() { + framework.ConformanceIt("should test the lifecycle of an Endpoint", func(ctx context.Context) { testNamespaceName := f.Namespace.Name testEndpointName := "testservice" testEndpoints := v1.Endpoints{ @@ -3275,7 +3275,7 @@ var _ = common.SIGDescribe("Services", func() { _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(context.TODO(), &testEndpoints, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create Endpoint") ginkgo.By("waiting for available Endpoint") - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() _, err = watchtools.Until(ctx, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) { switch event.Type { @@ -3425,7 +3425,7 @@ var _ = common.SIGDescribe("Services", func() { When updating /status the action MUST be validated. When patching a service the action MUST be validated. */ - framework.ConformanceIt("should complete a service status lifecycle", func() { + framework.ConformanceIt("should complete a service status lifecycle", func(ctx context.Context) { ns := f.Namespace.Name svcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} @@ -3465,7 +3465,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create Service") ginkgo.By("watching for the Service to be added") - ctx, cancel := context.WithTimeout(context.Background(), svcReadyTimeout) + ctx, cancel := context.WithTimeout(ctx, svcReadyTimeout) defer cancel() _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) { if svc, ok := event.Object.(*v1.Service); ok { @@ -3651,7 +3651,7 @@ var _ = common.SIGDescribe("Services", func() { of services via a label selector. It MUST locate only one service after deleting the service collection. */ - framework.ConformanceIt("should delete a collection of services", func() { + framework.ConformanceIt("should delete a collection of services", func(ctx context.Context) { ns := f.Namespace.Name svcClient := f.ClientSet.CoreV1().Services(ns) @@ -3729,7 +3729,7 @@ var _ = common.SIGDescribe("Services", func() { Testname: Service, same ports with different protocols on a Load Balancer Service Description: Create a LoadBalancer service with two ports that have the same value but use different protocols. Add a Pod that listens on both ports. 
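The Endpoint-lifecycle and Service-status hunks above are the ones that go beyond the signature change: the watch deadline is now derived from the spec's ctx instead of context.Background(). A minimal sketch of that pattern, with an invented helper name:

```go
package example

import (
	"context"
	"time"
)

// withSpecBudget derives a bounded deadline from the spec context, so that
// interrupting the spec also cancels the wait while the explicit cap still
// applies; this mirrors the context.WithTimeout(ctx, ...) changes above.
func withSpecBudget(ctx context.Context, d time.Duration, op func(context.Context) error) error {
	opCtx, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	return op(opCtx)
}
```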
The Pod must be reachable via the ClusterIP and both ports */ - ginkgo.It("should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ", func() { + ginkgo.It("should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ", func(ctx context.Context) { serviceName := "multiprotocol-lb-test" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -4340,7 +4340,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() { cs = f.ClientSet }) - ginkgo.It("should allow creating a basic SCTP service with pod and endpoints", func() { + ginkgo.It("should allow creating a basic SCTP service with pod and endpoints", func(ctx context.Context) { serviceName := "sctp-endpoint-test" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) @@ -4394,7 +4394,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() { } }) - ginkgo.It("should create a Pod with SCTP HostPort", func() { + ginkgo.It("should create a Pod with SCTP HostPort", func(ctx context.Context) { node, err := e2enode.GetRandomReadySchedulableNode(cs) framework.ExpectNoError(err) hostExec := utils.NewHostExec(f) @@ -4448,7 +4448,7 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() { framework.Failf("The state of the sctp module has changed due to the test case") } }) - ginkgo.It("should create a ClusterIP Service with SCTP ports", func() { + ginkgo.It("should create a ClusterIP Service with SCTP ports", func(ctx context.Context) { ginkgo.By("checking that kube-proxy is in iptables mode") if proxyMode, err := proxyMode(f); err != nil { e2eskipper.Skipf("Couldn't detect KubeProxy mode - skip, %v", err) diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index d25daa1b72b..cb5345a390a 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -56,7 +56,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() { Testname: Service endpoint latency, thresholds Description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, then the durations are sorted to compute 50th, 90th and 99th percentile. The single server latency MUST not exceed liberally set thresholds of 20s for 50th percentile and 50s for the 90th percentile. */ - framework.ConformanceIt("should not be very high ", func() { + framework.ConformanceIt("should not be very high ", func(ctx context.Context) { const ( // These are very generous criteria. Ideally we will // get this much lower in the future. See issue diff --git a/test/e2e/network/topology_hints.go b/test/e2e/network/topology_hints.go index 59c4014758e..4bbb25b15a4 100644 --- a/test/e2e/network/topology_hints.go +++ b/test/e2e/network/topology_hints.go @@ -52,7 +52,7 @@ var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { e2eskipper.SkipUnlessMultizone(c) }) - ginkgo.It("should distribute endpoints evenly", func() { + ginkgo.It("should distribute endpoints evenly", func(ctx context.Context) { portNum := 9376 thLabels := map[string]string{labelKey: clientLabelValue} img := imageutils.GetE2EImage(imageutils.Agnhost) diff --git a/test/e2e/node/apparmor.go b/test/e2e/node/apparmor.go index 2600704f67a..d3276464b24 100644 --- a/test/e2e/node/apparmor.go +++ b/test/e2e/node/apparmor.go @@ -17,6 +17,8 @@ limitations under the License. 
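The service-latency conformance description above sorts the 100 measured durations and reads off the 50th, 90th and 99th percentiles. A small, self-contained illustration of that computation (not the framework's own helper):

```go
package example

import (
	"sort"
	"time"
)

// percentile returns the q-th percentile (0 < q <= 100) of the measured
// durations by sorting and indexing, roughly the computation the latency
// conformance description above refers to.
func percentile(durations []time.Duration, q int) time.Duration {
	if len(durations) == 0 {
		return 0
	}
	sorted := append([]time.Duration(nil), durations...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	idx := (len(sorted)*q + 99) / 100 // ceiling of q% of the sample count
	if idx > len(sorted) {
		idx = len(sorted)
	}
	return sorted[idx-1]
}
```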
package node import ( + "context" + "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -43,11 +45,11 @@ var _ = SIGDescribe("AppArmor", func() { e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) }) - ginkgo.It("should enforce an AppArmor profile", func() { + ginkgo.It("should enforce an AppArmor profile", func(ctx context.Context) { e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), false, true) }) - ginkgo.It("can disable an AppArmor profile, using unconfined", func() { + ginkgo.It("can disable an AppArmor profile, using unconfined", func(ctx context.Context) { e2esecurity.CreateAppArmorTestPod(f.Namespace.Name, f.ClientSet, e2epod.NewPodClient(f), true, true) }) }) diff --git a/test/e2e/node/crictl.go b/test/e2e/node/crictl.go index 4e358f3bbe1..42d80339699 100644 --- a/test/e2e/node/crictl.go +++ b/test/e2e/node/crictl.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "k8s.io/kubernetes/test/e2e/framework" @@ -37,7 +38,7 @@ var _ = SIGDescribe("crictl", func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") }) - ginkgo.It("should be able to run crictl on the node", func() { + ginkgo.It("should be able to run crictl on the node", func(ctx context.Context) { nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes) framework.ExpectNoError(err) diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index 53d6f364212..b1a61d2d39c 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -38,7 +38,7 @@ var _ = SIGDescribe("Events", func() { f := framework.NewDefaultFramework("events") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("should be sent by kubelets and the scheduler about pods scheduling and running ", func() { + ginkgo.It("should be sent by kubelets and the scheduler about pods scheduling and running ", func(ctx context.Context) { podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) diff --git a/test/e2e/node/examples.go b/test/e2e/node/examples.go index 49e11b3df15..0cdbae3049a 100644 --- a/test/e2e/node/examples.go +++ b/test/e2e/node/examples.go @@ -68,7 +68,7 @@ var _ = SIGDescribe("[Feature:Example]", func() { }) ginkgo.Describe("Liveness", func() { - ginkgo.It("liveness pods should be automatically restarted", func() { + ginkgo.It("liveness pods should be automatically restarted", func(ctx context.Context) { test := "test/fixtures/doc-yaml/user-guide/liveness" execYaml := readFile(test, "exec-liveness.yaml.in") httpYaml := readFile(test, "http-liveness.yaml.in") @@ -115,7 +115,7 @@ var _ = SIGDescribe("[Feature:Example]", func() { }) ginkgo.Describe("Secret", func() { - ginkgo.It("should create a pod that reads a secret", func() { + ginkgo.It("should create a pod that reads a secret", func(ctx context.Context) { test := "test/fixtures/doc-yaml/user-guide/secrets" secretYaml := readFile(test, "secret.yaml") podYaml := readFile(test, "secret-pod.yaml.in") @@ -135,7 +135,7 @@ var _ = SIGDescribe("[Feature:Example]", func() { }) ginkgo.Describe("Downward API", func() { - ginkgo.It("should create a pod that prints his name and namespace", func() { + ginkgo.It("should create a pod that prints his name and namespace", func(ctx context.Context) { test := "test/fixtures/doc-yaml/user-guide/downward-api" podYaml := readFile(test, "dapi-pod.yaml.in") podName := "dapi-test-pod" diff --git 
a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index 6e8b070a574..f7d26f41e44 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -343,7 +343,7 @@ var _ = SIGDescribe("kubelet", func() { name := fmt.Sprintf( "kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout) itArg := itArg - ginkgo.It(name, func() { + ginkgo.It(name, func(ctx context.Context) { totalPods := itArg.podsPerNode * numNodes ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods)) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) @@ -436,7 +436,7 @@ var _ = SIGDescribe("kubelet", func() { // execute It blocks from above table of tests for _, t := range testTbl { t := t - ginkgo.It(t.itDescr, func() { + ginkgo.It(t.itDescr, func(ctx context.Context) { pod = createPodUsingNfs(f, c, ns, nfsIP, t.podCmd) ginkgo.By("Stop the NFS server") @@ -482,7 +482,7 @@ var _ = SIGDescribe("kubelet", func() { returns something or not! */ - ginkgo.It("should return the logs ", func() { + ginkgo.It("should return the logs ", func(ctx context.Context) { ginkgo.By("Starting the command") tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) @@ -497,7 +497,7 @@ var _ = SIGDescribe("kubelet", func() { returns something or not! */ - ginkgo.It("should return the logs for the requested service", func() { + ginkgo.It("should return the logs for the requested service", func(ctx context.Context) { ginkgo.By("Starting the command") tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) @@ -512,7 +512,7 @@ var _ = SIGDescribe("kubelet", func() { returns something or not! */ - ginkgo.It("should return the logs for the provided path", func() { + ginkgo.It("should return the logs for the provided path", func(ctx context.Context) { ginkgo.By("Starting the command") tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns) diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index e83eec6c956..dedda995878 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -17,6 +17,7 @@ limitations under the License. 
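The kubelet hunks above generate specs from a table and re-declare the loop variable (`itArg := itArg`, `t := t`) so each closure captures its own copy; those closures now also take the per-spec ctx. A sketch of that pattern with placeholder cases:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// Sketch of the table-driven pattern in the kubelet hunks above; the cases
// here are placeholders, not the real test table.
var _ = ginkgo.Describe("kubelet (sketch)", func() {
	testCases := []struct{ podsPerNode int }{{10}, {35}}
	for _, tc := range testCases {
		tc := tc // capture a per-iteration copy for the closure below
		name := fmt.Sprintf("should handle %d pods per node", tc.podsPerNode)
		ginkgo.It(name, func(ctx context.Context) {
			_ = ctx // pass ctx to the operations exercised for this case
		})
	}
})
```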
package node import ( + "context" "fmt" "strings" "time" @@ -265,7 +266,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { podsPerNode := itArg.podsPerNode name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) - ginkgo.It(name, func() { + ginkgo.It(name, func(ctx context.Context) { runResourceTrackingTest(f, podsPerNode, nodeNames, rm, itArg.cpuLimits, itArg.memLimits) }) } @@ -276,7 +277,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { podsPerNode := density[i] name := fmt.Sprintf( "resource tracking for %d pods per node", podsPerNode) - ginkgo.It(name, func() { + ginkgo.It(name, func(ctx context.Context) { runResourceTrackingTest(f, podsPerNode, nodeNames, rm, nil, nil) }) } diff --git a/test/e2e/node/mount_propagation.go b/test/e2e/node/mount_propagation.go index 5a9700d8069..b7ddcf6d1d6 100644 --- a/test/e2e/node/mount_propagation.go +++ b/test/e2e/node/mount_propagation.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "strings" @@ -83,7 +84,7 @@ var _ = SIGDescribe("Mount propagation", func() { f := framework.NewDefaultFramework("mount-propagation") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should propagate mounts within defined scopes", func() { + ginkgo.It("should propagate mounts within defined scopes", func(ctx context.Context) { // This test runs two pods: master and slave with respective mount // propagation on common /var/lib/kubelet/XXXX directory. Both mount a // tmpfs to a subdirectory there. We check that these mounts are diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 33f2f8a668c..1254bf6f0d3 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -59,7 +59,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() { e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute) }) - ginkgo.It("should run without error", func() { + ginkgo.It("should run without error", func(ctx context.Context) { ginkgo.By("Getting all nodes and their SSH-able IP addresses") readyNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err) diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index 7ebe74c065b..e692e8824ce 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -38,7 +38,7 @@ import ( var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() { f := framework.NewDefaultFramework("pod-garbage-collector") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should handle the creation of 1000 pods", func() { + ginkgo.It("should handle the creation of 1000 pods", func(ctx context.Context) { var count int for count < 1000 { pod, err := createTerminatingPod(f) diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 3d955d01637..c8ea8fddc36 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -65,7 +65,7 @@ var _ = SIGDescribe("Pods Extended", func() { Testname: Pods, delete grace period Description: Create a pod, make sure it is running. Using the http client send a 'delete' with gracePeriodSeconds=30. Pod SHOULD get terminated within gracePeriodSeconds and removed from API server within a window. 
*/ - ginkgo.It("should be submitted and removed", func() { + ginkgo.It("should be submitted and removed", func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-submit-remove-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -158,7 +158,7 @@ var _ = SIGDescribe("Pods Extended", func() { Testname: Pods, QOS Description: Create a Pod with CPU and Memory request and limits. Pod status MUST have QOSClass set to PodQOSGuaranteed. */ - framework.ConformanceIt("should be set on Pods with matching resource requests and limits for memory and cpu", func() { + framework.ConformanceIt("should be set on Pods with matching resource requests and limits for memory and cpu", func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-qos-class-" + string(uuid.NewUUID()) pod := &v1.Pod{ @@ -205,7 +205,7 @@ var _ = SIGDescribe("Pods Extended", func() { podClient = e2epod.NewPodClient(f) }) - ginkgo.It("should never report success for a pending container", func() { + ginkgo.It("should never report success for a pending container", func(ctx context.Context) { ginkgo.By("creating pods that should always exit 1 and terminating the pod after a random delay") createAndTestPodRepeatedly( 3, 15, @@ -213,7 +213,7 @@ var _ = SIGDescribe("Pods Extended", func() { podClient.PodInterface, ) }) - ginkgo.It("should never report container start when an init container fails", func() { + ginkgo.It("should never report container start when an init container fails", func(ctx context.Context) { ginkgo.By("creating pods with an init container that always exit 1 and terminating the pod after a random delay") createAndTestPodRepeatedly( 3, 15, @@ -229,7 +229,7 @@ var _ = SIGDescribe("Pods Extended", func() { podClient = e2epod.NewPodClient(f) }) - ginkgo.It("should not create extra sandbox if all containers are done", func() { + ginkgo.It("should not create extra sandbox if all containers are done", func(ctx context.Context) { ginkgo.By("creating the pod that should always exit 0") name := "pod-always-succeed" + string(uuid.NewUUID()) @@ -299,7 +299,7 @@ var _ = SIGDescribe("Pods Extended", func() { } }) - ginkgo.It("evicted pods should be terminal", func() { + ginkgo.It("evicted pods should be terminal", func(ctx context.Context) { ginkgo.By("creating the pod that should be evicted") name := "pod-should-be-evicted" + string(uuid.NewUUID()) diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index d1c3f94acd8..14f467e0224 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -165,11 +165,11 @@ var _ = SIGDescribe("PreStop", func() { Testname: Pods, prestop hook Description: Create a server pod with a rest endpoint '/write' that changes state.Received field. Create a Pod with a pre-stop handle that posts to the /write endpoint on the server Pod. Verify that the Pod with pre-stop hook is running. Delete the Pod with the pre-stop hook. Before the Pod is deleted, pre-stop handler MUST be called when configured. Verify that the Pod is deleted and a call to prestop hook is verified by checking the status received on the server Pod. 
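The QOS conformance description above requires the pod to be classified as Guaranteed when every container's requests equal its limits. A sketch of a container that satisfies that condition (image and quantities are placeholders):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// guaranteedContainer sets identical requests and limits for CPU and memory,
// which is the condition for the pod to be classified as Guaranteed QoS.
var guaranteedContainer = v1.Container{
	Name:  "example",
	Image: "registry.k8s.io/pause:3.9", // placeholder image
	Resources: v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
	},
}
```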
*/ - framework.ConformanceIt("should call prestop when killing a pod ", func() { + framework.ConformanceIt("should call prestop when killing a pod ", func(ctx context.Context) { testPreStop(f.ClientSet, f.Namespace.Name) }) - ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func() { + ginkgo.It("graceful pod terminated should wait until preStop hook completes the process", func(ctx context.Context) { gracefulTerminationPeriodSeconds := int64(30) ginkgo.By("creating the pod") name := "pod-prestop-hook-" + string(uuid.NewUUID()) diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index 3c4ea5d0abf..8790a3505fd 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -42,7 +42,7 @@ var _ = SIGDescribe("RuntimeClass", func() { f := framework.NewDefaultFramework("runtimeclass") f.NamespacePodSecurityEnforceLevel = api.LevelBaseline - ginkgo.It("should reject a Pod requesting a RuntimeClass with conflicting node selector", func() { + ginkgo.It("should reject a Pod requesting a RuntimeClass with conflicting node selector", func(ctx context.Context) { labelFooName := "foo-" + string(uuid.NewUUID()) scheduling := &nodev1.Scheduling{ @@ -66,7 +66,7 @@ var _ = SIGDescribe("RuntimeClass", func() { } }) - ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] ", func() { + ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] ", func(ctx context.Context) { labelFooName := "foo-" + string(uuid.NewUUID()) labelFizzName := "fizz-" + string(uuid.NewUUID()) @@ -127,7 +127,7 @@ var _ = SIGDescribe("RuntimeClass", func() { gomega.Expect(pod.Spec.Tolerations).To(gomega.ContainElement(tolerations[0])) }) - ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling without taints ", func() { + ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling without taints ", func(ctx context.Context) { // Requires special setup of test-handler which is only done in GCE kube-up environment // see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076 e2eskipper.SkipUnlessProviderIs("gce") diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index f9295022486..6072c40e0cc 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -69,7 +69,7 @@ var _ = SIGDescribe("Security Context", func() { f := framework.NewDefaultFramework("security-context") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func() { + ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"id", "-G"} pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678} @@ -78,7 +78,7 @@ var _ = SIGDescribe("Security Context", func() { }) ginkgo.When("if the container's primary UID belongs to some groups in the image [LinuxOnly]", func() { - ginkgo.It("should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes", func() { + ginkgo.It("should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes", func(ctx context.Context) { uidInImage := int64(1000) 
gidDefinedInImage := int64(50000) supplementalGroup := int64(60000) @@ -108,7 +108,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func() { + ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) userID := int64(1001) pod.Spec.SecurityContext.RunAsUser = &userID @@ -126,7 +126,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is created with runAsUser and runAsGroup option by passing uid 1001 and gid 2002 at pod level. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. */ - framework.ConformanceIt("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func() { + framework.ConformanceIt("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) userID := int64(1001) groupID := int64(2002) @@ -140,7 +140,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func() { + ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) userID := int64(1001) overrideUserID := int64(1002) @@ -161,7 +161,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is created with runAsUser and runAsGroup option by passing uid 1001 and gid 2002 at containr level. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. 
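The specs above set process identity at the pod level and verify it from inside the container. A sketch of the fields they exercise, reusing the IDs that appear in the surrounding descriptions:

```go
package example

import v1 "k8s.io/api/core/v1"

// podLevelIdentity applies the pod-wide identity the specs above verify:
// container processes run as uid 1001 / gid 2002 and also belong to the
// listed supplemental groups, unless a container overrides the values in
// its own SecurityContext.
func podLevelIdentity(pod *v1.Pod) {
	uid, gid := int64(1001), int64(2002)
	pod.Spec.SecurityContext = &v1.PodSecurityContext{
		RunAsUser:          &uid,
		RunAsGroup:         &gid,
		SupplementalGroups: []int64{1234, 5678},
	}
}
```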
*/ - framework.ConformanceIt("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func() { + framework.ConformanceIt("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) userID := int64(1001) groupID := int64(2001) @@ -180,19 +180,19 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling [Flaky] [LinuxOnly]", func(ctx context.Context) { testPodSELinuxLabeling(f, false, false) }) - ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]", func(ctx context.Context) { testPodSELinuxLabeling(f, true, false) }) - ginkgo.It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func() { + ginkgo.It("should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]", func(ctx context.Context) { testPodSELinuxLabeling(f, false, true) }) - ginkgo.It("should support seccomp unconfined on the container [LinuxOnly]", func() { + ginkgo.It("should support seccomp unconfined on the container [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} @@ -200,21 +200,21 @@ var _ = SIGDescribe("Security Context", func() { e2eoutput.TestContainerOutput(f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled }) - ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func() { + ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} e2eoutput.TestContainerOutput(f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled }) - ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func() { + ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} e2eoutput.TestContainerOutput(f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered }) - ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func() { + ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func(ctx context.Context) { pod := scTestPod(false, false) pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} e2eoutput.TestContainerOutput(f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled diff --git a/test/e2e/node/ssh.go b/test/e2e/node/ssh.go index 964bb8e3d2c..7112172ed90 100644 --- a/test/e2e/node/ssh.go +++ b/test/e2e/node/ssh.go @@ -17,6 +17,7 @@ limitations under the License. 
package node import ( + "context" "fmt" "strings" @@ -44,7 +45,7 @@ var _ = SIGDescribe("SSH", func() { e2eskipper.SkipUnlessSSHKeyPresent() }) - ginkgo.It("should SSH to all nodes and run commands", func() { + ginkgo.It("should SSH to all nodes and run commands", func(ctx context.Context) { // Get all nodes' external IPs. ginkgo.By("Getting all nodes' SSH-able IP addresses") hosts, err := e2essh.NodeSSHHosts(f.ClientSet) diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index f7b4b25e153..9894d577b68 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -180,7 +180,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 1. Run a pod // 2. Taint the node running this pod with a no-execute taint // 3. See if pod will get evicted - ginkgo.It("evicts pods from tainted nodes", func() { + ginkgo.It("evicts pods from tainted nodes", func(ctx context.Context) { podName := "taint-eviction-1" pod := createPodForTaintsTest(false, 0, podName, podName, ns) observedDeletions := make(chan string, 100) @@ -212,7 +212,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 1. Run a pod with toleration // 2. Taint the node running this pod with a no-execute taint // 3. See if pod won't get evicted - ginkgo.It("doesn't evict pod with tolerations from tainted nodes", func() { + ginkgo.It("doesn't evict pod with tolerations from tainted nodes", func(ctx context.Context) { podName := "taint-eviction-2" pod := createPodForTaintsTest(true, 0, podName, podName, ns) observedDeletions := make(chan string, 100) @@ -245,7 +245,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 2. Taint the node running this pod with a no-execute taint // 3. See if pod won't get evicted before toleration time runs out // 4. See if pod will get evicted after toleration time runs out - ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() { + ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func(ctx context.Context) { podName := "taint-eviction-3" pod := createPodForTaintsTest(true, kubeletPodDeletionDelaySeconds+2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) @@ -290,7 +290,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { Description: The Pod with toleration timeout scheduled on a tainted Node MUST not be evicted if the taint is removed before toleration time ends. */ - framework.ConformanceIt("removing taint cancels eviction [Disruptive]", func() { + framework.ConformanceIt("removing taint cancels eviction [Disruptive]", func(ctx context.Context) { podName := "taint-eviction-4" pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) @@ -346,7 +346,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { // 2. Taint the node running this pod with a no-execute taint // 3. See if pod will get evicted and has the pod disruption condition // 4. 
Remove the finalizer so that the pod can be deleted by GC - ginkgo.It("pods evicted from tainted nodes have pod disruption condition", func() { + ginkgo.It("pods evicted from tainted nodes have pod disruption condition", func(ctx context.Context) { podName := "taint-eviction-pod-disruption" pod := createPodForTaintsTest(false, 0, podName, podName, ns) pod.Finalizers = append(pod.Finalizers, testFinalizer) @@ -393,7 +393,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { // 1. Run two pods; one with toleration, one without toleration // 2. Taint the nodes running those pods with a no-execute taint // 3. See if pod-without-toleration get evicted, and pod-with-toleration is kept - ginkgo.It("only evicts pods without tolerations from tainted nodes", func() { + ginkgo.It("only evicts pods without tolerations from tainted nodes", func(ctx context.Context) { podGroup := "taint-eviction-a" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) @@ -452,7 +452,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { Description: In a multi-pods scenario with tolerationSeconds, the pods MUST be evicted as per the toleration time limit. */ - framework.ConformanceIt("evicts pods with minTolerationSeconds [Disruptive]", func() { + framework.ConformanceIt("evicts pods with minTolerationSeconds [Disruptive]", func(ctx context.Context) { podGroup := "taint-eviction-b" observedDeletions := make(chan string, 100) stopCh := make(chan struct{}) diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index f65c176451e..807de255e0e 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -58,7 +58,7 @@ var _ = SIGDescribe("LimitRange", func() { Testname: LimitRange, resources Description: Creating a Limitrange and verifying the creation of Limitrange, updating the Limitrange and validating the Limitrange. Creating Pods with resources and validate the pod resources are applied to the Limitrange */ - framework.ConformanceIt("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() { + framework.ConformanceIt("should create a LimitRange with defaults and ensure pod has those defaults applied.", func(ctx context.Context) { ginkgo.By("Creating a LimitRange") min := getResourceList("50m", "100Mi", "100Gi") max := getResourceList("500m", "500Mi", "500Gi") @@ -93,7 +93,7 @@ var _ = SIGDescribe("LimitRange", func() { _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{}) defer w.Stop() - ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout) + ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) defer cancelCtx() if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { framework.Failf("Timeout while waiting for LimitRange informer to sync") @@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() { the limitRange by collection with a labelSelector it MUST delete only one limitRange. 
*/ - framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func() { + framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) { ns := f.Namespace.Name lrClient := f.ClientSet.CoreV1().LimitRanges(ns) @@ -275,7 +275,7 @@ var _ = SIGDescribe("LimitRange", func() { limitRange2 := &v1.LimitRange{} *limitRange2 = *limitRange - ctx, cancelCtx := context.WithTimeout(context.Background(), wait.ForeverTestTimeout) + ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) defer cancelCtx() ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name)) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 6e6f8201ceb..d9c0a2040c2 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -224,7 +224,7 @@ func logContainers(f *framework.Framework, pod *v1.Pod) { var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() { f := framework.NewDefaultFramework("device-plugin-gpus") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("run Nvidia GPU Device Plugin tests", func() { + ginkgo.It("run Nvidia GPU Device Plugin tests", func(ctx context.Context) { testNvidiaGPUs(f) }) }) @@ -326,7 +326,7 @@ var _ = SIGDescribe("GPUDevicePluginAcrossRecreate [Feature:Recreate]", func() { }) f := framework.NewDefaultFramework("device-plugin-gpus-recreate") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("run Nvidia GPU Device Plugin tests with a recreation", func() { + ginkgo.It("run Nvidia GPU Device Plugin tests with a recreation", func(ctx context.Context) { testNvidiaGPUsJob(f) }) }) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 7775b77596e..6ac86296a9f 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -124,7 +124,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // This test verifies we don't allow scheduling of pods in a way that sum of local ephemeral storage resource requests of pods is greater than machines capacity. // It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. - ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() { + ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func(ctx context.Context) { e2eskipper.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery()) @@ -273,7 +273,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler, metav1.DeleteOptions{}) }) - ginkgo.It("verify pod overhead is accounted for", func() { + ginkgo.It("verify pod overhead is accounted for", func(ctx context.Context) { if testNodeName == "" { framework.Fail("unable to find a node which can run a pod") } @@ -328,7 +328,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Testname: Scheduler, resource limits Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity. 
*/ - framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() { + framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func(ctx context.Context) { WaitForStableCluster(cs, workerNodes) nodeMaxAllocatable := int64(0) nodeToAllocatableMap := make(map[string]int64) @@ -440,7 +440,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Testname: Scheduler, node selector not matching Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled. */ - framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() { + framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func(ctx context.Context) { ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" @@ -463,7 +463,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Testname: Scheduler, node selector matching Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node. */ - framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() { + framework.ConformanceIt("validates that NodeSelector is respected if matching ", func(ctx context.Context) { nodeName := GetNodeThatCanRunPod(f) ginkgo.By("Trying to apply a random label on the found node.") @@ -495,7 +495,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // Test Nodes does not have any label, hence it should be impossible to schedule Pod with // non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution. - ginkgo.It("validates that NodeAffinity is respected if not matching", func() { + ginkgo.It("validates that NodeAffinity is respected if not matching", func(ctx context.Context) { ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" @@ -536,7 +536,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // Keep the same steps with the test on NodeSelector, // but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector. - ginkgo.It("validates that required NodeAffinity setting is respected if matching", func() { + ginkgo.It("validates that required NodeAffinity setting is respected if matching", func(ctx context.Context) { nodeName := GetNodeThatCanRunPod(f) ginkgo.By("Trying to apply a random label on the found node.") @@ -584,7 +584,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // 2. Taint the node with a random taint // 3. Try to relaunch the pod with tolerations tolerate the taints on node, // and the pod's nodeName specified to the name of node found in step 1 - ginkgo.It("validates that taints-tolerations is respected if matching", func() { + ginkgo.It("validates that taints-tolerations is respected if matching", func(ctx context.Context) { nodeName := getNodeThatCanRunPodWithoutToleration(f) ginkgo.By("Trying to apply a random taint on the found node.") @@ -627,7 +627,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // 2. Taint the node with a random taint // 3. 
Try to relaunch the pod still no tolerations, // and the pod's nodeName specified to the name of node found in step 1 - ginkgo.It("validates that taints-tolerations is respected if not matching", func() { + ginkgo.It("validates that taints-tolerations is respected if not matching", func(ctx context.Context) { nodeName := getNodeThatCanRunPodWithoutToleration(f) ginkgo.By("Trying to apply a random taint on the found node.") @@ -662,7 +662,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { verifyResult(cs, 1, 0, ns) }) - ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() { + ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func(ctx context.Context) { nodeName := GetNodeThatCanRunPod(f) localhost := "127.0.0.1" @@ -701,7 +701,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { Description: Pods with the same HostPort and Protocol, but different HostIPs, MUST NOT schedule to the same node if one of those IPs is the default HostIP of 0.0.0.0, which represents all IPs on the host. */ - framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() { + framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func(ctx context.Context) { nodeName := GetNodeThatCanRunPod(f) hostIP := getNodeHostIP(f, nodeName) // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not @@ -745,7 +745,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } }) - ginkgo.It("validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes", func() { + ginkgo.It("validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes", func(ctx context.Context) { podLabel := "e2e-pts-filter" replicas := 4 rsConfig := pauseRSConfig{ @@ -805,7 +805,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { }) }) - ginkgo.It("validates Pods with non-empty schedulingGates are blocked on scheduling [Feature:PodSchedulingReadiness] [alpha]", func() { + ginkgo.It("validates Pods with non-empty schedulingGates are blocked on scheduling [Feature:PodSchedulingReadiness] [alpha]", func(ctx context.Context) { podLabel := "e2e-scheduling-gates" replicas := 3 ginkgo.By(fmt.Sprintf("Creating a ReplicaSet with replicas=%v, carrying scheduling gates [foo bar]", replicas)) diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 5ffa5b20095..5bff0dafa8f 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -127,7 +127,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { resources is found, the scheduler MUST preempt a lower priority pod and schedule the high priority pod. */ - framework.ConformanceIt("validates basic preemption works", func() { + framework.ConformanceIt("validates basic preemption works", func(ctx context.Context) { var podRes v1.ResourceList // Create two pods per node that uses a lot of the node's resources. @@ -221,7 +221,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { resources is found, the scheduler MUST preempt a lower priority pod to schedule the critical pod. 
*/ - framework.ConformanceIt("validates lower priority pod preemption by critical pod", func() { + framework.ConformanceIt("validates lower priority pod preemption by critical pod", func(ctx context.Context) { var podRes v1.ResourceList ginkgo.By("Create pods that use 4/5 of node resources.") @@ -324,7 +324,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // 2. Schedule a higher priority pod which also consumes 1/1 of node resources // 3. See if the pod with lower priority is preempted and has the pod disruption condition // 4. Remove the finalizer so that the pod can be deleted by GC - ginkgo.It("validates pod disruption condition is added to the preempted pod", func() { + ginkgo.It("validates pod disruption condition is added to the preempted pod", func(ctx context.Context) { podRes := v1.ResourceList{testExtendedResource: resource.MustParse("1")} ginkgo.By("Select a node to run the lower and higher priority pods") @@ -431,7 +431,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } }) - ginkgo.It("validates proper pods are preempted", func() { + ginkgo.It("validates proper pods are preempted", func(ctx context.Context) { podLabel := "e2e-pts-preemption" nodeAffinity := &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ @@ -621,7 +621,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { Testname: Pod preemption verification Description: Four levels of Pods in ReplicaSets with different levels of Priority, restricted by given CPU limits MUST launch. Priority 1 - 3 Pods MUST spawn first followed by Priority 4 Pod. The ReplicaSets with Replicas MUST contain the expected number of Replicas. */ - framework.ConformanceIt("runs ReplicaSets to verify preemption running path", func() { + framework.ConformanceIt("runs ReplicaSets to verify preemption running path", func(ctx context.Context) { podNamesSeen := []int32{0, 0, 0} stopCh := make(chan struct{}) @@ -811,7 +811,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { either patched or updated it MUST succeed. When any immutable field is either patched or updated it MUST fail. */ - framework.ConformanceIt("verify PriorityClass endpoints can be operated with different HTTP methods", func() { + framework.ConformanceIt("verify PriorityClass endpoints can be operated with different HTTP methods", func(ctx context.Context) { // 1. Patch/Update on immutable fields will fail. 
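As the comment above notes, PriorityClass.Value is immutable after creation, so updates that change it are expected to be rejected. A hedged sketch of that check with plain client-go; names such as cs and "sample-pc" are illustrative and not taken from the test:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateValueShouldFail returns the error from mutating an immutable field;
// a caller would assert that this error is non-nil.
func updateValueShouldFail(ctx context.Context, cs kubernetes.Interface) error {
	pc, err := cs.SchedulingV1().PriorityClasses().Get(ctx, "sample-pc", metav1.GetOptions{})
	if err != nil {
		return err
	}
	pc.Value = pc.Value * 10
	_, err = cs.SchedulingV1().PriorityClasses().Update(ctx, pc, metav1.UpdateOptions{})
	return err
}
```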
pcCopy := pcs[0].DeepCopy() pcCopy.Value = pcCopy.Value * 10 diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 95c4df6593d..55e2f4b7f5c 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -121,7 +121,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { } }) - ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() { + ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func(ctx context.Context) { e2eskipper.SkipUnlessNodeCountIsAtLeast(2) @@ -205,7 +205,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { framework.ExpectNotEqual(labelPod.Spec.NodeName, nodeName) }) - ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func() { + ginkgo.It("Pod should be preferably scheduled to nodes pod can tolerate", func(ctx context.Context) { // make the nodes have balanced cpu,mem usage ratio cleanUp, err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5) defer cleanUp() @@ -285,7 +285,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { } }) - ginkgo.It("validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed", func() { + ginkgo.It("validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed", func(ctx context.Context) { var nodes []v1.Node for _, nodeName := range nodeNames { node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 933b1382c57..f30d62b9178 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -74,11 +74,11 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { cleanUp() } }) - ginkgo.It("should spread the pods of a service across zones [Serial]", func() { + ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) { SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName()) }) - ginkgo.It("should spread the pods of a replication controller across zones [Serial]", func() { + ginkgo.It("should spread the pods of a replication controller across zones [Serial]", func(ctx context.Context) { SpreadRCOrFail(f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"}) }) }) diff --git a/test/e2e/storage/csi_inline.go b/test/e2e/storage/csi_inline.go index 7686be2c474..c5b2cf065ae 100644 --- a/test/e2e/storage/csi_inline.go +++ b/test/e2e/storage/csi_inline.go @@ -43,7 +43,7 @@ var _ = utils.SIGDescribe("CSIInlineVolumes", func() { Description: CSIDriver resources with ephemeral VolumeLifecycleMode should support create, get, list, and delete operations. */ - framework.ConformanceIt("should support ephemeral VolumeLifecycleMode in CSIDriver API", func() { + framework.ConformanceIt("should support ephemeral VolumeLifecycleMode in CSIDriver API", func(ctx context.Context) { // Create client client := f.ClientSet.StorageV1().CSIDrivers() defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy @@ -128,7 +128,7 @@ var _ = utils.SIGDescribe("CSIInlineVolumes", func() { Description: Pod resources with CSIVolumeSource should support create, get, list, patch, and delete operations. 
*/ - framework.ConformanceIt("should support CSIVolumeSource in Pod API", func() { + framework.ConformanceIt("should support CSIVolumeSource in Pod API", func(ctx context.Context) { // Create client client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index eee80be337d..799e1024a5b 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -389,7 +389,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { var err error init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) defer cleanup() @@ -427,7 +427,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { }) ginkgo.Context("CSI CSIDriver deployment after pod creation using non-attachable mock driver", func() { - ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func() { + ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) { var err error init(testParameters{registerDriver: false, disableAttach: true}) defer cleanup() @@ -586,7 +586,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { }) ginkgo.Context("CSI volume limit information using mock driver", func() { - ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func() { + ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error init(testParameters{attachLimit: 2}) @@ -617,7 +617,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3) }) - ginkgo.It("should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]", func() { + ginkgo.It("should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error init(testParameters{attachLimit: 1}) @@ -642,7 +642,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod2) }) - ginkgo.It("should report attach limit for persistent volume when generic ephemeral volume is attached [Slow]", func() { + ginkgo.It("should report attach limit for persistent volume when generic ephemeral volume is attached [Slow]", func(ctx context.Context) { // define volume limit to be 2 for this test var err error init(testParameters{attachLimit: 1}) @@ -697,7 +697,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { var err error tp := testParameters{ enableResizing: true, @@ -796,7 +796,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { var err error params := testParameters{enableResizing: true, enableNodeExpansion: true} if test.disableAttach { @@ -939,7 +939,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { var hooks *drivers.Hooks if test.nodeStageHook != nil { 
hooks = createPreHook("NodeStageVolume", test.nodeStageHook) @@ -961,7 +961,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { ginkgo.By("Waiting for expected CSI calls") // Watch for all calls up to deletePod = true - ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout) + ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) defer cancel() for { if ctx.Err() != nil { @@ -1072,7 +1072,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { // Index of the last deleted pod. NodeUnstage calls are then related to this pod. var deletedPodNumber int64 = 1 var hooks *drivers.Hooks @@ -1191,7 +1191,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { for _, t := range tests { test := t - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { var err error params := testParameters{ lateBinding: test.lateBinding, @@ -1410,7 +1410,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { scName := "mock-csi-storage-capacity-" + f.UniqueName init(testParameters{ registerDriver: true, @@ -1497,7 +1497,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, test := range tests { test := test - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { var hooks *drivers.Hooks if test.createSnapshotHook != nil { hooks = createPreHook("CreateSnapshot", test.createSnapshotHook) @@ -1513,7 +1513,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name) } - ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout) + ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) defer cancel() defer cleanup() @@ -1635,7 +1635,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { for _, test := range tests { test := test csiServiceAccountTokenEnabled := test.tokenRequests != nil - ginkgo.It(test.desc, func() { + ginkgo.It(test.desc, func(ctx context.Context) { init(testParameters{ registerDriver: test.deployCSIDriverObject, tokenRequests: test.tokenRequests, @@ -1693,7 +1693,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping") } @@ -1768,7 +1768,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { t := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { var nodeStageFsGroup, nodePublishFsGroup string if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping") @@ -1835,7 +1835,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, test := range tests { test := test - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { hooks := createPreHook("CreateSnapshot", test.createSnapshotHook) init(testParameters{ disableAttach: true, @@ -1926,7 +1926,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, test := range tests { test := test - ginkgo.It(test.name, func() { + ginkgo.It(test.name, func(ctx context.Context) { 
init(testParameters{ disableAttach: true, registerDriver: true, @@ -2069,7 +2069,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { t := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping") } diff --git a/test/e2e/storage/csistoragecapacity.go b/test/e2e/storage/csistoragecapacity.go index 495417afaad..44cb76d0176 100644 --- a/test/e2e/storage/csistoragecapacity.go +++ b/test/e2e/storage/csistoragecapacity.go @@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() { The csistoragecapacities resource MUST exist in the /apis/storage.k8s.io/v1 discovery document. The csistoragecapacities resource must support create, get, list, watch, update, patch, delete, and deletecollection. */ - framework.ConformanceIt(" should support CSIStorageCapacities API operations", func() { + framework.ConformanceIt(" should support CSIStorageCapacities API operations", func(ctx context.Context) { // Setup cscVersion := "v1" cscClient := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name) diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index bb8d9f319d6..b007f660c79 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { suffix = ns.Name }) - ginkgo.It("should not work when mount is in progress [Slow]", func() { + ginkgo.It("should not work when mount is in progress [Slow]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() driver := "attachable-with-long-mount" diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 5daa9672131..c867f4f6024 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -64,7 +64,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict Description: Secret volume and ConfigMap volume is created with data. Pod MUST be able to start with Secret and ConfigMap volumes mounted into the container. */ - framework.ConformanceIt("should not conflict", func() { + framework.ConformanceIt("should not conflict", func(ctx context.Context) { name := "emptydir-wrapper-test-" + string(uuid.NewUUID()) volumeName := "secret-volume" volumeMountPath := "/etc/secret-volume" @@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race Description: Create 50 ConfigMaps Volumes and 5 replicas of pod with these ConfigMapvolumes mounted. Pod MUST NOT fail waiting for Volumes. */ - framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func() { + framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) { configMapNames := createConfigmapsForRace(f) defer deleteConfigMaps(f, configMapNames) volumes, volumeMounts := makeConfigMapVolumes(configMapNames) @@ -199,7 +199,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { // This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance. // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. 
// This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. - ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() { + ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func(ctx context.Context) { gitURL, gitRepo, cleanup := createGitServer(f) defer cleanup() volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo) diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index 14f055ed010..8cadb232630 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { ginkgo.Describe("When pod refers to non-existent ephemeral storage", func() { for _, testSource := range invalidEphemeralSource("pod-ephm-test") { testSource := testSource - ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { + ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func(ctx context.Context) { pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index cd5c7aac6a2..f8569992d39 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "net" "path" @@ -187,7 +188,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { suffix = ns.Name }) - ginkgo.It("should be mountable when non-attachable", func() { + ginkgo.It("should be mountable when non-attachable", func(ctx context.Context) { driver := "dummy" driverInstallAs := driver + "-" + suffix @@ -205,7 +206,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { uninstallFlex(cs, node, "k8s", driverInstallAs) }) - ginkgo.It("should be mountable when attachable [Feature:Flexvolumes]", func() { + ginkgo.It("should be mountable when attachable [Feature:Flexvolumes]", func(ctx context.Context) { driver := "dummy-attachable" driverInstallAs := driver + "-" + suffix diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 1185a928563..9b7320b76eb 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -112,7 +112,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow] }) }) - ginkgo.It("Should verify mounted flex volumes can be resized", func() { + ginkgo.It("Should verify mounted flex volumes can be resized", func(ctx context.Context) { driver := "dummy-attachable" ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver)) installFlex(c, node, "k8s", driver, path.Join(driverDir, driver)) diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index f14443437f5..cda7a70d4e7 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan }) }) - ginkgo.It("should be resizable when mounted", func() { + ginkgo.It("should be resizable when 
mounted", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() driver := "dummy-attachable" diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 1a55913c4cc..7de7bb4c845 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -78,7 +78,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { }) for _, test := range disruptiveTestTable { func(t disruptiveTest) { - ginkgo.It(t.testItStmt, func() { + ginkgo.It(t.testItStmt, func(ctx context.Context) { ginkgo.By("Executing Spec") t.runTest(c, f, clientPod, e2epod.VolumeMountPath1) }) diff --git a/test/e2e/storage/gke_local_ssd.go b/test/e2e/storage/gke_local_ssd.go index 48ff3e4f784..36987ba33b0 100644 --- a/test/e2e/storage/gke_local_ssd.go +++ b/test/e2e/storage/gke_local_ssd.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "os/exec" @@ -41,7 +42,7 @@ var _ = utils.SIGDescribe("GKE local SSD [Feature:GKELocalSSD]", func() { e2eskipper.SkipUnlessProviderIs("gke") }) - ginkgo.It("should write and read from node local SSD [Feature:GKELocalSSD]", func() { + ginkgo.It("should write and read from node local SSD [Feature:GKELocalSSD]", func(ctx context.Context) { framework.Logf("Start local SSD test") createNodePoolWithLocalSsds("np-ssd") doTestWriteAndReadToLocalSsd(f) diff --git a/test/e2e/storage/host_path_type.go b/test/e2e/storage/host_path_type.go index a14ea7804f5..9369af59497 100644 --- a/test/e2e/storage/host_path_type.go +++ b/test/e2e/storage/host_path_type.go @@ -69,36 +69,36 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathDirectoryOrCreate) }) - ginkgo.It("Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory", func(ctx context.Context) { dirPath := path.Join(hostBaseDir, "does-not-exist-dir") verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, dirPath, fmt.Sprintf("%s is not a directory", dirPath), &hostPathDirectory) }) - ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathDirectory) }) - ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset", func() { + ginkgo.It("Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, &hostPathUnset) }) - ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathFile", func() { + ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathFile", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a file", targetDir), &hostPathFile) }) 
- ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathSocket", func() { + ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathSocket", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a socket", targetDir), &hostPathSocket) }) - ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev", func() { + ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a character device", targetDir), &hostPathCharDev) }) - ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetDir, fmt.Sprintf("%s is not a block device", targetDir), &hostPathBlockDev) }) }) @@ -137,36 +137,36 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFileOrCreate) }) - ginkgo.It("Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile", func() { + ginkgo.It("Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile", func(ctx context.Context) { filePath := path.Join(hostBaseDir, "does-not-exist-file") verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, filePath, fmt.Sprintf("%s is not a file", filePath), &hostPathFile) }) - ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathFile", func() { + ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathFile", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathFile) }) - ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset", func() { + ginkgo.It("Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, &hostPathUnset) }) - ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathDirectory", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a directory", targetFile), &hostPathDirectory) }) - ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathSocket", func() { + ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathSocket", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a socket", targetFile), &hostPathSocket) }) - ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathCharDev", func() { + 
ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathCharDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a character device", targetFile), &hostPathCharDev) }) - ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetFile, fmt.Sprintf("%s is not a block device", targetFile), &hostPathBlockDev) }) @@ -203,36 +203,36 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { targetSocket = path.Join(hostBaseDir, "asocket") }) - ginkgo.It("Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket", func() { + ginkgo.It("Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket", func(ctx context.Context) { socketPath := path.Join(hostBaseDir, "does-not-exist-socket") verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, socketPath, fmt.Sprintf("%s is not a socket", socketPath), &hostPathSocket) }) - ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket", func() { + ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathSocket) }) - ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset", func() { + ginkgo.It("Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, &hostPathUnset) }) - ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, fmt.Sprintf("%s is not a directory", targetSocket), &hostPathDirectory) }) - ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathFile", func() { + ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathFile", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, fmt.Sprintf("%s is not a file", targetSocket), &hostPathFile) }) - ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev", func() { + ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, fmt.Sprintf("%s is not a character device", targetSocket), &hostPathCharDev) }) - ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev", func(ctx 
context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetSocket, fmt.Sprintf("%s is not a block device", targetSocket), &hostPathBlockDev) }) @@ -273,36 +273,36 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { framework.ExpectNoError(err, "command: %q, stdout: %s\nstderr: %s", cmd, stdout, stderr) }) - ginkgo.It("Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev", func() { + ginkgo.It("Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev", func(ctx context.Context) { charDevPath := path.Join(hostBaseDir, "does-not-exist-char-dev") verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, charDevPath, fmt.Sprintf("%s is not a character device", charDevPath), &hostPathCharDev) }) - ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev", func() { + ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathCharDev) }) - ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset", func() { + ginkgo.It("Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, &hostPathUnset) }) - ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, fmt.Sprintf("%s is not a directory", targetCharDev), &hostPathDirectory) }) - ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathFile", func() { + ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathFile", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, fmt.Sprintf("%s is not a file", targetCharDev), &hostPathFile) }) - ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket", func() { + ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, fmt.Sprintf("%s is not a socket", targetCharDev), &hostPathSocket) }) - ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetCharDev, fmt.Sprintf("%s is not a block device", targetCharDev), &hostPathBlockDev) }) @@ -343,36 +343,36 @@ var _ = utils.SIGDescribe("HostPathType Block Device 
[Slow]", func() { framework.ExpectNoError(err, "command %q: stdout: %s\nstderr: %s", cmd, stdout, stderr) }) - ginkgo.It("Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev", func(ctx context.Context) { blkDevPath := path.Join(hostBaseDir, "does-not-exist-blk-dev") verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, blkDevPath, fmt.Sprintf("%s is not a block device", blkDevPath), &hostPathBlockDev) }) - ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev", func() { + ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathBlockDev) }) - ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset", func() { + ginkgo.It("Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset", func(ctx context.Context) { verifyPodHostPathType(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, &hostPathUnset) }) - ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory", func() { + ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, fmt.Sprintf("%s is not a directory", targetBlockDev), &hostPathDirectory) }) - ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile", func() { + ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, fmt.Sprintf("%s is not a file", targetBlockDev), &hostPathFile) }) - ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket", func() { + ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, fmt.Sprintf("%s is not a socket", targetBlockDev), &hostPathSocket) }) - ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev", func() { + ginkgo.It("Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev", func(ctx context.Context) { verifyPodHostPathTypeFailure(f, map[string]string{"kubernetes.io/hostname": basePod.Spec.NodeName}, targetBlockDev, fmt.Sprintf("%s is not a character device", targetBlockDev), &hostPathCharDev) }) diff --git a/test/e2e/storage/local_volume_resize.go b/test/e2e/storage/local_volume_resize.go index d8107054afd..b66e61588c1 100644 --- a/test/e2e/storage/local_volume_resize.go +++ b/test/e2e/storage/local_volume_resize.go @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() { cleanupStorageClass(config) }) - ginkgo.It("should support online expansion on node", func() { + ginkgo.It("should 
support online expansion on node", func(ctx context.Context) { var ( pod1 *v1.Pod pod1Err error diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index f837d98723a..f7efa1b7d5a 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun }) }) - ginkgo.It("Should verify mounted devices can be resized", func() { + ginkgo.It("Should verify mounted devices can be resized", func(ctx context.Context) { pvcClaims := []*v1.PersistentVolumeClaim{pvc} // The reason we use a node selector is because we do not want pod to move to different node when pod is deleted. diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 66314d67392..7cf657030cd 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -208,7 +208,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { } }) - ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() { + ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() ginkgo.By("Deleting PVC for volume 2") @@ -270,7 +270,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { // Test loop executes each disruptiveTest iteratively. for _, test := range disruptiveTestTable { func(t disruptiveTest) { - ginkgo.It(t.testItStmt, func() { + ginkgo.It(t.testItStmt, func(ctx context.Context) { ginkgo.By("Executing Spec") t.runTest(c, f, clientPod, e2epod.VolumeMountPath1) }) diff --git a/test/e2e/storage/non_graceful_node_shutdown.go b/test/e2e/storage/non_graceful_node_shutdown.go index d6832b9774e..b3fec272400 100644 --- a/test/e2e/storage/non_graceful_node_shutdown.go +++ b/test/e2e/storage/non_graceful_node_shutdown.go @@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [ }) ginkgo.Describe("[NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver", func() { - ginkgo.It("should get immediately rescheduled to a different node after non graceful node shutdown ", func() { + ginkgo.It("should get immediately rescheduled to a different node after non graceful node shutdown ", func(ctx context.Context) { // Install gce pd csi driver ginkgo.By("deploying csi gce-pd driver") driver := drivers.InitGcePDCSIDriver() diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 58c4afdaf08..81015ab0461 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { readOnly := t.readOnly readOnlyTxt := readOnlyMap[readOnly] - ginkgo.It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func() { + ginkgo.It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") if readOnly { e2eskipper.SkipIfProviderIs("aws") @@ -254,7 +254,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { numContainers := t.numContainers t := t - ginkgo.It(fmt.Sprintf("using 
%d containers and %d PDs", numContainers, numPDs), func() { + ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") var host0Pod *v1.Pod var err error @@ -348,7 +348,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { for _, t := range tests { disruptOp := t.disruptOp - ginkgo.It(fmt.Sprintf("when %s", t.descr), func() { + ginkgo.It(fmt.Sprintf("when %s", t.descr), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce") origNodeCnt := len(nodes.Items) // healhy nodes running kubelet @@ -449,7 +449,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { } }) - ginkgo.It("should be able to delete a non-existent PD without error", func() { + ginkgo.It("should be able to delete a non-existent PD without error", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce") ginkgo.By("delete a PD") @@ -458,7 +458,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { // This test is marked to run as serial so as device selection on AWS does not // conflict with other concurrent attach operations. - ginkgo.It("[Serial] attach on previously attached volumes should work", func() { + ginkgo.It("[Serial] attach on previously attached volumes should work", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") ginkgo.By("creating PD") diskName, err := e2epv.CreatePDWithRetry() diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index 7eeed91b043..f0f8ed6c1ed 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f // Attach a persistent disk to a pod using a PVC. // Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. - ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() { + ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func(ctx context.Context) { ginkgo.By("Deleting the Claim") framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name) @@ -141,7 +141,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f // Attach a persistent disk to a pod using a PVC. // Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. - ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() { + ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func(ctx context.Context) { ginkgo.By("Deleting the Persistent Volume") framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) @@ -155,7 +155,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", f }) // Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted. 
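The GCE PD cases around here all verify ordering: removing the claim, the PV, or even the enclosing namespace before the pod must not prevent the pod from unmounting and detaching the disk when it is deleted. A hedged sketch of that ordering with plain client-go; the names are illustrative:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteClaimThenPod removes the PVC first; pod deletion is still expected to
// complete, with the volume unmounted and the disk detached during teardown.
func deleteClaimThenPod(ctx context.Context, cs kubernetes.Interface, ns, pvcName, podName string) error {
	if err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{}); err != nil {
		return err
	}
	return cs.CoreV1().Pods(ns).Delete(ctx, podName, metav1.DeleteOptions{})
}
```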
- ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() { + ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func(ctx context.Context) { ginkgo.By("Deleting the Namespace") err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 33a232ba1d6..7b2c0ee0a21 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -232,13 +232,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) }) - ginkgo.It("should be able to mount volume and read from pod1", func() { + ginkgo.It("should be able to mount volume and read from pod1", func(ctx context.Context) { ginkgo.By("Reading in pod1") // testFileContent was written in BeforeEach testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType) }) - ginkgo.It("should be able to mount volume and write from pod1", func() { + ginkgo.It("should be able to mount volume and write from pod1", func(ctx context.Context) { // testFileContent was written in BeforeEach testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType) @@ -249,13 +249,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) ginkgo.Context("Two pods mounting a local volume at the same time", func() { - ginkgo.It("should be able to write from pod1 and read from pod2", func() { + ginkgo.It("should be able to write from pod1 and read from pod2", func(ctx context.Context) { twoPodsReadWriteTest(f, config, testVol) }) }) ginkgo.Context("Two pods mounting a local volume one after the other", func() { - ginkgo.It("should be able to write from pod1 and read from pod2", func() { + ginkgo.It("should be able to write from pod1 and read from pod2", func(ctx context.Context) { twoPodsReadWriteSerialTest(f, config, testVol) }) }) @@ -267,14 +267,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } }) - ginkgo.It("should set fsGroup for one pod [Slow]", func() { + ginkgo.It("should set fsGroup for one pod [Slow]", func(ctx context.Context) { ginkgo.By("Checking fsGroup is set") pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) ginkgo.By("Deleting pod") e2epod.DeletePodOrFail(config.client, config.ns, pod.Name) }) - ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func() { + ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func(ctx context.Context) { fsGroup := int64(1234) ginkgo.By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) @@ -286,7 +286,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) }) - ginkgo.It("should set different fsGroup for second pod if first pod is deleted [Flaky]", func() { + ginkgo.It("should set different fsGroup for second pod if first pod is deleted [Flaky]", func(ctx context.Context) { // TODO: Disabled temporarily, remove [Flaky] tag after #73168 is fixed. 
fsGroup1, fsGroup2 := int64(1234), int64(4321) ginkgo.By("Create first pod and check fsGroup is set") @@ -307,7 +307,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.Context("Local volume that cannot be mounted [Slow]", func() { // TODO: // - check for these errors in unit tests instead - ginkgo.It("should fail due to non-existent path", func() { + ginkgo.It("should fail due to non-existent path", func(ctx context.Context) { testVol := &localTestVolume{ ltr: &utils.LocalTestResource{ Node: config.randomNode, @@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) }) - ginkgo.It("should fail due to wrong node", func() { + ginkgo.It("should fail due to wrong node", func(ctx context.Context) { if len(config.nodes) < 2 { e2eskipper.Skipf("Runs only when number of nodes >= 2") } @@ -375,11 +375,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { cleanupStorageClass(config) }) - ginkgo.It("should fail scheduling due to different NodeAffinity", func() { + ginkgo.It("should fail scheduling due to different NodeAffinity", func(ctx context.Context) { testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeAffinity) }) - ginkgo.It("should fail scheduling due to different NodeSelector", func() { + ginkgo.It("should fail scheduling due to different NodeSelector", func(ctx context.Context) { testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeSelector) }) }) @@ -410,7 +410,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { cleanupStorageClass(config) }) - ginkgo.It("should use volumes spread across nodes when pod has anti-affinity", func() { + ginkgo.It("should use volumes spread across nodes when pod has anti-affinity", func(ctx context.Context) { if len(config.nodes) < ssReplicas { e2eskipper.Skipf("Runs only when number of nodes >= %v", ssReplicas) } @@ -419,13 +419,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { validateStatefulSet(config, ss, true) }) - ginkgo.It("should use volumes on one node when pod has affinity", func() { + ginkgo.It("should use volumes on one node when pod has affinity", func(ctx context.Context) { ginkgo.By("Creating a StatefulSet with pod affinity on nodes") ss := createStatefulSet(config, ssReplicas, volsPerNode/ssReplicas, false, false) validateStatefulSet(config, ss, false) }) - ginkgo.It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func() { + ginkgo.It("should use volumes spread across nodes when pod management is parallel and pod has anti-affinity", func(ctx context.Context) { if len(config.nodes) < ssReplicas { e2eskipper.Skipf("Runs only when number of nodes >= %v", ssReplicas) } @@ -434,7 +434,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { validateStatefulSet(config, ss, true) }) - ginkgo.It("should use volumes on one node when pod management is parallel and pod has affinity", func() { + ginkgo.It("should use volumes on one node when pod management is parallel and pod has affinity", func(ctx context.Context) { ginkgo.By("Creating a StatefulSet with pod affinity on nodes") ss := createStatefulSet(config, ssReplicas, 1, false, true) validateStatefulSet(config, ss, false) @@ -531,7 +531,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { cleanupStorageClass(config) }) - ginkgo.It("should be able to process many pods and reuse local volumes", func() { + 
ginkgo.It("should be able to process many pods and reuse local volumes", func(ctx context.Context) { var ( podsLock sync.Mutex // Have one extra pod pending @@ -653,7 +653,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { framework.ExpectNoError(err) }) - ginkgo.It("all pods should be running", func() { + ginkgo.It("all pods should be running", func(ctx context.Context) { var ( pvc *v1.PersistentVolumeClaim pods = map[string]*v1.Pod{} diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index c3276656d3f..8312175cc23 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -169,7 +169,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create an nfs PV, then a claim that matches the PV, and a pod that // contains the claim. Verify that the PV and PVC bind correctly, and // that the pod can write to the nfs volume. - ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func() { + ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a claim first, then a nfs PV that matches the claim, and a // pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - ginkgo.It("create a PVC and non-pre-bound PV: test write access", func() { + ginkgo.It("create a PVC and non-pre-bound PV: test write access", func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, false) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -187,7 +187,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a claim first, then a pre-bound nfs PV that matches the claim, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - ginkgo.It("create a PVC and a pre-bound PV: test write access", func() { + ginkgo.It("create a PVC and a pre-bound PV: test write access", func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVCPV(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -196,7 +196,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create a nfs PV first, then a pre-bound PVC that matches the PV, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. - ginkgo.It("create a PV and a pre-bound PVC: test write access", func() { + ginkgo.It("create a PV and a pre-bound PVC: test write access", func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) completeTest(f, c, ns, pv, pvc) @@ -233,7 +233,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 2 PVs and 4 PVCs. 
// Note: PVs are created before claims and no pre-binding - ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func() { + ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func(ctx context.Context) { numPVs, numPVCs := 2, 4 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -243,7 +243,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 3 PVs and 3 PVCs. // Note: PVs are created before claims and no pre-binding - ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func() { + ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func(ctx context.Context) { numPVs, numPVCs := 3, 3 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -253,7 +253,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 4 PVs and 2 PVCs. // Note: PVs are created before claims and no pre-binding. - ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() { + ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func(ctx context.Context) { numPVs, numPVCs := 4, 2 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -283,7 +283,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // This ginkgo.It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked // for files. If files are found, the checking Pod fails, failing the test. Otherwise, the pod // (and test) succeed. - ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { + ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func(ctx context.Context) { ginkgo.By("Writing to the volume.") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')") pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) @@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { e2estatefulset.DeleteAllStatefulSets(c, ns) }) - ginkgo.It("should be reschedulable [Slow]", func() { + ginkgo.It("should be reschedulable [Slow]", func(ctx context.Context) { // Only run on providers with default storageclass e2epv.SkipIfNoDefaultStorageClass(c) diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 9c63715c688..6a2210941a7 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { } }) - ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { + ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func(ctx context.Context) { ginkgo.By("Deleting the PV") err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") @@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { framework.ExpectNoError(err, "waiting for PV to be deleted") }) - ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() { + ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func(ctx context.Context) { ginkgo.By("Creating a PVC") pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace) pvc, err = 
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index bf69c49f8c2..ec8d682e7de 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -114,7 +114,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { } }) - ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() { + ginkgo.It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func(ctx context.Context) { ginkgo.By("Deleting the pod using the PVC") err = e2epod.DeletePodWithWait(client, pod) framework.ExpectNoError(err, "Error terminating and deleting pod") @@ -126,7 +126,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { pvcCreatedAndNotDeleted = false }) - ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() { + ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func(ctx context.Context) { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") @@ -145,7 +145,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { pvcCreatedAndNotDeleted = false }) - ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() { + ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func(ctx context.Context) { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, *metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 29623d99aba..b3a842dc703 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Regional PD", func() { testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 3 /* pvcCount */) }) - ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() { + ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func(ctx context.Context) { testZonalFailover(c, ns) }) }) diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index 7aa837e0461..eca7b7851c2 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -57,7 +57,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Testname: SubPath: Reading content from a secret volume. Description: Containers in a pod can read content from a secret mounted volume which was configured with a subpath. 
*/ - framework.ConformanceIt("should support subpaths with secret pod", func() { + framework.ConformanceIt("should support subpaths with secret pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext) testsuites.TestBasicSubpath(f, "secret-value", pod) }) @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Testname: SubPath: Reading content from a configmap volume. Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath. */ - framework.ConformanceIt("should support subpaths with configmap pod", func() { + framework.ConformanceIt("should support subpaths with configmap pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) testsuites.TestBasicSubpath(f, "configmap-value", pod) }) @@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Testname: SubPath: Reading content from a configmap volume. Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath and also using a mountpath that is a specific file. */ - framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file", func() { + framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) file := "/etc/resolv.conf" pod.Spec.Containers[0].VolumeMounts[0].MountPath = file @@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Testname: SubPath: Reading content from a downwardAPI volume. Description: Containers in a pod can read content from a downwardAPI mounted volume which was configured with a subpath. */ - framework.ConformanceIt("should support subpaths with downward pod", func() { + framework.ConformanceIt("should support subpaths with downward pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "downward/podname", "downwardAPI", &v1.VolumeSource{ DownwardAPI: &v1.DownwardAPIVolumeSource{ Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}, @@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Testname: SubPath: Reading content from a projected volume. Description: Containers in a pod can read content from a projected mounted volume which was configured with a subpath. 
*/ - framework.ConformanceIt("should support subpaths with projected pod", func() { + framework.ConformanceIt("should support subpaths with projected pod", func(ctx context.Context) { pod := testsuites.SubpathTestPod(f, "projected/configmap-key", "projected", &v1.VolumeSource{ Projected: &v1.ProjectedVolumeSource{ Sources: []v1.VolumeProjection{ @@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Subpath", func() { }) ginkgo.Context("Container restart", func() { - ginkgo.It("should verify that container can restart successfully after configmaps modified", func() { + ginkgo.It("should verify that container can restart successfully after configmaps modified", func(ctx context.Context) { configmapToModify := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-value"}} configmapModified := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap-to-modify"}, Data: map[string]string{"configmap-key": "configmap-modified-value"}} testsuites.TestPodContainerRestartWithConfigmapModified(f, configmapToModify, configmapModified) diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index 2b7bd26dc81..02731bc69a3 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -17,6 +17,8 @@ limitations under the License. package testsuites import ( + "context" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" @@ -160,7 +162,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa func(t singlePodTest) { if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) || (pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) { - ginkgo.It(t.testItStmt, func() { + ginkgo.It(t.testItStmt, func(ctx context.Context) { init(nil) defer cleanup() @@ -232,7 +234,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa for _, test := range multiplePodTests { func(t multiplePodTest) { if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { - ginkgo.It(t.testItStmt, func() { + ginkgo.It(t.testItStmt, func(ctx context.Context) { init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}) defer cleanup() diff --git a/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/test/e2e/storage/testsuites/fsgroupchangepolicy.go index 542264119a6..27ebef60ce5 100644 --- a/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "strconv" @@ -207,7 +208,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD for _, t := range tests { test := t testCaseName := fmt.Sprintf("(%s)[LinuxOnly], %s", test.podfsGroupChangePolicy, test.name) - ginkgo.It(testCaseName, func() { + ginkgo.It(testCaseName, func(ctx context.Context) { dInfo := driver.GetDriverInfo() policy := v1.PodFSGroupChangePolicy(test.podfsGroupChangePolicy) diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 3a96a391558..09f14b8d8aa 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -132,7 +132,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] ==> [ node1 ] // / \ <- same volume mode / \ // [volume1] [volume2] [volume1] [volume2] - ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() { + ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func(ctx context.Context) { // Currently, multiple volumes are not generally available for pre-provisoined volume, // because containerized storage servers, such as iSCSI and rbd, are just returning // a static volume inside container, not actually creating a new volume per request. @@ -162,7 +162,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] ==> [ node2 ] // / \ <- same volume mode / \ // [volume1] [volume2] [volume1] [volume2] - ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() { + ginkgo.It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func(ctx context.Context) { // Currently, multiple volumes are not generally available for pre-provisoined volume, // because containerized storage servers, such as iSCSI and rbd, are just returning // a static volume inside container, not actually creating a new volume per request. 
@@ -203,7 +203,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] ==> [ node1 ] // / \ <- different volume mode / \ // [volume1] [volume2] [volume1] [volume2] - ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() { + ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func(ctx context.Context) { if pattern.VolMode == v1.PersistentVolumeFilesystem { e2eskipper.Skipf("Filesystem volume case should be covered by block volume case -- skipping") } @@ -242,7 +242,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] ==> [ node2 ] // / \ <- different volume mode / \ // [volume1] [volume2] [volume1] [volume2] - ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() { + ginkgo.It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func(ctx context.Context) { if pattern.VolMode == v1.PersistentVolumeFilesystem { e2eskipper.Skipf("Filesystem volume case should be covered by block volume case -- skipping") } @@ -292,7 +292,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] // \ / <- same volume mode // [volume1] - ginkgo.It("should concurrently access the single volume from pods on the same node", func() { + ginkgo.It("should concurrently access the single volume from pods on the same node", func(ctx context.Context) { init() defer cleanup() @@ -415,7 +415,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] // \ / <- same volume mode (read only) // [volume1] - ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() { + ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func(ctx context.Context) { init() defer cleanup() @@ -447,7 +447,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] [ node2 ] // \ / <- same volume mode // [volume1] - ginkgo.It("should concurrently access the single volume from pods on different node", func() { + ginkgo.It("should concurrently access the single volume from pods on different node", func(ctx context.Context) { init() defer cleanup() diff --git a/test/e2e/storage/testsuites/readwriteoncepod.go b/test/e2e/storage/testsuites/readwriteoncepod.go index 85124d50ef6..9fb6c840650 100644 --- a/test/e2e/storage/testsuites/readwriteoncepod.go +++ b/test/e2e/storage/testsuites/readwriteoncepod.go @@ -121,7 +121,7 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv ginkgo.DeferCleanup(cleanup) }) - ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume", func() { + ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume", func(ctx context.Context) { // Create the ReadWriteOncePod PVC. 
accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod} l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) @@ -157,7 +157,7 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv framework.ExpectNoError(err, "failed to wait for pod2 running status") }) - ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func() { + ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func(ctx context.Context) { // Create the ReadWriteOncePod PVC. accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod} l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes) diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go index e56405b1424..9e99ca0b242 100644 --- a/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/test/e2e/storage/testsuites/snapshottable_stress.go @@ -251,7 +251,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD createPodsAndVolumes() }) - ginkgo.It("should support snapshotting of many volumes repeatedly [Slow] [Serial]", func() { + ginkgo.It("should support snapshotting of many volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { // Repeatedly create and delete snapshots of each volume. for i := 0; i < stressTest.testOptions.NumPods; i++ { for j := 0; j < stressTest.testOptions.NumSnapshots; j++ { diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index fa0fd98beec..9cb4255b344 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -191,7 +191,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte driverName := driver.GetDriverInfo().Name - ginkgo.It("should support non-existent path", func() { + ginkgo.It("should support non-existent path", func(ctx context.Context) { init() defer cleanup() @@ -202,7 +202,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(f, l.filePathInVolume, l.pod, 1) }) - ginkgo.It("should support existing directory", func() { + ginkgo.It("should support existing directory", func(ctx context.Context) { init() defer cleanup() @@ -216,7 +216,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(f, l.filePathInVolume, l.pod, 1) }) - ginkgo.It("should support existing single file [LinuxOnly]", func() { + ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -227,7 +227,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - ginkgo.It("should support file as subpath [LinuxOnly]", func() { + ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -237,7 +237,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte TestBasicSubpath(f, f.Namespace.Name, l.pod) }) - ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func() { + ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -253,7 +253,7 
@@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(f, l.pod, false) }) - ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func() { + ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -264,7 +264,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(f, l.pod, false) }) - ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func() { + ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -275,7 +275,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(f, l.pod, false) }) - ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func() { + ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -291,7 +291,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(f, l.pod, false) }) - ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func() { + ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) { init() defer cleanup() @@ -317,7 +317,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testMultipleReads(f, l.pod, 0, filepath1, filepath2) }) - ginkgo.It("should support restarting containers using directory as subpath [Slow]", func() { + ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) { init() defer cleanup() @@ -328,7 +328,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodContainerRestart(f, l.pod) }) - ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func() { + ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -338,7 +338,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodContainerRestart(f, l.pod) }) - ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() { + ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -350,7 +350,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testSubpathReconstruction(f, l.hostExec, l.pod, false) }) - ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() { + ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -362,7 +362,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testSubpathReconstruction(f, l.hostExec, l.pod, true) }) - ginkgo.It("should support readOnly directory specified in the volumeMount", func() { + ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) { init() defer cleanup() @@ -377,7 +377,7 @@ func (s *subPathTestSuite) DefineTests(driver 
storageframework.TestDriver, patte testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func() { + ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) { init() defer cleanup() @@ -392,7 +392,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(f, volumePath, l.pod, 0) }) - ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func() { + ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) { init() defer cleanup() if l.roVolSource == nil { @@ -420,7 +420,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(f, l.filePathInSubpath, l.pod, 0) }) - ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func() { + ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) { init() defer cleanup() if l.roVolSource == nil { @@ -442,7 +442,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Set this test linux-only because the test will fail in Windows when // deleting a dir from one container while another container still use it. - ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func() { + ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) { init() defer cleanup() diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 39120d53fae..3c09df83195 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -160,7 +160,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt l.migrationCheck.validateMigrationVolumeOpCounts() } - ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func() { + ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func(ctx context.Context) { l := init() defer func() { cleanup(l) @@ -188,7 +188,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt t.verifyNodeTopology(node, allowedTopologies) }) - ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func() { + ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func(ctx context.Context) { l := init() defer func() { cleanup(l) diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index b91e27690ad..9f9d03e511d 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -152,7 +152,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, } if !pattern.AllowExpansion { - ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func() { + ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) { init() defer cleanup() @@ -169,7 +169,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, framework.ExpectError(err, "While updating non-expandable PVC") }) } else { - ginkgo.It("Verify if offline PVC expansion works", func() { + 
ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) { init() defer cleanup() @@ -245,7 +245,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions") }) - ginkgo.It("should resize volume when PVC is edited while pod is using it", func() { + ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) { init() defer cleanup() diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 489ad92f4ac..fc31eda9fc3 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -139,7 +139,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt l.migrationCheck.validateMigrationVolumeOpCounts() } - ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func() { + ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) { init() defer cleanup() diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index f82dc1bced3..17fd6f0ec1b 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -195,7 +195,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, createPodsAndVolumes() }) - ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func() { + ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { // Restart pod repeatedly for i := 0; i < l.testOptions.NumPods; i++ { podIndex := i diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index c1edad5dd30..c8cc5cb567b 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -244,7 +244,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, framework.ExpectNoError(err) }) - ginkgo.It("should verify that all csinodes have volume limits", func() { + ginkgo.It("should verify that all csinodes have volume limits", func(ctx context.Context) { driverInfo := driver.GetDriverInfo() if !driverInfo.Capabilities[storageframework.CapVolumeLimits] { ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name)) diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 1448c23f8a0..8289bb7dfb1 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -196,7 +196,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa switch pattern.VolType { case storageframework.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() { + ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) { manualInit() defer cleanup() @@ -257,7 +257,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa case storageframework.DynamicPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func() { + ginkgo.It("should fail in binding dynamic 
provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) { manualInit() defer cleanup() @@ -296,7 +296,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) } - ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() { + ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func(ctx context.Context) { skipTestIfBlockNotSupported(driver) init() testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange @@ -351,7 +351,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) - ginkgo.It("should not mount / map unused volumes in a pod [LinuxOnly]", func() { + ginkgo.It("should not mount / map unused volumes in a pod [LinuxOnly]", func(ctx context.Context) { if pattern.VolMode == v1.PersistentVolumeBlock { skipTestIfBlockNotSupported(driver) } diff --git a/test/e2e/storage/testsuites/volumeperf.go b/test/e2e/storage/testsuites/volumeperf.go index 5c77f47eb7b..a5a93bddea3 100644 --- a/test/e2e/storage/testsuites/volumeperf.go +++ b/test/e2e/storage/testsuites/volumeperf.go @@ -143,7 +143,7 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri framework.ExpectNoError(err) }) - ginkgo.It("should provision volumes at scale within performance constraints [Slow] [Serial]", func() { + ginkgo.It("should provision volumes at scale within performance constraints [Slow] [Serial]", func(ctx context.Context) { l = &local{ cs: f.ClientSet, ns: f.Namespace, diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 472e7169148..b924c4a60bd 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -22,6 +22,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "path/filepath" @@ -155,7 +156,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte l.migrationCheck.validateMigrationVolumeOpCounts() } - ginkgo.It("should store data", func() { + ginkgo.It("should store data", func(ctx context.Context) { init() defer func() { e2evolume.TestServerCleanup(f, storageframework.ConvertTestConfig(l.config)) @@ -192,7 +193,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte // Exec works only on filesystem volumes if pattern.VolMode != v1.PersistentVolumeBlock { - ginkgo.It("should allow exec of files on the volume", func() { + ginkgo.It("should allow exec of files on the volume", func(ctx context.Context) { skipExecTest(driver) init() defer cleanup() diff --git a/test/e2e/storage/ubernetes_lite_volumes.go b/test/e2e/storage/ubernetes_lite_volumes.go index 9a38d03b7f9..c89f1f44b29 100644 --- a/test/e2e/storage/ubernetes_lite_volumes.go +++ b/test/e2e/storage/ubernetes_lite_volumes.go @@ -53,7 +53,7 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() { e2eskipper.SkipUnlessAtLeast(zoneCount, 2, msg) // TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread }) - ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() { + ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func(ctx context.Context) { PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) }) }) diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 25c550cc243..ce9e7106068 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -463,27 +463,27 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } testAll := func(isEphemeral bool) { - ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func() { + ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func(ctx context.Context) { provisioning(isEphemeral) }) // TODO(mauriciopoppe): after CSIMigration is turned on we're no longer reporting // the volume_provision metric (removed in #106609), issue to investigate the bug #106773 - ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func() { + ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func(ctx context.Context) { provisioningError(isEphemeral) }) - ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func() { + ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func(ctx context.Context) { filesystemMode(isEphemeral) }) - ginkgo.It("should create volume metrics with the correct BlockMode PVC ref", func() { + ginkgo.It("should create volume metrics with the correct BlockMode PVC ref", func(ctx context.Context) { blockmode(isEphemeral) }) - ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { + ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func(ctx context.Context) { totalTime(isEphemeral) }) - ginkgo.It("should create volume metrics in Volume Manager", func() { + ginkgo.It("should create volume metrics in Volume Manager", func(ctx context.Context) { volumeManager(isEphemeral) }) - ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() { + ginkgo.It("should create metrics for total number of 
volumes in A/D Controller", func(ctx context.Context) { adController(isEphemeral) }) } @@ -595,7 +595,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { originMetricValues = nil }) - ginkgo.It("should create none metrics for pvc controller before creating any PV or PVC", func() { + ginkgo.It("should create none metrics for pvc controller before creating any PV or PVC", func(ctx context.Context) { validator([]map[string]int64{nil, nil, nil, nil}) }) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 8af3f17c2e1..6838b238220 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -419,7 +419,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) }) - ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() { + ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func(ctx context.Context) { // This case tests for the regressions of a bug fixed by PR #21268 // REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV // not being deleted. @@ -465,7 +465,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.Logf("0 PersistentVolumes remain.") }) - ginkgo.It("deletion should be idempotent", func() { + ginkgo.It("deletion should be idempotent", func(ctx context.Context) { // This test ensures that deletion of a volume is idempotent. // It creates a PV with Retain policy, deletes underlying AWS / GCE // volume and changes the reclaim policy to Delete. @@ -623,7 +623,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) // Modifying the default storage class can be disruptive to other tests that depend on it - ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() { + ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(c) @@ -662,7 +662,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) // Modifying the default storage class can be disruptive to other tests that depend on it - ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() { + ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(c) diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 14a84e1ece2..9e3770bf866 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("Volumes", func() { }) ginkgo.Describe("ConfigMap", func() { - ginkgo.It("should be mountable", func() { + ginkgo.It("should be mountable", func(ctx context.Context) { config := e2evolume.TestConfig{ Namespace: namespace.Name, Prefix: "configmap", diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index ab35f37381c..9bbc1ca1196 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -126,7 +126,7 @@ var _ = 
utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() } }) - ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() { + ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { ginkgo.By("Deleting the Claim") framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil @@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 1. Delete PV. 2. Delete POD, POD deletion should succeed. */ - ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach", func() { + ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { ginkgo.By("Deleting the Persistent Volume") framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) pv = nil @@ -157,7 +157,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 2. Restart kubelet 3. Verify that written file is accessible after kubelet restart */ - ginkgo.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func() { + ginkgo.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, e2epod.VolumeMountPath1) }) @@ -173,7 +173,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 4. Start kubelet. 5. Verify that volume mount not to be found. */ - ginkgo.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { + ginkgo.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, e2epod.VolumeMountPath1) }) @@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 2. Wait for namespace to get deleted. (Namespace deletion should trigger deletion of belonging pods) 3. Verify volume should be detached from the node. */ - ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { + ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func(ctx context.Context) { ginkgo.By("Deleting the Namespace") err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 79bf7fe5d51..53a6875e9cc 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo 5. Delete PVC 6. Verify PV is deleted automatically. 
*/ - ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { + ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func(ctx context.Context) { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) framework.ExpectNoError(err) @@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo 8. Delete the pod. 9. Verify PV should be detached from the node and automatically deleted. */ - ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { + ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func(ctx context.Context) { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) @@ -171,7 +171,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo 11. Created POD using PVC created in Step 10 and verify volume content is matching. */ - ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { + ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func(ctx context.Context) { var err error var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 74c73072674..e0b720c1f00 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -84,7 +84,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol) } }) - ginkgo.It("should bind volume with claim for given label", func() { + ginkgo.It("should bind volume with claim for given label", func(ctx context.Context) { volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index a8f140e3fc3..0a482703e6f 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -114,7 +114,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { }) }) - ginkgo.It("vsphere scale tests", func() { + ginkgo.It("vsphere scale tests", func(ctx context.Context) { var pvcClaimList []string nodeVolumeMap := make(map[string][]string) // Volumes will be provisioned with each different types of Storage Class diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 5b63bbdfe23..630bea92a01 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { Bootstrap(f) }) - ginkgo.It("vsphere statefulset testing", func() { + ginkgo.It("vsphere statefulset testing", func(ctx context.Context) { ginkgo.By("Creating StorageClass for Statefulset") scParameters := 
make(map[string]string) scParameters["diskformat"] = "thin" diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index a4ceed43fe3..87c8e460ad9 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -88,7 +88,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - ginkgo.It("vsphere stress tests", func() { + ginkgo.It("vsphere stress tests", func(ctx context.Context) { scArrays := make([]*storagev1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 82610d17335..97d695657a7 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 6. Delete the volume */ - ginkgo.It("verify static provisioning on clustered datastore", func() { + ginkgo.It("verify static provisioning on clustered datastore", func(ctx context.Context) { var volumePath string ginkgo.By("creating a test vsphere volume") @@ -119,7 +119,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify datastore to be a clustered datastore name 2. invokeValidPolicyTest - util to do e2e dynamic provision test */ - ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func() { + ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func(ctx context.Context) { scParameters[Datastore] = clusterDatastore invokeValidPolicyTest(f, client, namespace, scParameters) }) @@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify storage policy to be a tag based spbm policy 2. 
invokeValidPolicyTest - util to do e2e dynamic provision test */ - ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func() { + ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func(ctx context.Context) { policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) scParameters[SpbmStoragePolicy] = policyDatastoreCluster invokeValidPolicyTest(f, client, namespace, scParameters) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index d96293debde..4fbcba327eb 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", vSphereCSIMigrationEnabled = GetAndExpectBoolEnvVar(VSphereCSIMigrationEnabled) }) - ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { + ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func(ctx context.Context) { ginkgo.By("Invoking Test for invalid datastore") scParameters[Datastore] = invalidDatastore scParameters[DiskFormat] = ThinDisk diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 14a2e1e01fb..bfb9cbc991f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -81,15 +81,15 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, client, nodeName, NodeLabelKey) }) - ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: eagerzeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") }) - ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: zeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") }) - ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { ginkgo.By("Invoking Test for diskformat: thin") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin") }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index c820cb43d0a..33357fffdb6 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func() { + ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func(ctx context.Context) { 
ginkgo.By("Invoking Test disk size") scParameters[Datastore] = datastore scParameters[DiskFormat] = ThinDisk diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 503119fb9ae..99aea04e3fc 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -83,17 +83,17 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) }) - ginkgo.It("verify fstype - ext3 formatted volume", func() { + ginkgo.It("verify fstype - ext3 formatted volume", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: ext3") invokeTestForFstype(f, client, namespace, ext3FSType, ext3FSType) }) - ginkgo.It("verify fstype - default value should be ext4", func() { + ginkgo.It("verify fstype - default value should be ext4", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: Default Value - ext4") invokeTestForFstype(f, client, namespace, "", ext4FSType) }) - ginkgo.It("verify invalid fstype", func() { + ginkgo.It("verify invalid fstype", func(ctx context.Context) { ginkgo.By("Invoking Test for fstype: invalid Value") invokeTestForInvalidFstype(f, client, namespace, invalidFSType) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 72f4eb06265..9958f3749e0 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -140,7 +140,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup } }) - ginkgo.It("verify volume remains attached after master kubelet restart", func() { + ginkgo.It("verify volume remains attached after master kubelet restart", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() // Create pod on each node diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index 9785d01b80e..b8c8eff664a 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -51,7 +51,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR") }) - ginkgo.It("node unregister", func() { + ginkgo.It("node unregister", func(ctx context.Context) { ginkgo.By("Get total Ready nodes") nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) framework.ExpectNoError(err) @@ -69,9 +69,6 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] // They are required to register a node VM to VC vmxFilePath := getVMXFilePath(vmObject) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - vmHost, err := vmObject.HostSystem(ctx) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 08a6a9755d3..bf5bf66d36e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", 11. Delete the PVC 12. 
Delete the StorageClass */ - ginkgo.It("verify volume status after node power off", func() { + ginkgo.It("verify volume status after node power off", func(ctx context.Context) { ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec, metav1.CreateOptions{}) @@ -123,8 +123,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() _, err = vm.PowerOff(ctx) framework.ExpectNoError(err) defer vm.PowerOn(ctx) diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 42f7d4f2343..13197e1c4d4 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should create pod with many volumes and verify no attach call fails", func() { + ginkgo.It("should create pod with many volumes and verify no attach call fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volumeOpsScale)) ginkgo.By("Creating Storage Class") scParameters := make(map[string]string) diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 31fbb064fbe..34c23b12736 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { nodeSelectorList = createNodeLabels(client, namespace, nodes) }) - ginkgo.It("vcp performance tests", func() { + ginkgo.It("vcp performance tests", func(ctx context.Context) { scList := getTestStorageClasses(client, policyName, datastoreName) defer func(scList []*storagev1.StorageClass) { for _, sc := range scList { diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index fb80df1c63b..faeba671f20 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { */ - ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() { + ginkgo.It("should create and delete pod with the same volume source on the same worker node", func(ctx context.Context) { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) @@ -137,7 +137,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { 13. Delete pod. 
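Aside from the signature change that runs through every hunk in this patch, the node-unregister and node-poweroff hunks above also drop the hand-rolled context.WithCancel(context.Background()) and reuse the ctx that Ginkgo now injects for the govmomi calls. The following is a minimal, self-contained sketch of that pattern, assuming Ginkgo v2's context-aware closures; vmPowerOff is a hypothetical stand-in for a call such as vm.PowerOff(ctx), not a helper from this patch.

package vspheresketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// vmPowerOff is a hypothetical stand-in for a govmomi call such as
// vm.PowerOff(ctx); it exists only to keep the sketch self-contained.
func vmPowerOff(ctx context.Context) error {
	return ctx.Err()
}

var _ = ginkgo.Describe("Node Poweroff (sketch)", func() {
	// Ginkgo v2 passes a context into the spec body and cancels it when the
	// spec is interrupted or times out, so the old preamble
	//
	//	ctx, cancel := context.WithCancel(context.Background())
	//	defer cancel()
	//
	// is no longer needed; the injected ctx is used directly.
	ginkgo.It("reuses the injected context", func(ctx context.Context) {
		if err := vmPowerOff(ctx); err != nil {
			ginkgo.Fail("power off failed: " + err.Error())
		}
	})
})

Call sites that still pass context.TODO(), like the StorageClasses().Create above, keep compiling as-is and can be pointed at the injected ctx in a later cleanup.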
*/ - ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { + ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func(ctx context.Context) { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -172,7 +172,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { 10. Wait for vmdk1 and vmdk2 to be detached from node. */ - ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() { + ginkgo.It("should create and delete pod with multiple volumes from same datastore", func(ctx context.Context) { ginkgo.By("creating another vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) @@ -214,7 +214,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { 9. Delete POD. 10. Wait for vmdk1 and vmdk2 to be detached from node. */ - ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() { + ginkgo.It("should create and delete pod with multiple volumes from different datastore", func(ctx context.Context) { ginkgo.By("creating another vmdk on non default shared datastore") var volumeOptions *VolumeOptions volumeOptions = new(VolumeOptions) @@ -266,7 +266,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { 10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching. 11. Wait for vmdk1 and vmdk2 to be detached from node. */ - ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { + ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func(ctx context.Context) { var ( podA *v1.Pod podB *v1.Pod diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 42089c92447..bad5fccb530 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs } }) - ginkgo.It("verify volume remains attached through vpxd restart", func() { + ginkgo.It("verify volume remains attached through vpxd restart", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() for vcHost, nodes := range vcNodesMap { diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index a9ca1b3460b..15be7bf0ded 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. 
- ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal @@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[PolicyDiskStripes] = "1" scParameters[PolicyObjectSpaceReservation] = "30" @@ -117,7 +117,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal @@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal @@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Invalid VSAN storage capabilities parameters. 
- ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal @@ -151,7 +151,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // diskStripes value has to be between 1 and 12. - ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal @@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // hostFailuresToTolerate value has to be between 0 and 3 including. - ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Specify a valid VSAN policy on a non-VSAN test bed. // The test should fail. 
- ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, vmfsDatastore)) scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal @@ -195,7 +195,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp } }) - ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) scParameters[SpbmStoragePolicy] = policyName scParameters[DiskFormat] = ThinDisk @@ -203,7 +203,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp invokeValidPolicyTest(f, client, namespace, scParameters) }) - ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { + ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func(ctx context.Context) { scParameters[PolicyDiskStripes] = diskStripesCapabilityMaxVal scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = vsanDatastore @@ -214,7 +214,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp invokeStaleDummyVMTestWithStoragePolicy(client, controlPlaneNode, namespace, kubernetesClusterName, scParameters) }) - ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, vsanDatastore)) scParameters[SpbmStoragePolicy] = tagPolicy scParameters[Datastore] = vsanDatastore @@ -228,7 +228,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp } }) - ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) scParameters[SpbmStoragePolicy] = BronzeStoragePolicy scParameters[DiskFormat] = ThinDisk @@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp } }) - ginkgo.It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName)) 
scParameters[SpbmStoragePolicy] = policyName gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty()) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index 898ec6cbdd7..d48749a6d5b 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -127,20 +127,20 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { framework.ExpectNoError(err) }) - ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { + ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) zones = append(zones, zoneA) verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones) }) - ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { + ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones) }) - ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone)) zones = append(zones, invalidZone) err := verifyPVCCreationFails(client, namespace, nil, zones, "") @@ -151,27 +151,27 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "") }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "") }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) 
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneC) @@ -182,21 +182,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy zones = append(zones, zoneA) @@ -207,7 +207,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 @@ -215,7 +215,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage 
class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[Datastore] = vsanDatastore1 @@ -227,7 +227,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore2 @@ -239,7 +239,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with no zones")) err := verifyPVCCreationFails(client, namespace, nil, nil, "") errorMsg := "No shared datastores found in the Kubernetes cluster" @@ -248,7 +248,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 err := verifyPVCCreationFails(client, namespace, scParameters, nil, "") @@ -258,7 +258,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy err := verifyPVCCreationFails(client, namespace, scParameters, nil, "") @@ -268,7 +268,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { + ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 @@ -279,7 +279,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { 
} }) - ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { + ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, nil, zones, "") @@ -289,7 +289,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) @@ -300,7 +300,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { + ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal zones = append(zones, zoneA) @@ -311,7 +311,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", PolicyObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, PolicyIopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal @@ -320,40 +320,40 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD)) zones = append(zones, zoneD) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed 
zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore)) scParameters[Datastore] = localDatastore zones = append(zones, zoneD) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "") }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer) }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) }) - ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() { + ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer) }) - ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() { + ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) @@ -365,7 +365,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { } }) - ginkgo.It("Verify a pod fails to get scheduled when 
conflicting volume topology (allowedTopologies) and pod scheduling constraints(nodeSelector) are specified", func() { + ginkgo.It("Verify a pod fails to get scheduled when conflicting volume topology (allowedTopologies) and pod scheduling constraints(nodeSelector) are specified", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumerMode, storage policy :%s and zone :%s", compatPolicy, zoneA)) scParameters[SpbmStoragePolicy] = compatPolicy // allowedTopologies set as zoneA diff --git a/test/e2e/windows/cpu_limits.go b/test/e2e/windows/cpu_limits.go index 33671ee698b..2fdbbb43d83 100644 --- a/test/e2e/windows/cpu_limits.go +++ b/test/e2e/windows/cpu_limits.go @@ -42,7 +42,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() { powershellImage := imageutils.GetConfig(imageutils.BusyBox) ginkgo.Context("Container limits", func() { - ginkgo.It("should not be exceeded after waiting 2 minutes", func() { + ginkgo.It("should not be exceeded after waiting 2 minutes", func(ctx context.Context) { ginkgo.By("Creating one pod with limit set to '0.5'") podsDecimal := newCPUBurnPods(1, powershellImage, "0.5", "1Gi") e2epod.NewPodClient(f).CreateBatch(podsDecimal) diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index b808147eab8..7ed40e9fc73 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -65,7 +65,7 @@ var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" runDensityBatchTest(f, itArg) }) diff --git a/test/e2e/windows/device_plugin.go b/test/e2e/windows/device_plugin.go index f6ce4ae46a5..d657e1b778e 100644 --- a/test/e2e/windows/device_plugin.go +++ b/test/e2e/windows/device_plugin.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("[Feature:GPUDevicePlugin] Device Plugin", func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") cs = f.ClientSet }) - ginkgo.It("should be able to create a functioning device plugin for Windows", func() { + ginkgo.It("should be able to create a functioning device plugin for Windows", func(ctx context.Context) { ginkgo.By("creating Windows device plugin daemonset") dsName := "directx-device-plugin" daemonsetNameLabel := "daemonset-name" diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index e099e254fa0..c37b0e212cb 100644 --- a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -38,7 +38,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() { f := framework.NewDefaultFramework("dns") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should support configurable pod DNS servers", func() { + ginkgo.It("should support configurable pod DNS servers", func(ctx context.Context) { ginkgo.By("Getting the IP address of the internal Kubernetes service") diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index 41d686f862b..2b163d65a92 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("GMSA support", func() { - ginkgo.It("works end to end", func() { + ginkgo.It("works end to end", func(ctx context.Context) { 
defer ginkgo.GinkgoRecover() ginkgo.By("finding the worker node that fulfills this test's assumptions") @@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", func() { }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) }) - ginkgo.It("can read and write file to remote SMB folder", func() { + ginkgo.It("can read and write file to remote SMB folder", func(ctx context.Context) { defer ginkgo.GinkgoRecover() ginkgo.By("finding the worker node that fulfills this test's assumptions") diff --git a/test/e2e/windows/gmsa_kubelet.go b/test/e2e/windows/gmsa_kubelet.go index 57c3658612d..beb28184f18 100644 --- a/test/e2e/windows/gmsa_kubelet.go +++ b/test/e2e/windows/gmsa_kubelet.go @@ -22,6 +22,7 @@ limitations under the License. package windows import ( + "context" "fmt" "strings" "time" @@ -44,7 +45,7 @@ var _ = SIGDescribe("[Feature:Windows] GMSA Kubelet [Slow]", func() { ginkgo.Describe("kubelet GMSA support", func() { ginkgo.Context("when creating a pod with correct GMSA credential specs", func() { - ginkgo.It("passes the credential specs down to the Pod's containers", func() { + ginkgo.It("passes the credential specs down to the Pod's containers", func(ctx context.Context) { defer ginkgo.GinkgoRecover() podName := "with-correct-gmsa-specs" diff --git a/test/e2e/windows/host_process.go b/test/e2e/windows/host_process.go index 66dc0bf9f19..9d38e91d428 100644 --- a/test/e2e/windows/host_process.go +++ b/test/e2e/windows/host_process.go @@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi f := framework.NewDefaultFramework("host-process-test-windows") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should run as a process on the host/node", func() { + ginkgo.It("should run as a process on the host/node", func(ctx context.Context) { ginkgo.By("selecting a Windows node") targetNode, err := findWindowsNode(f) @@ -138,7 +138,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded) }) - ginkgo.It("should support init containers", func() { + ginkgo.It("should support init containers", func(ctx context.Context) { ginkgo.By("scheduling a pod with a container that verifies init container can configure the node") podName := "host-process-init-pods" filename := fmt.Sprintf("/testfile%s.txt", string(uuid.NewUUID())) @@ -198,7 +198,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded) }) - ginkgo.It("container command path validation", func() { + ginkgo.It("container command path validation", func(ctx context.Context) { // The way hostprocess containers are created is being updated in container // v1.7 to better support volume mounts and part of these changes include @@ -446,7 +446,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi }) - ginkgo.It("should support various volume mount types", func() { + ginkgo.It("should support various volume mount types", func(ctx context.Context) { ns := f.Namespace ginkgo.By("Creating a configmap containing test data and a validation script") @@ -507,7 +507,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded) }) - ginkgo.It("metrics should report count of started and failed to start HostProcess containers", func() { + ginkgo.It("metrics should report count of started and 
failed to start HostProcess containers", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") targetNode, err := findWindowsNode(f) framework.ExpectNoError(err, "Error finding Windows node") @@ -613,7 +613,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi gomega.Expect(beforeMetrics.StartedInitContainersErrorCount).To(gomega.BeNumerically("<", afterMetrics.StartedInitContainersErrorCount), "Count of started HostProcess errors init containers should increase") }) - ginkgo.It("container stats validation", func() { + ginkgo.It("container stats validation", func(ctx context.Context) { ginkgo.By("selecting a Windows node") targetNode, err := findWindowsNode(f) framework.ExpectNoError(err, "Error finding Windows node") @@ -692,7 +692,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi } }) - ginkgo.It("should support querying api-server using in-cluster config", func() { + ginkgo.It("should support querying api-server using in-cluster config", func(ctx context.Context) { // This functionality is only support on containerd v1.7+ ginkgo.By("Ensuring Windows nodes are running containerd v1.7+") windowsNode, err := findWindowsNode(f) @@ -771,7 +771,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi "app logs should not contain 'status=failed") }) - ginkgo.It("should run as localgroup accounts", func() { + ginkgo.It("should run as localgroup accounts", func(ctx context.Context) { // This functionality is only supported on containerd v1.7+ ginkgo.By("Ensuring Windows nodes are running containerd v1.7+") windowsNode, err := findWindowsNode(f) diff --git a/test/e2e/windows/hybrid_network.go b/test/e2e/windows/hybrid_network.go index ba501a557dc..7614f0b03fb 100644 --- a/test/e2e/windows/hybrid_network.go +++ b/test/e2e/windows/hybrid_network.go @@ -17,6 +17,7 @@ limitations under the License. 
package windows import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -53,7 +54,7 @@ var _ = SIGDescribe("Hybrid cluster network", func() { ginkgo.Context("for all supported CNIs", func() { - ginkgo.It("should have stable networking for Linux and Windows pods", func() { + ginkgo.It("should have stable networking for Linux and Windows pods", func(ctx context.Context) { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") @@ -75,7 +76,7 @@ var _ = SIGDescribe("Hybrid cluster network", func() { }) - ginkgo.It("should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]", func() { + ginkgo.It("should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") linuxPod = e2epod.NewPodClient(f).CreateSync(linuxPod) @@ -86,7 +87,7 @@ var _ = SIGDescribe("Hybrid cluster network", func() { assertConsistentConnectivity(f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53)) }) - ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func() { + ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) ginkgo.By("creating a windows pod and waiting for it to be running") windowsPod = e2epod.NewPodClient(f).CreateSync(windowsPod) diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go index 300fdc3ac7b..3b806466c3c 100644 --- a/test/e2e/windows/kubelet_stats.go +++ b/test/e2e/windows/kubelet_stats.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() { ginkgo.Context("when running 10 pods", func() { // 10 seconds is the default scrape timeout for metrics-server and kube-prometheus - ginkgo.It("should return within 10 seconds", func() { + ginkgo.It("should return within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") targetNode, err := findWindowsNode(f) @@ -119,7 +119,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { ginkgo.Describe("Kubelet stats collection for Windows nodes", func() { ginkgo.Context("when windows is booted", func() { - ginkgo.It("should return bootid within 10 seconds", func() { + ginkgo.It("should return bootid within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") targetNode, err := findWindowsNode(f) framework.ExpectNoError(err, "Error finding Windows node") @@ -134,7 +134,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { ginkgo.Context("when running 3 pods", func() { // 10 seconds is the default scrape timeout for metrics-server and kube-prometheus - ginkgo.It("should return within 10 seconds", func() { + ginkgo.It("should return within 10 seconds", func(ctx context.Context) { ginkgo.By("Selecting a Windows node") targetNode, err := findWindowsNode(f) diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go index 8bd139243d4..a7950d68dfb 100644 --- a/test/e2e/windows/memory_limits.go +++ b/test/e2e/windows/memory_limits.go @@ -51,13 +51,13 @@ var _ = SIGDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", func() { }) ginkgo.Context("Allocatable node memory", func() { - ginkgo.It("should be equal to a calculated 
allocatable memory value", func() { + ginkgo.It("should be equal to a calculated allocatable memory value", func(ctx context.Context) { checkNodeAllocatableTest(f) }) }) ginkgo.Context("attempt to deploy past allocatable memory limits", func() { - ginkgo.It("should fail deployments of pods once there isn't enough memory", func() { + ginkgo.It("should fail deployments of pods once there isn't enough memory", func(ctx context.Context) { overrideAllocatableMemoryTest(f, framework.TestContext.CloudConfig.NumNodes) }) }) diff --git a/test/e2e/windows/reboot_node.go b/test/e2e/windows/reboot_node.go index e8ebe32f151..6f0ca3f4b43 100644 --- a/test/e2e/windows/reboot_node.go +++ b/test/e2e/windows/reboot_node.go @@ -41,7 +41,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV f := framework.NewDefaultFramework("reboot-host-test-windows") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should run as a reboot process on the host/node", func() { + ginkgo.It("should run as a reboot process on the host/node", func(ctx context.Context) { ginkgo.By("selecting a Windows node") targetNode, err := findWindowsNode(f) diff --git a/test/e2e/windows/security_context.go b/test/e2e/windows/security_context.go index 801e5a8449d..887c6179bb9 100644 --- a/test/e2e/windows/security_context.go +++ b/test/e2e/windows/security_context.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { f := framework.NewDefaultFramework("windows-run-as-username") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should be able create pods and run containers with a given username", func() { + ginkgo.It("should be able create pods and run containers with a given username", func(ctx context.Context) { ginkgo.By("Creating 2 pods: 1 with the default user, and one with a custom one.") podDefault := runAsUserNamePod(nil) e2eoutput.TestContainerOutput(f, "check default user", podDefault, 0, []string{"ContainerUser"}) @@ -53,7 +53,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { e2eoutput.TestContainerOutput(f, "check set user", podUserName, 0, []string{"ContainerAdministrator"}) }) - ginkgo.It("should not be able to create pods with unknown usernames at Pod level", func() { + ginkgo.It("should not be able to create pods with unknown usernames at Pod level", func(ctx context.Context) { ginkgo.By("Creating a pod with an invalid username") podInvalid := e2epod.NewPodClient(f).Create(runAsUserNamePod(toPtr("FooLish"))) @@ -100,7 +100,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { }, framework.PodStartTimeout, 1*time.Second).Should(gomega.BeTrue()) }) - ginkgo.It("should not be able to create pods with unknown usernames at Container level", func() { + ginkgo.It("should not be able to create pods with unknown usernames at Container level", func(ctx context.Context) { ginkgo.By("Creating a pod with an invalid username at container level and pod running as ContainerUser") p := runAsUserNamePod(toPtr("FooLish")) p.Spec.SecurityContext.WindowsOptions.RunAsUserName = toPtr("ContainerUser") @@ -116,7 +116,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { } }) - ginkgo.It("should override SecurityContext username if set", func() { + ginkgo.It("should override SecurityContext username if set", func(ctx context.Context) { ginkgo.By("Creating a pod with 2 containers with different username configurations.") pod := runAsUserNamePod(toPtr("ContainerAdministrator")) @@ 
-131,7 +131,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { e2eoutput.TestContainerOutput(f, "check pod SecurityContext username", pod, 1, []string{"ContainerAdministrator"}) }) - ginkgo.It("should ignore Linux Specific SecurityContext if set", func() { + ginkgo.It("should ignore Linux Specific SecurityContext if set", func(ctx context.Context) { ginkgo.By("Creating a pod with SELinux options") // It is sufficient to show that the pod comes up here. Since we're stripping the SELinux and other linux // security contexts in apiserver and not updating the pod object in the apiserver, we cannot validate the @@ -155,7 +155,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { f.Namespace.Name), "failed to wait for pod %s to be running", windowsPodWithSELinux.Name) }) - ginkgo.It("should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true", func() { + ginkgo.It("should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true", func(ctx context.Context) { ginkgo.By("Creating a pod") p := runAsUserNamePod(toPtr("ContainerAdministrator")) @@ -173,7 +173,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext", func() { framework.ExpectEqual(true, strings.Contains(event.Message, expectedEventError), "Event error should indicate non-root policy caused container to not start") }) - ginkgo.It("should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true", func() { + ginkgo.It("should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true", func(ctx context.Context) { ginkgo.By("Creating a pod") p := runAsUserNamePod(toPtr("CONTAINERADMINISTRATOR")) diff --git a/test/e2e/windows/service.go b/test/e2e/windows/service.go index 3b7de33834e..21bfea51b33 100644 --- a/test/e2e/windows/service.go +++ b/test/e2e/windows/service.go @@ -17,6 +17,7 @@ limitations under the License. package windows import ( + "context" "fmt" "net" "strconv" @@ -45,7 +46,7 @@ var _ = SIGDescribe("Services", func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") cs = f.ClientSet }) - ginkgo.It("should be able to create a functioning NodePort service for Windows", func() { + ginkgo.It("should be able to create a functioning NodePort service for Windows", func(ctx context.Context) { serviceName := "nodeport-test" ns := f.Namespace.Name diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go index 8891c2638b6..48e403d6506 100644 --- a/test/e2e/windows/volumes.go +++ b/test/e2e/windows/volumes.go @@ -17,6 +17,7 @@ limitations under the License. 
package windows import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -65,7 +66,7 @@ var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() { ginkgo.Context("check volume mount permissions", func() { - ginkgo.It("container should have readOnly permissions on emptyDir", func() { + ginkgo.It("container should have readOnly permissions on emptyDir", func(ctx context.Context) { ginkgo.By("creating a container with readOnly permissions on emptyDir volume") doReadOnlyTest(f, emptyDirSource, emptyDirVolumePath) @@ -74,7 +75,7 @@ var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() { doReadWriteReadOnlyTest(f, emptyDirSource, emptyDirVolumePath) }) - ginkgo.It("container should have readOnly permissions on hostMapPath", func() { + ginkgo.It("container should have readOnly permissions on hostMapPath", func(ctx context.Context) { ginkgo.By("creating a container with readOnly permissions on hostMap volume") doReadOnlyTest(f, hostMapSource, hostMapPath) diff --git a/test/e2e_kubeadm/bootstrap_signer.go b/test/e2e_kubeadm/bootstrap_signer.go index a31e3ceb3f6..25e8325264f 100644 --- a/test/e2e_kubeadm/bootstrap_signer.go +++ b/test/e2e_kubeadm/bootstrap_signer.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + "context" + "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" @@ -39,7 +41,7 @@ var _ = Describe("bootstrap signer", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should be active", func() { + ginkgo.It("should be active", func(ctx context.Context) { //NB. this is technically implemented a part of the control-plane phase // and more specifically if the controller manager is properly configured, // the bootstrapsigner controller is activated and the system:controller:bootstrap-signer diff --git a/test/e2e_kubeadm/bootstrap_token_test.go b/test/e2e_kubeadm/bootstrap_token_test.go index 525f597f14a..dc5e577afe9 100644 --- a/test/e2e_kubeadm/bootstrap_token_test.go +++ b/test/e2e_kubeadm/bootstrap_token_test.go @@ -51,7 +51,7 @@ var _ = Describe("bootstrap token", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { secrets, err := f.ClientSet.CoreV1(). Secrets(kubeSystemNamespace). 
List(context.TODO(), metav1.ListOptions{}) @@ -71,7 +71,7 @@ var _ = Describe("bootstrap token", func() { gomega.Expect(tokenNum).Should(gomega.BeNumerically(">", 0), "At least one bootstrap token should exist") }) - ginkgo.It("should be allowed to post CSR for kubelet certificates on joining nodes", func() { + ginkgo.It("should be allowed to post CSR for kubelet certificates on joining nodes", func(ctx context.Context) { ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet, bootstrapTokensAllowPostCSRClusterRoleBinding, rbacv1.GroupKind, bootstrapTokensGroup, @@ -79,7 +79,7 @@ var _ = Describe("bootstrap token", func() { ) }) - ginkgo.It("should be allowed to auto approve CSR for kubelet certificates on joining nodes", func() { + ginkgo.It("should be allowed to auto approve CSR for kubelet certificates on joining nodes", func(ctx context.Context) { ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet, bootstrapTokensCSRAutoApprovalClusterRoleBinding, rbacv1.GroupKind, bootstrapTokensGroup, diff --git a/test/e2e_kubeadm/cluster_info_test.go b/test/e2e_kubeadm/cluster_info_test.go index f702907c182..52fcfbcc92b 100644 --- a/test/e2e_kubeadm/cluster_info_test.go +++ b/test/e2e_kubeadm/cluster_info_test.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + "context" + authv1 "k8s.io/api/authorization/v1" rbacv1 "k8s.io/api/rbac/v1" bootstrapapi "k8s.io/cluster-bootstrap/token/api" @@ -55,7 +57,7 @@ var _ = Describe("cluster-info ConfigMap", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { // Nb. this is technically implemented a part of the bootstrap-token phase cm := GetConfigMap(f.ClientSet, kubePublicNamespace, clusterInfoConfigMapName) @@ -65,13 +67,13 @@ var _ = Describe("cluster-info ConfigMap", func() { //TODO: What else? server? }) - ginkgo.It("should have related Role and RoleBinding", func() { + ginkgo.It("should have related Role and RoleBinding", func(ctx context.Context) { // Nb. this is technically implemented a part of the bootstrap-token phase ExpectRole(f.ClientSet, kubePublicNamespace, clusterInfoRoleName) ExpectRoleBinding(f.ClientSet, kubePublicNamespace, clusterInfoRoleBindingName) }) - ginkgo.It("should be accessible for anonymous", func() { + ginkgo.It("should be accessible for anonymous", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.UserKind, anonymousUser, clusterInfoConfigMapResource, diff --git a/test/e2e_kubeadm/controlplane_nodes_test.go b/test/e2e_kubeadm/controlplane_nodes_test.go index 845ad615efa..bf087aa2ba1 100644 --- a/test/e2e_kubeadm/controlplane_nodes_test.go +++ b/test/e2e_kubeadm/controlplane_nodes_test.go @@ -49,7 +49,7 @@ var _ = Describe("control-plane node", func() { // Important! 
please note that this test can't be run on single-node clusters // in case you can skip this test with SKIP=multi-node - ginkgo.It("should be labelled and tainted [multi-node]", func() { + ginkgo.It("should be labelled and tainted [multi-node]", func(ctx context.Context) { // get all control-plane nodes (and this implicitly checks that node are properly labeled) controlPlanes := getControlPlaneNodes(f.ClientSet) diff --git a/test/e2e_kubeadm/dns_addon_test.go b/test/e2e_kubeadm/dns_addon_test.go index bbc714ccd54..f8f42337d95 100644 --- a/test/e2e_kubeadm/dns_addon_test.go +++ b/test/e2e_kubeadm/dns_addon_test.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + "context" + "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" admissionapi "k8s.io/pod-security-admission/api" @@ -78,7 +80,7 @@ var _ = Describe("DNS addon", func() { ginkgo.Context("kube-dns", func() { ginkgo.Context("kube-dns ServiceAccount", func() { - ginkgo.It("should exist", func() { + ginkgo.It("should exist", func(ctx context.Context) { if dnsType != "kube-dns" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -88,7 +90,7 @@ var _ = Describe("DNS addon", func() { }) ginkgo.Context("kube-dns Deployment", func() { - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { if dnsType != "kube-dns" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -102,7 +104,7 @@ var _ = Describe("DNS addon", func() { ginkgo.Context("CoreDNS", func() { ginkgo.Context("CoreDNS ServiceAccount", func() { - ginkgo.It("should exist", func() { + ginkgo.It("should exist", func(ctx context.Context) { if dnsType != "CoreDNS" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -110,7 +112,7 @@ var _ = Describe("DNS addon", func() { ExpectServiceAccount(f.ClientSet, kubeSystemNamespace, coreDNSServiceAccountName) }) - ginkgo.It("should have related ClusterRole and ClusterRoleBinding", func() { + ginkgo.It("should have related ClusterRole and ClusterRoleBinding", func(ctx context.Context) { if dnsType != "CoreDNS" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -121,7 +123,7 @@ var _ = Describe("DNS addon", func() { }) ginkgo.Context("CoreDNS ConfigMap", func() { - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { if dnsType != "CoreDNS" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -133,7 +135,7 @@ var _ = Describe("DNS addon", func() { }) ginkgo.Context("CoreDNS Deployment", func() { - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { if dnsType != "CoreDNS" { e2eskipper.Skipf("Skipping because DNS type is %s", dnsType) } @@ -146,7 +148,7 @@ var _ = Describe("DNS addon", func() { }) ginkgo.Context("DNS Service", func() { - ginkgo.It("should exist", func() { + ginkgo.It("should exist", func(ctx context.Context) { ExpectService(f.ClientSet, kubeSystemNamespace, dnsService) }) }) diff --git a/test/e2e_kubeadm/kubeadm_certs_test.go b/test/e2e_kubeadm/kubeadm_certs_test.go index 67d978fb512..e5a368eed93 100644 --- a/test/e2e_kubeadm/kubeadm_certs_test.go +++ b/test/e2e_kubeadm/kubeadm_certs_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubeadm import ( + "context" "fmt" authv1 "k8s.io/api/authorization/v1" @@ -61,7 +62,7 @@ var _ = Describe("kubeadm-certs [copy-certs]", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { s := GetSecret(f.ClientSet, kubeSystemNamespace, kubeadmCertsSecretName) // Checks the kubeadm-certs is ownen by a time lived token @@ -104,12 +105,12 @@ var _ = Describe("kubeadm-certs [copy-certs]", func() { } }) - ginkgo.It("should have related Role and RoleBinding", func() { + ginkgo.It("should have related Role and RoleBinding", func(ctx context.Context) { ExpectRole(f.ClientSet, kubeSystemNamespace, kubeadmCertsRoleName) ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeadmCertsRoleBindingName) }) - ginkgo.It("should be accessible for bootstrap tokens", func() { + ginkgo.It("should be accessible for bootstrap tokens", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, bootstrapTokensGroup, kubeadmCertsSecretResource, diff --git a/test/e2e_kubeadm/kubeadm_config_test.go b/test/e2e_kubeadm/kubeadm_config_test.go index d65e789e982..3fb6ef6b98c 100644 --- a/test/e2e_kubeadm/kubeadm_config_test.go +++ b/test/e2e_kubeadm/kubeadm_config_test.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + "context" + yaml "gopkg.in/yaml.v2" authv1 "k8s.io/api/authorization/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -57,25 +59,25 @@ var _ = Describe("kubeadm-config ConfigMap", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeadmConfigName) gomega.Expect(cm.Data).To(gomega.HaveKey(kubeadmConfigClusterConfigurationConfigMapKey)) }) - ginkgo.It("should have related Role and RoleBinding", func() { + ginkgo.It("should have related Role and RoleBinding", func(ctx context.Context) { ExpectRole(f.ClientSet, kubeSystemNamespace, kubeadmConfigRoleName) ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeadmConfigRoleBindingName) }) - ginkgo.It("should be accessible for bootstrap tokens", func() { + ginkgo.It("should be accessible for bootstrap tokens", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, bootstrapTokensGroup, kubeadmConfigConfigMapResource, ) }) - ginkgo.It("should be accessible for nodes", func() { + ginkgo.It("should be accessible for nodes", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, nodesGroup, kubeadmConfigConfigMapResource, diff --git a/test/e2e_kubeadm/kubelet_config_test.go b/test/e2e_kubeadm/kubelet_config_test.go index 6987ba0087e..4b9a18661f7 100644 --- a/test/e2e_kubeadm/kubelet_config_test.go +++ b/test/e2e_kubeadm/kubelet_config_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package kubeadm import ( + "context" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" @@ -61,24 +63,24 @@ var _ = Describe("kubelet-config ConfigMap", func() { kubeletConfigRoleBindingName = kubeletConfigRoleName }) - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeletConfigConfigMapName) gomega.Expect(cm.Data).To(gomega.HaveKey(kubeletConfigConfigMapKey)) }) - ginkgo.It("should have related Role and RoleBinding", func() { + ginkgo.It("should have related Role and RoleBinding", func(ctx context.Context) { ExpectRole(f.ClientSet, kubeSystemNamespace, kubeletConfigRoleName) ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeletConfigRoleBindingName) }) - ginkgo.It("should be accessible for bootstrap tokens", func() { + ginkgo.It("should be accessible for bootstrap tokens", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, bootstrapTokensGroup, kubeadmConfigConfigMapResource, ) }) - ginkgo.It("should be accessible for nodes", func() { + ginkgo.It("should be accessible for nodes", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, nodesGroup, kubeadmConfigConfigMapResource, diff --git a/test/e2e_kubeadm/networking_test.go b/test/e2e_kubeadm/networking_test.go index e665c51a860..5acdfa545d4 100644 --- a/test/e2e_kubeadm/networking_test.go +++ b/test/e2e_kubeadm/networking_test.go @@ -77,7 +77,7 @@ var _ = Describe("networking [setup-networking]", func() { ginkgo.Context("single-stack", func() { ginkgo.Context("podSubnet", func() { - ginkgo.It("should be properly configured if specified in kubeadm-config", func() { + ginkgo.It("should be properly configured if specified in kubeadm-config", func(ctx context.Context) { if dualStack { e2eskipper.Skipf("Skipping because cluster is dual-stack") } @@ -101,7 +101,7 @@ var _ = Describe("networking [setup-networking]", func() { }) }) ginkgo.Context("serviceSubnet", func() { - ginkgo.It("should be properly configured if specified in kubeadm-config", func() { + ginkgo.It("should be properly configured if specified in kubeadm-config", func(ctx context.Context) { if dualStack { e2eskipper.Skipf("Skipping because cluster is dual-stack") } @@ -126,7 +126,7 @@ var _ = Describe("networking [setup-networking]", func() { }) ginkgo.Context("dual-stack", func() { ginkgo.Context("podSubnet", func() { - ginkgo.It("should be properly configured if specified in kubeadm-config", func() { + ginkgo.It("should be properly configured if specified in kubeadm-config", func(ctx context.Context) { if !dualStack { e2eskipper.Skipf("Skipping because cluster is not dual-stack") } diff --git a/test/e2e_kubeadm/nodes_test.go b/test/e2e_kubeadm/nodes_test.go index 769e8e0870c..93cc062b03c 100644 --- a/test/e2e_kubeadm/nodes_test.go +++ b/test/e2e_kubeadm/nodes_test.go @@ -47,7 +47,7 @@ var _ = Describe("nodes", func() { // so we are disabling the creation of a namespace in order to get a faster execution f.SkipNamespaceCreation = true - ginkgo.It("should have CRI annotation", func() { + ginkgo.It("should have CRI annotation", func(ctx context.Context) { nodes, err := f.ClientSet.CoreV1().Nodes(). 
List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "error reading nodes") @@ -60,7 +60,7 @@ var _ = Describe("nodes", func() { } }) - ginkgo.It("should be allowed to rotate CSR", func() { + ginkgo.It("should be allowed to rotate CSR", func(ctx context.Context) { // Nb. this is technically implemented a part of the bootstrap-token phase ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet, nodesCertificateRotationClusterRoleBinding, diff --git a/test/e2e_kubeadm/proxy_addon_test.go b/test/e2e_kubeadm/proxy_addon_test.go index 4d6cb52a1ec..a7332910c5a 100644 --- a/test/e2e_kubeadm/proxy_addon_test.go +++ b/test/e2e_kubeadm/proxy_addon_test.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + "context" + authv1 "k8s.io/api/authorization/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -60,11 +62,11 @@ var _ = Describe("proxy addon", func() { f.SkipNamespaceCreation = true ginkgo.Context("kube-proxy ServiceAccount", func() { - ginkgo.It("should exist", func() { + ginkgo.It("should exist", func(ctx context.Context) { ExpectServiceAccount(f.ClientSet, kubeSystemNamespace, kubeProxyServiceAccountName) }) - ginkgo.It("should be bound to the system:node-proxier cluster role", func() { + ginkgo.It("should be bound to the system:node-proxier cluster role", func(ctx context.Context) { ExpectClusterRoleBindingWithSubjectAndRole(f.ClientSet, kubeProxyClusterRoleBindingName, rbacv1.ServiceAccountKind, kubeProxyServiceAccountName, @@ -74,19 +76,19 @@ var _ = Describe("proxy addon", func() { }) ginkgo.Context("kube-proxy ConfigMap", func() { - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { cm := GetConfigMap(f.ClientSet, kubeSystemNamespace, kubeProxyConfigMap) gomega.Expect(cm.Data).To(gomega.HaveKey(kubeProxyConfigMapKey)) gomega.Expect(cm.Data).To(gomega.HaveKey(kubeProxyConfigMapKeyKubeconfig)) }) - ginkgo.It("should have related Role and RoleBinding", func() { + ginkgo.It("should have related Role and RoleBinding", func(ctx context.Context) { ExpectRole(f.ClientSet, kubeSystemNamespace, kubeProxyRoleName) ExpectRoleBinding(f.ClientSet, kubeSystemNamespace, kubeProxyRoleBindingName) }) - ginkgo.It("should be accessible by bootstrap tokens", func() { + ginkgo.It("should be accessible by bootstrap tokens", func(ctx context.Context) { ExpectSubjectHasAccessToResource(f.ClientSet, rbacv1.GroupKind, bootstrapTokensGroup, kubeProxyConfigMapResource, @@ -95,7 +97,7 @@ var _ = Describe("proxy addon", func() { }) ginkgo.Context("kube-proxy DaemonSet", func() { - ginkgo.It("should exist and be properly configured", func() { + ginkgo.It("should exist and be properly configured", func(ctx context.Context) { ds := GetDaemonSet(f.ClientSet, kubeSystemNamespace, kubeProxyDaemonSetName) framework.ExpectEqual(ds.Spec.Template.Spec.ServiceAccountName, kubeProxyServiceAccountName) diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 72a4d811e02..1b7e261be17 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -57,11 +57,11 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() f := framework.NewDefaultFramework("apparmor-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should reject an unloaded profile", func() { + ginkgo.It("should reject an unloaded profile", func(ctx context.Context) { status := runAppArmorTest(f, false, 
v1.AppArmorBetaProfileNamePrefix+"non-existent-profile") gomega.Expect(status.ContainerStatuses[0].State.Waiting.Message).To(gomega.ContainSubstring("apparmor")) }) - ginkgo.It("should enforce a profile blocking writes", func() { + ginkgo.It("should enforce a profile blocking writes", func(ctx context.Context) { status := runAppArmorTest(f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"deny-write") if len(status.ContainerStatuses) == 0 { framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) @@ -72,7 +72,7 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state) }) - ginkgo.It("should enforce a permissive profile", func() { + ginkgo.It("should enforce a permissive profile", func(ctx context.Context) { status := runAppArmorTest(f, true, v1.AppArmorBetaProfileNamePrefix+apparmorProfilePrefix+"audit-write") if len(status.ContainerStatuses) == 0 { framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) @@ -88,7 +88,7 @@ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() f := framework.NewDefaultFramework("apparmor-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should reject a pod with an AppArmor profile", func() { + ginkgo.It("should reject a pod with an AppArmor profile", func(ctx context.Context) { status := runAppArmorTest(f, false, v1.AppArmorBetaProfileRuntimeDefault) expectSoftRejection(status) }) diff --git a/test/e2e_node/checkpoint_container.go b/test/e2e_node/checkpoint_container.go index 0adfac8d264..baf21473486 100644 --- a/test/e2e_node/checkpoint_container.go +++ b/test/e2e_node/checkpoint_container.go @@ -67,7 +67,7 @@ func proxyPostRequest(c clientset.Interface, node, endpoint string, port int) (r var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", func() { f := framework.NewDefaultFramework("checkpoint-container-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline - ginkgo.It("will checkpoint a container out of a pod", func() { + ginkgo.It("will checkpoint a container out of a pod", func(ctx context.Context) { ginkgo.By("creating a target pod") podClient := e2epod.NewPodClient(f) pod := podClient.CreateSync(&v1.Pod{ diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go index 446b4b7c505..56796fd469b 100644 --- a/test/e2e_node/container_log_rotation_test.go +++ b/test/e2e_node/container_log_rotation_test.go @@ -50,7 +50,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() initialConfig.ContainerLogMaxSize = testContainerLogMaxSize }) - ginkgo.It("should be rotated and limited to a fixed amount of files", func() { + ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) { ginkgo.By("create log container") pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index a35416f26b1..7776f997ce5 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -82,7 +82,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() { ginkgo.Context("once the node is setup", func() { - ginkgo.It("container runtime's 
oom-score-adj should be -999", func() { + ginkgo.It("container runtime's oom-score-adj should be -999", func(ctx context.Context) { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) framework.ExpectNoError(err, "failed to get list of container runtime pids") for _, pid := range runtimePids { @@ -91,7 +91,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil()) } }) - ginkgo.It("Kubelet's oom-score-adj should be -999", func() { + ginkgo.It("Kubelet's oom-score-adj should be -999", func(ctx context.Context) { kubeletPids, err := getPidsForProcess(kubeletProcessName, "") framework.ExpectNoError(err, "failed to get list of kubelet pids") framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids)) @@ -100,7 +100,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil()) }) ginkgo.Context("", func() { - ginkgo.It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() { + ginkgo.It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func(ctx context.Context) { // Take a snapshot of existing pause processes. These were // created before this test, and may not be infra // containers. They should be excluded from the test. @@ -174,7 +174,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { } }) }) - ginkgo.It("guaranteed container's oom-score-adj should be -998", func() { + ginkgo.It("guaranteed container's oom-score-adj should be -998", func(ctx context.Context) { podClient := e2epod.NewPodClient(f) podName := "guaranteed" + string(uuid.NewUUID()) podClient.Create(&v1.Pod{ @@ -215,7 +215,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) - ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() { + ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func(ctx context.Context) { podClient := e2epod.NewPodClient(f) podName := "burstable" + string(uuid.NewUUID()) podClient.Create(&v1.Pod{ diff --git a/test/e2e_node/cpu_manager_metrics_test.go b/test/e2e_node/cpu_manager_metrics_test.go index 8693380aedb..7f48e363384 100644 --- a/test/e2e_node/cpu_manager_metrics_test.go +++ b/test/e2e_node/cpu_manager_metrics_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "time" @@ -93,7 +94,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { updateKubeletConfig(f, oldCfg, true) }) - ginkgo.It("should report zero pinning counters after a fresh restart", func() { + ginkgo.It("should report zero pinning counters after a fresh restart", func(ctx context.Context) { // we updated the kubelet config in BeforeEach, so we can assume we start fresh. // being [Serial], we can also assume noone else but us is running pods. 
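(Aside, not part of the patch.) Once a spec callback receives ctx, Gomega's asynchronous assertions can be bound to it so that polling stops as soon as the spec times out or is interrupted. A minimal, self-contained sketch under the assumption of a Gomega release that provides WithContext on Eventually and a Ginkgo v2 release that accepts a context-taking callback; the spec name and polled function below are illustrative, not taken from the patch:

```go
package e2enode

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// Sketch only: shows how the ctx handed to the callback can bound polling,
// in addition to the explicit timeout/polling intervals.
var _ = ginkgo.Describe("context-aware polling (sketch)", func() {
	ginkgo.It("stops polling when the spec context is cancelled", func(ctx context.Context) {
		calls := 0
		gomega.Eventually(func() int {
			calls++
			return calls
		}).
			WithContext(ctx).              // cancelled on spec timeout/interrupt
			WithTimeout(30 * time.Second). // upper bound for the polling loop
			WithPolling(1 * time.Second).
			Should(gomega.BeNumerically(">", 2))
	})
})
```

In files like cpu_manager_metrics_test.go above, the same chaining could wrap the existing polled helpers, with the intervals kept as they are in the hunks shown.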
ginkgo.By("Checking the cpumanager metrics right after the kubelet restart, with no pods running") @@ -113,7 +114,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { gomega.Consistently(getCPUManagerMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) }) - ginkgo.It("should report pinning failures when the cpumanager allocation is known to fail", func() { + ginkgo.It("should report pinning failures when the cpumanager allocation is known to fail", func(ctx context.Context) { ginkgo.By("Creating the test pod which will be rejected for SMTAlignmentError") testPod = e2epod.NewPodClient(f).Create(makeGuaranteedCPUExclusiveSleeperPod("smt-align-err", 1)) @@ -136,7 +137,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { gomega.Consistently(getCPUManagerMetrics, 1*time.Minute, 15*time.Second).Should(matchResourceMetrics) }) - ginkgo.It("should not report any pinning failures when the cpumanager allocation is expected to succeed", func() { + ginkgo.It("should not report any pinning failures when the cpumanager allocation is expected to succeed", func(ctx context.Context) { ginkgo.By("Creating the test pod") testPod = e2epod.NewPodClient(f).Create(makeGuaranteedCPUExclusiveSleeperPod("smt-align-ok", smtLevel)) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 9cfbeaf54e0..b76df6ca789 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -523,7 +523,7 @@ func runCPUManagerTests(f *framework.Framework) { } }) - ginkgo.It("should assign CPUs as expected based on the Pod spec", func() { + ginkgo.It("should assign CPUs as expected based on the Pod spec", func(ctx context.Context) { cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(f) // Skip CPU Manager tests altogether if the CPU capacity < 2. @@ -596,7 +596,7 @@ func runCPUManagerTests(f *framework.Framework) { pod.Spec.Containers[0].Name, pod.Name) }) - ginkgo.It("should assign CPUs as expected with enhanced policy based on strict SMT alignment", func() { + ginkgo.It("should assign CPUs as expected with enhanced policy based on strict SMT alignment", func(ctx context.Context) { fullCPUsOnlyOpt := fmt.Sprintf("option=%s", cpumanager.FullPCPUsOnlyOption) _, cpuAlloc, _ = getLocalNodeCPUDetails(f) smtLevel := getSMTLevel() diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index c54a437e56b..bc06438553d 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] f := framework.NewDefaultFramework("critical-pod-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged ginkgo.Context("when we need to admit a critical pod", func() { - ginkgo.It("[Flaky] should be able to create and delete a critical pod", func() { + ginkgo.It("[Flaky] should be able to create and delete a critical pod", func(ctx context.Context) { // because adminssion Priority enable, If the priority class is not found, the Pod is rejected. 
node := getNodeName(f) // Define test pods diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index f4e02a31c0d..d117562fe64 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -105,7 +105,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) @@ -163,7 +163,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) @@ -210,7 +210,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { // Set new API QPS limit cfg.KubeAPIQPS = int32(itArg.APIQPSLimit) }) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) batchLag, e2eLags := runDensityBatchTest(f, rc, itArg, testInfo, true) @@ -249,7 +249,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "sequence" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo) @@ -282,7 +282,7 @@ var _ = SIGDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr) - ginkgo.It(desc, func() { + ginkgo.It(desc, func(ctx context.Context) { itArg.createMethod = "sequence" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) batchlag, e2eLags := runDensitySeqTest(f, rc, itArg, testInfo) diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go index 1860f927b20..9a9e5d036d3 100644 --- a/test/e2e_node/device_manager_test.go +++ b/test/e2e_node/device_manager_test.go @@ -57,7 +57,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur ginkgo.Context("With SRIOV devices in the system", func() { // this test wants to reproduce what happened in https://github.com/kubernetes/kubernetes/issues/102880 - ginkgo.It("should be able to recover V1 (aka pre-1.20) checkpoint data and reject pods before device re-registration", func() { + ginkgo.It("should be able to recover V1 (aka pre-1.20) checkpoint data and reject pods before device re-registration", func(ctx context.Context) { if sriovdevCount, err := countSRIOVDevices(); err != nil || sriovdevCount == 0 { e2eskipper.Skipf("this test is meant to run on a system with at least one configured VF from SRIOV device") } @@ -154,7 +154,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur 
deletePodSyncByName(f, pod.Name) }) - ginkgo.It("should be able to recover V1 (aka pre-1.20) checkpoint data and update topology info on device re-registration", func() { + ginkgo.It("should be able to recover V1 (aka pre-1.20) checkpoint data and update topology info on device re-registration", func(ctx context.Context) { if sriovdevCount, err := countSRIOVDevices(); err != nil || sriovdevCount == 0 { e2eskipper.Skipf("this test is meant to run on a system with at least one configured VF from SRIOV device") } diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 35da39f39e6..242d665f82b 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -168,7 +168,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.By("devices now unavailable on the local node") }) - ginkgo.It("Can schedule a pod that requires a device", func() { + ginkgo.It("Can schedule a pod that requires a device", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" @@ -228,7 +228,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.ExpectEqual(len(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1) }) - ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func() { + ginkgo.It("Keeps device plugin assignments across pod and kubelet restarts", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" @@ -257,7 +257,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.ExpectEqual(devIDRestart1, devID1) }) - ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func() { + ginkgo.It("Keeps device plugin assignments after the device plugin has been re-registered", func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep 60" pod1 := e2epod.NewPodClient(f).CreateSync(makeBusyboxPod(resourceName, podRECMD)) deviceIDRE := "stub devices: (Dev-[0-9]+)" diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index dfd66b1678d..13700c7b1f1 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -560,7 +560,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe e2epod.NewPodClient(f).CreateBatch(pods) }) - ginkgo.It("should eventually evict all of the correct pods", func() { + ginkgo.It("should eventually evict all of the correct pods", func(ctx context.Context) { ginkgo.By(fmt.Sprintf("Waiting for node to have NodeCondition: %s", expectedNodeCondition)) gomega.Eventually(func() error { logFunc() diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 291c360a32b..43d3aa97523 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -187,7 +187,7 @@ func containerGCTest(f *framework.Framework, test testRun) { }, setupDuration, runtimePollInterval).Should(gomega.BeNil()) }) - ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), 
func() { + ginkgo.It(fmt.Sprintf("Should eventually garbage collect containers when we exceed the number of dead containers per container"), func(ctx context.Context) { totalContainers := 0 for _, pod := range test.testPods { totalContainers += pod.numContainers*2 + 1 diff --git a/test/e2e_node/gcp/gke_environment_test.go b/test/e2e_node/gcp/gke_environment_test.go index abd9f1fc6b9..060904bd80a 100644 --- a/test/e2e_node/gcp/gke_environment_test.go +++ b/test/e2e_node/gcp/gke_environment_test.go @@ -18,6 +18,7 @@ package gcp import ( "bytes" + "context" "fmt" "os" "os/exec" @@ -111,7 +112,7 @@ var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][N e2eskipper.RunIfSystemSpecNameIs("gke") }) - ginkgo.It("The required processes should be running", func() { + ginkgo.It("The required processes should be running", func(ctx context.Context) { cmdToProcessMap, err := getCmdToProcessMap() framework.ExpectNoError(err) for _, p := range []struct { @@ -125,10 +126,10 @@ var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][N framework.ExpectNoError(checkProcess(p.cmd, p.ppid, cmdToProcessMap)) } }) - ginkgo.It("The iptable rules should work (required by kube-proxy)", func() { + ginkgo.It("The iptable rules should work (required by kube-proxy)", func(ctx context.Context) { framework.ExpectNoError(checkIPTables()) }) - ginkgo.It("The GCR is accessible", func() { + ginkgo.It("The GCR is accessible", func(ctx context.Context) { framework.ExpectNoError(checkPublicGCR()) }) }) diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index 1a47554961b..be51d9090a3 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -204,7 +204,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H f := framework.NewDefaultFramework("hugepages-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should remove resources for huge page sizes no longer supported", func() { + ginkgo.It("should remove resources for huge page sizes no longer supported", func(ctx context.Context) { ginkgo.By("mimicking support for 9Mi of 3Mi huge page memory by patching the node status") patch := []byte(`[{"op": "add", "path": "/status/capacity/hugepages-3Mi", "value": "9Mi"}, {"op": "add", "path": "/status/allocatable/hugepages-3Mi", "value": "9Mi"}]`) result := f.ClientSet.CoreV1().RESTClient().Patch(types.JSONPatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO()) @@ -230,7 +230,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H }, 30*time.Second, framework.Poll).Should(gomega.Equal(false)) }) - ginkgo.It("should add resources for new huge page sizes on kubelet restart", func() { + ginkgo.It("should add resources for new huge page sizes on kubelet restart", func(ctx context.Context) { ginkgo.By("Stopping kubelet") startKubelet := stopKubelet() ginkgo.By(`Patching away support for hugepage resource "hugepages-2Mi"`) @@ -320,7 +320,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H } runHugePagesTests := func() { - ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func() { + ginkgo.It("should set correct hugetlb mount and limit under the container cgroup", func(ctx context.Context) { ginkgo.By("getting mounts for the test pod") command := []string{"mount"} out := e2epod.ExecCommandInContainer(f, testpod.Name, 
testpod.Spec.Containers[0].Name, command...) diff --git a/test/e2e_node/image_credential_provider.go b/test/e2e_node/image_credential_provider.go index 317c4ee3067..32904375131 100644 --- a/test/e2e_node/image_credential_provider.go +++ b/test/e2e_node/image_credential_provider.go @@ -17,6 +17,8 @@ limitations under the License. package e2enode import ( + "context" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" @@ -42,7 +44,7 @@ var _ = SIGDescribe("ImageCredentialProvider [Feature:KubeletCredentialProviders Testname: Test kubelet image pull with external credential provider plugins Description: Create Pod with an image from a private registry. This test assumes that the kubelet credential provider plugin is enabled for the registry hosting imageutils.AgnhostPrivate. */ - ginkgo.It("should be able to create pod with image credentials fetched from external credential provider ", func() { + ginkgo.It("should be able to create pod with image credentials fetched from external credential provider ", func(ctx context.Context) { privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) name := "pod-auth-image-" + string(uuid.NewUUID()) pod := &v1.Pod{ diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index 51f8a8b80c5..de3f3815bf0 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -37,7 +37,7 @@ var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() { f := framework.NewDefaultFramework("image-id-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("should be set to the manifest digest (from RepoDigests) when available", func() { + ginkgo.It("should be set to the manifest digest (from RepoDigests) when available", func(ctx context.Context) { podDesc := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-with-repodigest", diff --git a/test/e2e_node/lock_contention_linux_test.go b/test/e2e_node/lock_contention_linux_test.go index 840a544be01..7c68fd578c3 100644 --- a/test/e2e_node/lock_contention_linux_test.go +++ b/test/e2e_node/lock_contention_linux_test.go @@ -20,6 +20,7 @@ limitations under the License. package e2enode import ( + "context" "time" "golang.org/x/sys/unix" @@ -38,7 +39,7 @@ const contentionLockFile = "/var/run/kubelet.lock" var _ = SIGDescribe("Lock contention [Slow] [Disruptive] [NodeSpecialFeature:LockContention]", func() { // Requires `--lock-file` & `--exit-on-lock-contention` flags to be set on the Kubelet. - ginkgo.It("Kubelet should stop when the test acquires the lock on lock file and restart once the lock is released", func() { + ginkgo.It("Kubelet should stop when the test acquires the lock on lock file and restart once the lock is released", func(ctx context.Context) { ginkgo.By("perform kubelet health check to check if kubelet is healthy and running.") // Precautionary check that kubelet is healthy before running the test. 
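(Aside, not part of the patch.) Recent Ginkgo v2 releases accept a single context argument in node callbacks, either as the standard context.Context used throughout this diff or as ginkgo.SpecContext. A short sketch of the two accepted signatures, under that version assumption; spec names are illustrative:

```go
package e2enode

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("context-aware callbacks (sketch)", func() {
	// Form used throughout this patch: the plain standard-library interface.
	ginkgo.It("accepts a context.Context", func(ctx context.Context) {
		_ = ctx.Err() // non-nil once the spec times out or is interrupted
	})

	// Alternative form: SpecContext embeds context.Context and additionally
	// exposes spec metadata such as the current SpecReport.
	ginkgo.It("accepts a SpecContext", func(ctx ginkgo.SpecContext) {
		_ = ctx.SpecReport().FullText()
	})
})
```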
diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index 3fa4f023456..2f2aebcf1f0 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -122,7 +122,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { err := createAndWaitPod(makeLogPod(logPodName, logString)) framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName) }) - ginkgo.It("should print log to correct log path", func() { + ginkgo.It("should print log to correct log path", func(ctx context.Context) { logDir := kubelet.ContainerLogsDir @@ -139,7 +139,7 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logCheckPodName) }) - ginkgo.It("should print log to correct cri log path", func() { + ginkgo.It("should print log to correct cri log path", func(ctx context.Context) { logCRIDir := "/var/log/pods" diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go index 9ec4bcad318..65c1928b8d0 100644 --- a/test/e2e_node/memory_manager_test.go +++ b/test/e2e_node/memory_manager_test.go @@ -378,7 +378,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 - ginkgo.It("should report memory data during request to pod resources GetAllocatableResources", func() { + ginkgo.It("should report memory data during request to pod resources GetAllocatableResources", func(ctx context.Context) { endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -439,7 +439,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.It("should succeed to start the pod", func() { + ginkgo.It("should succeed to start the pod", func(ctx context.Context) { ginkgo.By("Running the test pod") testPod = e2epod.NewPodClient(f).CreateSync(testPod) @@ -464,7 +464,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.It("should succeed to start the pod", func() { + ginkgo.It("should succeed to start the pod", func(ctx context.Context) { ginkgo.By("Running the test pod") testPod = e2epod.NewPodClient(f).CreateSync(testPod) @@ -495,7 +495,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager testPod2 = makeMemoryManagerPod("memory-manager-static", initCtnParams, ctnParams) }) - ginkgo.It("should succeed to start all pods", func() { + ginkgo.It("should succeed to start all pods", func(ctx context.Context) { ginkgo.By("Running the test pod and the test pod 2") testPod = e2epod.NewPodClient(f).CreateSync(testPod) @@ -512,7 +512,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 - ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func() { + ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func(ctx context.Context) { ginkgo.By("Running the test pod and the test pod 2") testPod = e2epod.NewPodClient(f).CreateSync(testPod) @@ -604,7 +604,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.It("should be 
rejected", func() { + ginkgo.It("should be rejected", func(ctx context.Context) { ginkgo.By("Creating the pod") testPod = e2epod.NewPodClient(f).Create(testPod) @@ -663,7 +663,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 - ginkgo.It("should not report any memory data during request to pod resources GetAllocatableResources", func() { + ginkgo.It("should not report any memory data during request to pod resources GetAllocatableResources", func(ctx context.Context) { endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) framework.ExpectNoError(err) @@ -678,7 +678,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager }) // TODO: move the test to pod resource API test suite, see - https://github.com/kubernetes/kubernetes/issues/101945 - ginkgo.It("should not report any memory data during request to pod resources List", func() { + ginkgo.It("should not report any memory data during request to pod resources List", func(ctx context.Context) { testPod = e2epod.NewPodClient(f).CreateSync(testPod) endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket) @@ -702,7 +702,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager } }) - ginkgo.It("should succeed to start the pod", func() { + ginkgo.It("should succeed to start the pod", func(ctx context.Context) { testPod = e2epod.NewPodClient(f).CreateSync(testPod) // it no taste to verify NUMA pinning when the node has only one NUMA node diff --git a/test/e2e_node/mirror_pod_grace_period_test.go b/test/e2e_node/mirror_pod_grace_period_test.go index f31c1525b9a..df86c288ae4 100644 --- a/test/e2e_node/mirror_pod_grace_period_test.go +++ b/test/e2e_node/mirror_pod_grace_period_test.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) - ginkgo.It("mirror pod termination should satisfy grace period when static pod is deleted [NodeConformance]", func() { + ginkgo.It("mirror pod termination should satisfy grace period when static pod is deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -74,7 +74,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { }, 19*time.Second, 200*time.Millisecond).Should(gomega.BeNil()) }) - ginkgo.It("mirror pod termination should satisfy grace period when static pod is updated [NodeConformance]", func() { + ginkgo.It("mirror pod termination should satisfy grace period when static pod is updated [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -102,7 +102,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { framework.ExpectEqual(pod.Spec.Containers[0].Image, image) }) - ginkgo.It("should update a static pod when the static pod is updated multiple times during the graceful termination period [NodeConformance]", func() { + ginkgo.It("should update a static pod when the static pod is updated multiple times during the graceful termination period [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := 
f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index 3f7fb32690e..7f7b6a977b3 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -69,7 +69,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, update Description: Updating a static Pod MUST recreate an updated mirror Pod. Create a static pod, verify that a mirror pod is created. Update the static pod by changing the container image, the mirror pod MUST be re-created and updated with the new image. */ - ginkgo.It("should be updated when static pod updated [NodeConformance]", func() { + ginkgo.It("should be updated when static pod updated [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -96,7 +96,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, delete Description: When a mirror-Pod is deleted then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod, the mirror pod MUST be re-created and running. */ - ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() { + ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -116,7 +116,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, force delete Description: When a mirror-Pod is deleted, forcibly, then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod with delete wait time set to zero forcing immediate deletion, the mirror pod MUST be re-created and running. */ - ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() { + ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -151,7 +151,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, recreate Description: When a static pod's manifest is removed and readded, the mirror pod MUST successfully recreate. Create the static pod, verify it is running, remove its manifest and then add it back, and verify the static pod runs again. 
 	*/
-	ginkgo.It("should successfully recreate when file is removed and recreated [NodeConformance]", func() {
+	ginkgo.It("should successfully recreate when file is removed and recreated [NodeConformance]", func(ctx context.Context) {
 		ns = f.Namespace.Name
 		staticPodName = "static-pod-" + string(uuid.NewUUID())
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go
index 585c294db6d..114404e5e52 100644
--- a/test/e2e_node/node_container_manager_test.go
+++ b/test/e2e_node/node_container_manager_test.go
@@ -67,7 +67,7 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() {
 	f := framework.NewDefaultFramework("node-container-manager")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
-		ginkgo.It("sets up the node and runs the test", func() {
+		ginkgo.It("sets up the node and runs the test", func(ctx context.Context) {
 			framework.ExpectNoError(runTest(f))
 		})
 	})
diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go
index d10bfd9113f..4376ead030b 100644
--- a/test/e2e_node/node_perf_test.go
+++ b/test/e2e_node/node_perf_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -171,7 +172,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		ginkgo.BeforeEach(func() {
 			wl = workloads.NodePerfWorkloads[0]
 		})
-		ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func() {
+		ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func(ctx context.Context) {
 			defer cleanup()
 			runWorkload()
 		})
@@ -180,7 +181,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		ginkgo.BeforeEach(func() {
 			wl = workloads.NodePerfWorkloads[1]
 		})
-		ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func() {
+		ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func(ctx context.Context) {
 			defer cleanup()
 			runWorkload()
 		})
@@ -189,7 +190,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
 		ginkgo.BeforeEach(func() {
 			wl = workloads.NodePerfWorkloads[2]
 		})
-		ginkgo.It("TensorFlow workload", func() {
+		ginkgo.It("TensorFlow workload", func(ctx context.Context) {
 			defer cleanup()
 			runWorkload()
 		})
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go
index 7589c5a1ae2..f6ab45c7b9d 100644
--- a/test/e2e_node/node_problem_detector_linux.go
+++ b/test/e2e_node/node_problem_detector_linux.go
@@ -301,7 +301,7 @@ current-context: local-context
 			hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
 		})

-		ginkgo.It("should generate node condition and events for corresponding errors", func() {
+		ginkgo.It("should generate node condition and events for corresponding errors", func(ctx context.Context) {
 			for _, test := range []struct {
 				description string
 				timestamp   time.Time
diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go
index 22b4d57265b..eacdc0b861d 100644
--- a/test/e2e_node/node_shutdown_linux_test.go
+++ b/test/e2e_node/node_shutdown_linux_test.go
@@ -85,7 +85,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			framework.ExpectNoError(err)
 		})

-		ginkgo.It("should add the DisruptionTarget pod failure condition to the evicted pods", func() {
+		ginkgo.It("should add the DisruptionTarget pod failure condition to the evicted pods", func(ctx context.Context) {
 			nodeName := getNodeName(f)
 			nodeSelector := fields.Set{
 				"spec.nodeName": nodeName,
@@ -188,7 +188,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			framework.ExpectNoError(err)
 		})

-		ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func() {
+		ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func(ctx context.Context) {
 			nodeName := getNodeName(f)
 			nodeSelector := fields.Set{
 				"spec.nodeName": nodeName,
@@ -302,7 +302,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 		})

-		ginkgo.It("should be able to handle a cancelled shutdown", func() {
+		ginkgo.It("should be able to handle a cancelled shutdown", func(ctx context.Context) {
 			ginkgo.By("Emitting Shutdown signal")
 			err := emitSignalPrepareForShutdown(true)
 			framework.ExpectNoError(err)
@@ -326,7 +326,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			}, nodeStatusUpdateTimeout, pollInterval).Should(gomega.BeNil())
 		})

-		ginkgo.It("after restart dbus, should be able to gracefully shutdown", func() {
+		ginkgo.It("after restart dbus, should be able to gracefully shutdown", func(ctx context.Context) {
 			// allows manual restart of dbus to work in Ubuntu.
 			err := overlayDbusConfig()
 			framework.ExpectNoError(err)
@@ -427,7 +427,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			framework.ExpectNoError(err)
 		})

-		ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func() {
+		ginkgo.It("should be able to gracefully shutdown pods with various grace periods", func(ctx context.Context) {
 			nodeName := getNodeName(f)
 			nodeSelector := fields.Set{
 				"spec.nodeName": nodeName,
diff --git a/test/e2e_node/os_label_rename_test.go b/test/e2e_node/os_label_rename_test.go
index e63a75895e6..a1eaf2a4f42 100644
--- a/test/e2e_node/os_label_rename_test.go
+++ b/test/e2e_node/os_label_rename_test.go
@@ -42,7 +42,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 	f := framework.NewDefaultFramework("node-label-reconciliation")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("Kubelet", func() {
-		ginkgo.It("should reconcile the OS and Arch labels when restarted", func() {
+		ginkgo.It("should reconcile the OS and Arch labels when restarted", func(ctx context.Context) {
 			node := getLocalNode(f)
 			e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
 			e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
@@ -63,7 +63,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", fu
 			err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
 			framework.ExpectNoError(err)
 		})
-		ginkgo.It("should reconcile the OS and Arch labels when running", func() {
+		ginkgo.It("should reconcile the OS and Arch labels when running", func(ctx context.Context) {
 			node := getLocalNode(f)
 			e2enode.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go
index b8dd2a05097..a59d772aeb2 100644
--- a/test/e2e_node/pids_test.go
+++ b/test/e2e_node/pids_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"fmt"

 	v1 "k8s.io/api/core/v1"
@@ -87,7 +88,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
 }

 func runPodPidsLimitTests(f *framework.Framework) {
-	ginkgo.It("should set pids.max for Pod", func() {
+	ginkgo.It("should set pids.max for Pod", func(ctx context.Context) {
 		ginkgo.By("by creating a G pod")
 		pod := e2epod.NewPodClient(f).Create(&v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
diff --git a/test/e2e_node/pod_hostnamefqdn_test.go b/test/e2e_node/pod_hostnamefqdn_test.go
index d80a96641ca..d79be35b70a 100644
--- a/test/e2e_node/pod_hostnamefqdn_test.go
+++ b/test/e2e_node/pod_hostnamefqdn_test.go
@@ -21,6 +21,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"crypto/rand"
 	"fmt"
 	"math/big"
@@ -79,7 +80,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 		Testname: Create Pod without fully qualified domain name (FQDN)
 		Description: A Pod that does not define the subdomain field in it spec, does not have FQDN.
 	*/
-	ginkgo.It("a pod without subdomain field does not have FQDN", func() {
+	ginkgo.It("a pod without subdomain field does not have FQDN", func(ctx context.Context) {
 		pod := testPod("hostfqdn")
 		pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"}
 		output := []string{fmt.Sprintf("%s;%s;", pod.ObjectMeta.Name, pod.ObjectMeta.Name)}
@@ -93,7 +94,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 		Description: A Pod that does not define the subdomain field in it spec, does not have FQDN. Hence, setHostnameAsFQDN field has no effect.
 	*/
-	ginkgo.It("a pod without FQDN is not affected by SetHostnameAsFQDN field", func() {
+	ginkgo.It("a pod without FQDN is not affected by SetHostnameAsFQDN field", func(ctx context.Context) {
 		pod := testPod("hostfqdn")
 		// Setting setHostnameAsFQDN field to true should have no effect.
 		setHostnameAsFQDN := true
@@ -110,7 +111,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 		Description: A Pod that defines the subdomain field in it spec has FQDN. hostname command returns shortname (pod name in this case), and hostname -f returns FQDN.
 	*/
-	ginkgo.It("a pod with subdomain field has FQDN, hostname is shortname", func() {
+	ginkgo.It("a pod with subdomain field has FQDN, hostname is shortname", func(ctx context.Context) {
 		pod := testPod("hostfqdn")
 		pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"}
 		subdomain := "t"
@@ -129,7 +130,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
 		Description: A Pod that defines the subdomain field in it spec has FQDN. When setHostnameAsFQDN: true, the hostname is set to be the FQDN. In this case, both commands hostname and hostname -f return the FQDN of the Pod.
 	*/
-	ginkgo.It("a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname", func() {
+	ginkgo.It("a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname", func(ctx context.Context) {
 		pod := testPod("hostfqdn")
 		pod.Spec.Containers[0].Command = []string{"sh", "-c", "echo $(hostname)';'$(hostname -f)';'"}
 		subdomain := "t"
diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index 40eb55ad715..ea49561aba2 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -588,7 +588,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			initialConfig.ReservedSystemCPUs = cpus
 		})

-		ginkgo.It("should return the expected responses", func() {
+		ginkgo.It("should return the expected responses", func(ctx context.Context) {
 			onlineCPUs, err := getOnlineCPUs()
 			framework.ExpectNoError(err)

@@ -616,7 +616,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		})

 		ginkgo.Context("with CPU manager None policy", func() {
-			ginkgo.It("should return the expected responses", func() {
+			ginkgo.It("should return the expected responses", func(ctx context.Context) {
 				// current default is "none" policy - no need to restart the kubelet

 				requireSRIOVDevices()
@@ -672,7 +672,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			initialConfig.ReservedSystemCPUs = cpus
 		})

-		ginkgo.It("should return the expected responses", func() {
+		ginkgo.It("should return the expected responses", func(ctx context.Context) {
 			onlineCPUs, err := getOnlineCPUs()
 			framework.ExpectNoError(err)

@@ -690,7 +690,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 		})

 		ginkgo.Context("with CPU manager None policy", func() {
-			ginkgo.It("should return the expected responses", func() {
+			ginkgo.It("should return the expected responses", func(ctx context.Context) {
 				endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
 				framework.ExpectNoError(err)

@@ -712,7 +712,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			initialConfig.FeatureGates[string(kubefeatures.KubeletPodResourcesGetAllocatable)] = false
 		})

-		ginkgo.It("should return the expected error with the feature gate disabled", func() {
+		ginkgo.It("should return the expected error with the feature gate disabled", func(ctx context.Context) {
 			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
 			framework.ExpectNoError(err)

@@ -760,7 +760,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			initialConfig.ReservedSystemCPUs = cpus
 		})

-		ginkgo.It("should return proper podresources the same as before the restart of kubelet", func() {
+		ginkgo.It("should return proper podresources the same as before the restart of kubelet", func(ctx context.Context) {
 			dpPod := setupKubeVirtDevicePluginOrFail(f)
 			defer teardownKubeVirtDevicePluginOrFail(f, dpPod)

@@ -842,7 +842,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			framework.ExpectNoError(err)
 		})

-		ginkgo.It("should report the values for the podresources metrics", func() {
+		ginkgo.It("should report the values for the podresources metrics", func(ctx context.Context) {
 			// we updated the kubelet config in BeforeEach, so we can assume we start fresh.
 			// being [Serial], we can also assume noone else but us is running pods.
ginkgo.By("Checking the value of the podresources metrics") diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 9e98117c8d7..f4f3315bc8a 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -170,7 +170,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.Describe("QOS containers", func() { ginkgo.Context("On enabling QOS cgroup hierarchy", func() { - ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func() { + ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func(ctx context.Context) { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { return } @@ -185,7 +185,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.Describe("Pod containers [NodeConformance]", func() { ginkgo.Context("On scheduling a Guaranteed Pod", func() { - ginkgo.It("Pod containers should have been created under the cgroup-root", func() { + ginkgo.It("Pod containers should have been created under the cgroup-root", func(ctx context.Context) { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { return } @@ -230,7 +230,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { }) }) ginkgo.Context("On scheduling a BestEffort Pod", func() { - ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func() { + ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func(ctx context.Context) { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { return } @@ -275,7 +275,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { }) }) ginkgo.Context("On scheduling a Burstable Pod", func() { - ginkgo.It("Pod containers should have been created under the Burstable cgroup", func() { + ginkgo.It("Pod containers should have been created under the Burstable cgroup", func(ctx context.Context) { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { return } diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go index bac859738f3..271fa3700e3 100644 --- a/test/e2e_node/resource_metrics_test.go +++ b/test/e2e_node/resource_metrics_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "time" @@ -66,7 +67,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { ginkgo.By("Waiting 15 seconds for cAdvisor to collect 2 stats points") time.Sleep(15 * time.Second) }) - ginkgo.It("should report resource usage through the resource metrics api", func() { + ginkgo.It("should report resource usage through the resource metrics api", func(ctx context.Context) { ginkgo.By("Fetching node so we can match against an appropriate memory limit") node := getLocalNode(f) memoryCapacity := node.Status.Capacity["memory"] diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index 5eac30e5b11..3ffaf01a89b 100644 --- a/test/e2e_node/resource_usage_test.go +++ b/test/e2e_node/resource_usage_test.go @@ -20,6 +20,7 @@ limitations under the License. 
 package e2enode

 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"

@@ -86,7 +87,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
 		for _, testArg := range rTests {
 			itArg := testArg
 			desc := fmt.Sprintf("resource tracking for %d pods per node", itArg.podsNr)
-			ginkgo.It(desc, func() {
+			ginkgo.It(desc, func(ctx context.Context) {
 				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)

 				runResourceUsageTest(f, rc, itArg)
@@ -116,7 +117,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
 		for _, testArg := range rTests {
 			itArg := testArg
 			desc := fmt.Sprintf("resource tracking for %d pods per node [Benchmark]", itArg.podsNr)
-			ginkgo.It(desc, func() {
+			ginkgo.It(desc, func(ctx context.Context) {
 				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)

 				runResourceUsageTest(f, rc, itArg)
diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go
index cc2bb767992..fea650fe3c0 100644
--- a/test/e2e_node/restart_test.go
+++ b/test/e2e_node/restart_test.go
@@ -88,7 +88,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("Container Runtime", func() {
 		ginkgo.Context("Network", func() {
-			ginkgo.It("should recover from ip leak", func() {
+			ginkgo.It("should recover from ip leak", func(ctx context.Context) {
 				pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
 				ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 				createBatchPodWithRateControl(f, pods, podCreationInterval)
@@ -144,7 +144,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	})

 	ginkgo.Context("Dbus", func() {
-		ginkgo.It("should continue to run pods after a restart", func() {
+		ginkgo.It("should continue to run pods after a restart", func(ctx context.Context) {
 			// Allow dbus to be restarted on ubuntu
 			err := overlayDbusConfig()
 			framework.ExpectNoError(err)
@@ -198,7 +198,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	})

 	ginkgo.Context("Kubelet", func() {
-		ginkgo.It("should correctly account for terminated pods after restart", func() {
+		ginkgo.It("should correctly account for terminated pods after restart", func(ctx context.Context) {
 			node := getLocalNode(f)
 			cpus := node.Status.Allocatable[v1.ResourceCPU]
 			numCpus := int((&cpus).Value())
diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go
index 2c7778ccf13..6157435fe95 100644
--- a/test/e2e_node/runtime_conformance_test.go
+++ b/test/e2e_node/runtime_conformance_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -67,7 +68,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
 				},
 			} {
 				testCase := testCase
-				ginkgo.It(testCase.description+" [NodeConformance]", func() {
+				ginkgo.It(testCase.description+" [NodeConformance]", func(ctx context.Context) {
 					name := "image-pull-test"
 					command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
 					container := node.ConformanceContainer{
diff --git a/test/e2e_node/runtimeclass_test.go b/test/e2e_node/runtimeclass_test.go
index a64260d53df..2cdb85c942b 100644
--- a/test/e2e_node/runtimeclass_test.go
+++ b/test/e2e_node/runtimeclass_test.go
@@ -95,7 +95,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Describe("PodOverhead cgroup accounting", func() {
 		ginkgo.Context("On running pod with PodOverhead defined", func() {
-			ginkgo.It("Pod cgroup should be sum of overhead and resource limits", func() {
+			ginkgo.It("Pod cgroup should be sum of overhead and resource limits", func(ctx context.Context) {
 				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 					return
 				}
diff --git a/test/e2e_node/seccompdefault_test.go b/test/e2e_node/seccompdefault_test.go
index 7d61ff41e71..85f2ae86718 100644
--- a/test/e2e_node/seccompdefault_test.go
+++ b/test/e2e_node/seccompdefault_test.go
@@ -20,6 +20,8 @@ limitations under the License.
 package e2enode

 import (
+	"context"
+
 	"github.com/onsi/ginkgo/v2"

 	v1 "k8s.io/api/core/v1"
@@ -59,12 +61,12 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly
 		}
 	}

-	ginkgo.It("should use the default seccomp profile when unspecified", func() {
+	ginkgo.It("should use the default seccomp profile when unspecified", func(ctx context.Context) {
 		pod := newPod(nil)
 		e2eoutput.TestContainerOutput(f, "SeccompDefault", pod, 0, []string{"2"})
 	})

-	ginkgo.It("should use unconfined when specified", func() {
+	ginkgo.It("should use unconfined when specified", func(ctx context.Context) {
 		pod := newPod(&v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}})
 		e2eoutput.TestContainerOutput(f, "SeccompDefault-unconfined", pod, 0, []string{"0"})
 	})
diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go
index 4d691f49254..5e8afbe1427 100644
--- a/test/e2e_node/security_context_test.go
+++ b/test/e2e_node/security_context_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"fmt"
 	"net"
 	"os/exec"
@@ -43,7 +44,7 @@ var _ = SIGDescribe("Security Context", func() {
 	})

 	ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() {
-		ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
+		ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func(ctx context.Context) {
 			ginkgo.By("Create a pod with isolated PID namespaces.")
 			e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "isolated-pid-ns-test-pod"},
@@ -72,7 +73,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func() {
+		ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func(ctx context.Context) {
 			ginkgo.By("Create a pod with shared PID namespace.")
 			e2epod.NewPodClient(f).CreateSync(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
@@ -146,7 +147,7 @@ var _ = SIGDescribe("Security Context", func() {
 			nginxPid = strings.TrimSpace(output)
 		})

-		ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
+		ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func(ctx context.Context) {
 			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -166,7 +167,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
+		ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -222,7 +223,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
 		})

-		ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
+		ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, true)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -237,7 +238,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
+		ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, false)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -305,7 +306,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Opened a new tcp port %q", listeningPort)
 		})

-		ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
+		ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func(ctx context.Context) {
 			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -319,7 +320,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
+		ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go
index 411bce3d982..e4113ae032e 100644
--- a/test/e2e_node/summary_test.go
+++ b/test/e2e_node/summary_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"fmt"
 	"os"
 	"strings"
@@ -53,7 +54,7 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() {
 			ginkgo.By("Recording processes in system cgroups")
 			recordSystemCgroupProcesses()
 		})
-		ginkgo.It("should report resource usage through the stats api", func() {
+		ginkgo.It("should report resource usage through the stats api", func(ctx context.Context) {
 			const pod0 = "stats-busybox-0"
 			const pod1 = "stats-busybox-1"

diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go
index f388e10ed99..7141a24a945 100644
--- a/test/e2e_node/system_node_critical_test.go
+++ b/test/e2e_node/system_node_critical_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2enode

 import (
+	"context"
 	"fmt"
 	"os"
 	"time"
@@ -81,7 +82,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFea
 		}, time.Minute, time.Second*2).Should(gomega.BeNil())
 	})

-	ginkgo.It("should not be evicted upon DiskPressure", func() {
+	ginkgo.It("should not be evicted upon DiskPressure", func(ctx context.Context) {
 		ginkgo.By("wait for the node to have DiskPressure condition")
 		gomega.Eventually(func() error {
 			if hasNodeCondition(f, v1.NodeDiskPressure) {
diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go
index 8a9bed7fd1a..2cb9b88b15b 100644
--- a/test/e2e_node/topology_manager_test.go
+++ b/test/e2e_node/topology_manager_test.go
@@ -884,7 +884,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 		topologymanager.PolicyNone,
 	}

-	ginkgo.It("run Topology Manager policy test suite", func() {
+	ginkgo.It("run Topology Manager policy test suite", func(ctx context.Context) {
 		oldCfg, err = getCurrentKubeletConfig()
 		framework.ExpectNoError(err)

@@ -901,7 +901,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 		}
 	})

-	ginkgo.It("run Topology Manager node alignment test suite", func() {
+	ginkgo.It("run Topology Manager node alignment test suite", func(ctx context.Context) {
 		numaNodes, coreCount := hostPrecheck()

 		configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
@@ -925,7 +925,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 		}
 	})

-	ginkgo.It("run the Topology Manager pod scope alignment test suite", func() {
+	ginkgo.It("run the Topology Manager pod scope alignment test suite", func(ctx context.Context) {
 		numaNodes, coreCount := hostPrecheck()

 		configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go
index 21afa80118b..ce350afad34 100644
--- a/test/e2e_node/volume_manager_test.go
+++ b/test/e2e_node/volume_manager_test.go
@@ -37,7 +37,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 	ginkgo.Describe("Volume Manager", func() {
 		ginkgo.Context("On termination of pod with memory backed volume", func() {
-			ginkgo.It("should remove the volume from the node [NodeConformance]", func() {
+			ginkgo.It("should remove the volume from the node [NodeConformance]", func(ctx context.Context) {
 				var (
 					memoryBackedPod *v1.Pod
 					volumeName      string