Merge pull request #114401 from pohly/e2e-ginkgo-timeouts-callbacks

e2e: accept context from Ginkgo
Authored by Kubernetes Prow Robot on 2022-12-11 09:07:14 -08:00; committed by GitHub
commit f256321e76
293 changed files with 1564 additions and 1485 deletions
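
Before the per-file diffs, a minimal sketch (not taken verbatim from the diff) of the pattern this commit applies across the e2e suite: Ginkgo v2 can inject a context.Context into each It/ConformanceIt body, so callbacks now take ctx as a parameter and thread it into client calls instead of constructing their own context.TODO() or context.Background(). The client and namespace below are assumed to exist and are illustrative only.

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func registerSpecs(client kubernetes.Interface, namespace string) {
	// Old style: the callback takes no arguments and builds a detached context of its own.
	ginkgo.It("lists pods (before)", func() {
		_, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	})

	// New style: Ginkgo supplies ctx and cancels it when the spec times out or is
	// interrupted, so the API call is aborted together with the spec.
	ginkgo.It("lists pods (after)", func(ctx context.Context) {
		_, _ = client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
	})
}
```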

View File

@ -30,7 +30,7 @@ import (
)
const (
//e.g. framework.ConformanceIt("should provide secure master service ", func() {
//e.g. framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) {
patternStartConformance = `framework.ConformanceIt\(.*, func\(\) {$`
patternEndConformance = `}\)$`
patternSkip = `e2eskipper.Skip.*\(`
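
The hunk above updates only the example comment; the anchored start pattern shown still matches only the zero-argument callback form. A hypothetical adjustment (an assumption for illustration, not taken from this diff) that would also recognize the new signature could look like:

```go
const (
	// Matches the pre-change callbacks: framework.ConformanceIt("...", func() {
	patternStartConformanceOld = `framework.ConformanceIt\(.*, func\(\) {$`
	// Hypothetical pattern for the post-change callbacks:
	// framework.ConformanceIt("...", func(ctx context.Context) {
	patternStartConformanceNew = `framework.ConformanceIt\(.*, func\(ctx context\.Context\) {$`
)
```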

View File

@ -19,7 +19,7 @@ Example:
Testname: Kubelet, log output, default
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
*/
framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) {
```
would generate the following documentation for the test. Note that the "TestName" from the Documentation above will

View File

@ -63,7 +63,7 @@ import (
)
var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
/* ... */
ginkgo.It("should sign the new added bootstrap tokens", func() {
ginkgo.It("should sign the new added bootstrap tokens", func(ctx context.Context) {
/* ... */
})
/* etc */

View File

@ -83,7 +83,7 @@ var _ = SIGDescribe("Aggregator", func() {
Description: Ensure that the sample-apiserver code from 1.17 and compiled against 1.17
will work on the current Aggregator/API-Server.
*/
framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func() {
framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func(ctx context.Context) {
// Testing a 1.17 version of the sample-apiserver
TestSampleAPIServer(f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer))
})

View File

@ -81,7 +81,7 @@ var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func(
f := framework.NewDefaultFramework("kube-apiserver-identity")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func() {
ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce")
client := f.ClientSet

View File

@ -68,7 +68,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, Create
Description: Apply an object. An apply on an object that does not exist MUST create the object.
*/
ginkgo.It("should create an applied object if it does not already exist", func() {
ginkgo.It("should create an applied object if it does not already exist", func(ctx context.Context) {
testCases := []struct {
resource string
name string
@ -162,7 +162,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, Subresource
Description: Apply a resource and issue a subsequent apply on a subresource. The subresource MUST be updated with the applied object contents.
*/
ginkgo.It("should work for subresources", func() {
ginkgo.It("should work for subresources", func(ctx context.Context) {
{
testCases := []struct {
resource string
@ -270,7 +270,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, unset field
Description: Apply an object. Issue a subsequent apply that removes a field. The particular field MUST be removed.
*/
ginkgo.It("should remove a field if it is owned but removed in the apply request", func() {
ginkgo.It("should remove a field if it is owned but removed in the apply request", func(ctx context.Context) {
obj := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
@ -373,7 +373,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, unset field shared
Description: Apply an object. Unset ownership of a field that is also owned by other managers and make a subsequent apply request. The unset field MUST not be removed from the object.
*/
ginkgo.It("should not remove a field if an owner unsets the field but other managers still have ownership of the field", func() {
ginkgo.It("should not remove a field if an owner unsets the field but other managers still have ownership of the field", func(ctx context.Context) {
// spec.replicas is a optional, defaulted field
// spec.template.spec.hostname is an optional, non-defaulted field
apply := []byte(`{
@ -482,7 +482,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, Force Apply
Description: Apply an object. Force apply a modified version of the object such that a conflict will exist in the managed fields. The force apply MUST successfully update the object.
*/
ginkgo.It("should ignore conflict errors if force apply is used", func() {
ginkgo.It("should ignore conflict errors if force apply is used", func(ctx context.Context) {
obj := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
@ -569,7 +569,7 @@ var _ = SIGDescribe("ServerSideApply", func() {
Testname: Server Side Apply, CRD
Description: Create a CRD and apply a CRD resource. Subsequent apply requests that do not conflict with the previous ones should update the object. Apply requests that cause conflicts should fail.
*/
ginkgo.It("should work for CRDs", func() {
ginkgo.It("should work for CRDs", func(ctx context.Context) {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("%s", err)
@ -967,7 +967,7 @@ spec:
Testname: Server Side Apply, Update take ownership
Description: Apply an object. Send an Update request which should take ownership of a field. The field should be owned by the new manager and a subsequent apply from the original manager MUST not change the field it does not have ownership of.
*/
ginkgo.It("should give up ownership of a field if forced applied by a controller", func() {
ginkgo.It("should give up ownership of a field if forced applied by a controller", func(ctx context.Context) {
// Applier creates a deployment with replicas set to 3
apply := []byte(`{
"apiVersion": "apps/v1",

View File

@ -76,7 +76,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
})
})
ginkgo.It("should return chunks of results for list calls", func() {
ginkgo.It("should return chunks of results for list calls", func(ctx context.Context) {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
@ -123,7 +123,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
})
ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func() {
ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func(ctx context.Context) {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)

View File

@ -146,7 +146,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
Description: Register a conversion webhook and a custom resource definition. Create a v1 custom
resource. Attempts to read it at v2 MUST succeed.
*/
framework.ConformanceIt("should be able to convert from CR v1 to CR v2", func() {
framework.ConformanceIt("should be able to convert from CR v1 to CR v2", func(ctx context.Context) {
testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *apiextensionsv1.CustomResourceDefinition) {
crd.Spec.Versions = apiVersions
crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{
@ -181,7 +181,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
v1. Change the custom resource definition storage to v2. Create a custom resource stored at v2. Attempt to list
the custom resources at v2; the list result MUST contain both custom resources at v2.
*/
framework.ConformanceIt("should be able to convert a non homogeneous list of CRs", func() {
framework.ConformanceIt("should be able to convert a non homogeneous list of CRs", func(ctx context.Context) {
testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *apiextensionsv1.CustomResourceDefinition) {
crd.Spec.Versions = apiVersions
crd.Spec.Conversion = &apiextensionsv1.CustomResourceConversion{

View File

@ -66,7 +66,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
explain the nested custom resource properties.
All validation should be the same.
*/
framework.ConformanceIt("works for CRD with validation schema", func() {
framework.ConformanceIt("works for CRD with validation schema", func(ctx context.Context) {
crd, err := setupCRD(f, schemaFoo, "foo", "v1")
if err != nil {
framework.Failf("%v", err)
@ -150,7 +150,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
Attempt to create and apply a change a custom resource, via kubectl; kubectl validation MUST accept unknown
properties. Attempt kubectl explain; the output MUST contain a valid DESCRIPTION stanza.
*/
framework.ConformanceIt("works for CRD without validation schema", func() {
framework.ConformanceIt("works for CRD without validation schema", func(ctx context.Context) {
crd, err := setupCRD(f, nil, "empty", "v1")
if err != nil {
framework.Failf("%v", err)
@ -191,7 +191,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
Attempt to create and apply a change a custom resource, via kubectl; kubectl validation MUST accept unknown
properties. Attempt kubectl explain; the output MUST show the custom resource KIND.
*/
framework.ConformanceIt("works for CRD preserving unknown fields at the schema root", func() {
framework.ConformanceIt("works for CRD preserving unknown fields at the schema root", func(ctx context.Context) {
crd, err := setupCRDAndVerifySchema(f, schemaPreserveRoot, nil, "unknown-at-root", "v1")
if err != nil {
framework.Failf("%v", err)
@ -233,7 +233,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
properties. Attempt kubectl explain; the output MUST show that x-preserve-unknown-properties is used on the
nested field.
*/
framework.ConformanceIt("works for CRD preserving unknown fields in an embedded object", func() {
framework.ConformanceIt("works for CRD preserving unknown fields in an embedded object", func(ctx context.Context) {
crd, err := setupCRDAndVerifySchema(f, schemaPreserveNested, nil, "unknown-in-nested", "v1")
if err != nil {
framework.Failf("%v", err)
@ -273,7 +273,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
Description: Register multiple custom resource definitions spanning different groups and versions;
OpenAPI definitions MUST be published for custom resource definitions.
*/
framework.ConformanceIt("works for multiple CRDs of different groups", func() {
framework.ConformanceIt("works for multiple CRDs of different groups", func(ctx context.Context) {
ginkgo.By("CRs in different groups (two CRDs) show up in OpenAPI documentation")
crdFoo, err := setupCRD(f, schemaFoo, "foo", "v1")
if err != nil {
@ -306,7 +306,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
Description: Register a custom resource definition with multiple versions; OpenAPI definitions MUST be published
for custom resource definitions.
*/
framework.ConformanceIt("works for multiple CRDs of same group but different versions", func() {
framework.ConformanceIt("works for multiple CRDs of same group but different versions", func(ctx context.Context) {
ginkgo.By("CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation")
crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
if err != nil {
@ -354,7 +354,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
Description: Register multiple custom resource definitions in the same group and version but spanning different kinds;
OpenAPI definitions MUST be published for custom resource definitions.
*/
framework.ConformanceIt("works for multiple CRDs of same group and version but different kinds", func() {
framework.ConformanceIt("works for multiple CRDs of same group and version but different kinds", func(ctx context.Context) {
ginkgo.By("CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation")
crdFoo, err := setupCRD(f, schemaFoo, "common-group", "v6")
if err != nil {
@ -388,7 +388,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
for custom resource definitions. Rename one of the versions of the custom resource definition via a patch;
OpenAPI definitions MUST update to reflect the rename.
*/
framework.ConformanceIt("updates the published spec when one version gets renamed", func() {
framework.ConformanceIt("updates the published spec when one version gets renamed", func(ctx context.Context) {
ginkgo.By("set up a multi version CRD")
crdMultiVer, err := setupCRD(f, schemaFoo, "multi-ver", "v2", "v3")
if err != nil {
@ -439,7 +439,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
for custom resource definitions. Update the custom resource definition to not serve one of the versions. OpenAPI
definitions MUST be updated to not contain the version that is no longer served.
*/
framework.ConformanceIt("removes definition from spec when one version gets changed to not be served", func() {
framework.ConformanceIt("removes definition from spec when one version gets changed to not be served", func(ctx context.Context) {
ginkgo.By("set up a multi version CRD")
crd, err := setupCRD(f, schemaFoo, "multi-to-single-ver", "v5", "v6alpha1")
if err != nil {
@ -479,7 +479,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
})
// Marked as flaky until https://github.com/kubernetes/kubernetes/issues/65517 is solved.
ginkgo.It("[Flaky] kubectl explain works for CR with the same resource name as built-in object.", func() {
ginkgo.It("[Flaky] kubectl explain works for CR with the same resource name as built-in object.", func(ctx context.Context) {
customServiceShortName := fmt.Sprintf("ksvc-%d", time.Now().Unix()) // make short name unique
opt := func(crd *apiextensionsv1.CustomResourceDefinition) {
crd.ObjectMeta = metav1.ObjectMeta{Name: "services." + crd.Spec.Group}

View File

@ -94,7 +94,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
}
}`))
ginkgo.It("MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules", func() {
ginkgo.It("MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules", func(ctx context.Context) {
ginkgo.By("Creating a custom resource definition with validation rules")
crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)
crd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
@ -124,7 +124,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}}, metav1.CreateOptions{})
framework.ExpectNoError(err, "validation rules satisfied")
})
ginkgo.It("MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules", func() {
ginkgo.It("MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules", func(ctx context.Context) {
ginkgo.By("Creating a custom resource definition with validation rules")
crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)
crd, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
@ -156,7 +156,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
})
ginkgo.It("MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist", func() {
ginkgo.It("MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist", func(ctx context.Context) {
ginkgo.By("Defining a custom resource definition with a validation rule that refers to a property that do not exist")
var schemaWithInvalidValidationRule = unmarshallSchema([]byte(`{
"type":"object",
@ -181,7 +181,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
})
ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error", func() {
ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error", func(ctx context.Context) {
ginkgo.By("Defining a custom resource definition that contains a validation rule with a syntax error")
var schemaWithSyntaxErrorRule = unmarshallSchema([]byte(`{
"type":"object",
@ -203,7 +203,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
})
ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit", func() {
ginkgo.It("MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit", func(ctx context.Context) {
ginkgo.By("Defining a custom resource definition that contains a validation rule that exceeds the cost limit")
var schemaWithExpensiveRule = unmarshallSchema([]byte(`{
"type":"object",
@ -236,7 +236,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
})
ginkgo.It("MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution", func() {
ginkgo.It("MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution", func(ctx context.Context) {
ginkgo.By("Defining a custom resource definition including an expensive rule on a large amount of data")
crd := fixtures.NewRandomNameV1CustomResourceDefinitionWithSchema(v1.NamespaceScoped, schemaWithValidationExpression, false)
_, err := fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
@ -266,7 +266,7 @@ var _ = SIGDescribe("CustomResourceValidationRules [Privileged:ClusterAdmin]", f
}
})
ginkgo.It("MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule", func() {
ginkgo.It("MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule", func(ctx context.Context) {
ginkgo.By("Defining a custom resource definition with a x-kubernetes-validations transition rule")
var schemaWithTransitionRule = unmarshallSchema([]byte(`{
"type":"object",

View File

@ -48,7 +48,7 @@ var _ = SIGDescribe("CustomResourceDefinition Watch [Privileged:ClusterAdmin]",
Description: Create a Custom Resource Definition. Attempt to watch it; the watch MUST observe create,
modify and delete events.
*/
framework.ConformanceIt("watch on custom resource definition objects", func() {
framework.ConformanceIt("watch on custom resource definition objects", func(ctx context.Context) {
const (
watchCRNameA = "name1"

View File

@ -55,7 +55,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
Create the custom resource definition and then delete it. The creation and deletion MUST
be successful.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func(ctx context.Context) {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "loading config")
@ -82,7 +82,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
custom resource definitions via delete collection; the delete MUST be successful and MUST delete only the
labeled custom resource definitions.
*/
framework.ConformanceIt("listing custom resource definition objects works ", func() {
framework.ConformanceIt("listing custom resource definition objects works ", func(ctx context.Context) {
testListSize := 10
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "loading config")
@ -142,7 +142,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
Description: Create a custom resource definition. Attempt to read, update and patch its status sub-resource;
all mutating sub-resource operations MUST be visible to subsequent reads.
*/
framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func() {
framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func(ctx context.Context) {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "loading config")
apiExtensionClient, err := clientset.NewForConfig(config)
@ -195,7 +195,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
Description: Fetch /apis, /apis/apiextensions.k8s.io, and /apis/apiextensions.k8s.io/v1 discovery documents,
and ensure they indicate CustomResourceDefinition apiextensions.k8s.io/v1 resources are available.
*/
framework.ConformanceIt("should include custom resource definition resources in discovery documents", func() {
framework.ConformanceIt("should include custom resource definition resources in discovery documents", func(ctx context.Context) {
{
ginkgo.By("fetching the /apis discovery document")
apiGroupList := &metav1.APIGroupList{}
@ -266,7 +266,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
the default is applied. Create another CR. Remove default, add default for another field and read CR until
new field is defaulted, but old default stays.
*/
framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func() {
framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func(ctx context.Context) {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "loading config")
apiExtensionClient, err := clientset.NewForConfig(config)

View File

@ -49,7 +49,7 @@ var _ = SIGDescribe("Discovery", func() {
setupServerCert(namespaceName, serviceName)
})
ginkgo.It("should accurately determine present and missing resources", func() {
ginkgo.It("should accurately determine present and missing resources", func(ctx context.Context) {
// checks that legacy api group resources function
ok, err := clientdiscovery.IsResourceEnabled(f.ClientSet.Discovery(), schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"})
framework.ExpectNoError(err)
@ -76,7 +76,7 @@ var _ = SIGDescribe("Discovery", func() {
}
})
ginkgo.It("Custom resource should have storage version hash", func() {
ginkgo.It("Custom resource should have storage version hash", func(ctx context.Context) {
testcrd, err := crd.CreateTestCRD(f)
if err != nil {
return
@ -119,7 +119,7 @@ var _ = SIGDescribe("Discovery", func() {
Description: Ensure that a list of apis is retrieved.
Each api group found MUST return a valid PreferredVersion unless the group suffix is example.com.
*/
framework.ConformanceIt("should validate PreferredVersion for each APIGroup", func() {
framework.ConformanceIt("should validate PreferredVersion for each APIGroup", func(ctx context.Context) {
// get list of APIGroup endpoints
list := &metav1.APIGroupList{}

View File

@ -60,7 +60,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should recover from network partition with master", func() {
ginkgo.It("should recover from network partition with master", func(ctx context.Context) {
etcdFailTest(
f,
"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
@ -68,7 +68,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
)
})
ginkgo.It("should recover from SIGKILL", func() {
ginkgo.It("should recover from SIGKILL", func(ctx context.Context) {
etcdFailTest(
f,
"pgrep etcd | xargs -I {} sudo kill -9 {}",

View File

@ -55,7 +55,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
f := framework.NewDefaultFramework("apf")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration", func() {
ginkgo.It("should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration", func(ctx context.Context) {
testingFlowSchemaName := "e2e-testing-flowschema"
testingPriorityLevelName := "e2e-testing-prioritylevel"
matchingUsername := "noxu"
@ -97,7 +97,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
// clients making requests at different rates, we test to make sure that the
// higher QPS client cannot drown out the other one despite having higher
// priority.
ginkgo.It("should ensure that requests can't be drowned out (priority)", func() {
ginkgo.It("should ensure that requests can't be drowned out (priority)", func(ctx context.Context) {
// See https://github.com/kubernetes/kubernetes/issues/96710
ginkgo.Skip("skipping test until flakiness is resolved")
@ -184,7 +184,7 @@ var _ = SIGDescribe("API priority and fairness", func() {
// and priority level. We expect APF's "ByUser" flow distinguisher to isolate
// the two clients and not allow one client to drown out the other despite
// having a higher QPS.
ginkgo.It("should ensure that requests can't be drowned out (fairness)", func() {
ginkgo.It("should ensure that requests can't be drowned out (fairness)", func(ctx context.Context) {
// See https://github.com/kubernetes/kubernetes/issues/96710
ginkgo.Skip("skipping test until flakiness is resolved")

View File

@ -309,7 +309,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
framework.ConformanceIt("should delete pods created by rc when not orphaning", func(ctx context.Context) {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@ -367,7 +367,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func(ctx context.Context) {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@ -436,7 +436,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// deleteOptions.OrphanDependents is deprecated in 1.7 and preferred to use the PropagationPolicy.
// Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote for conformance in future.
ginkgo.It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
ginkgo.It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func(ctx context.Context) {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@ -488,7 +488,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment and also the Pods that belong to the deployments MUST be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func(ctx context.Context) {
clientSet := f.ClientSet
deployClient := clientSet.AppsV1().Deployments(f.Namespace.Name)
rsClient := clientSet.AppsV1().ReplicaSets(f.Namespace.Name)
@ -547,7 +547,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, also the Pods created by the deployments MUST be orphaned.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func(ctx context.Context) {
clientSet := f.ClientSet
deployClient := clientSet.AppsV1().Deployments(f.Namespace.Name)
rsClient := clientSet.AppsV1().ReplicaSets(f.Namespace.Name)
@ -647,7 +647,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func(ctx context.Context) {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@ -732,7 +732,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create second replication controller RC2 and set RC2 as owner for half of those replicas. Once RC1 is created and the all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods that has RC2 as owner MUST not be deleted or have a deletion timestamp. Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers.
*/
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func(ctx context.Context) {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@ -846,7 +846,7 @@ var _ = SIGDescribe("Garbage collector", func() {
Testname: Garbage Collector, dependency cycle
Description: Create three pods, patch them with Owner references such that pod1 has pod3, pod2 has pod1 and pod3 has pod2 as owner references respectively. Delete pod1 MUST delete all pods. The dependency cycle MUST not block the garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
framework.ConformanceIt("should not be blocked by dependency circle", func(ctx context.Context) {
clientSet := f.ClientSet
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1Name := "pod1"
@ -902,7 +902,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
})
ginkgo.It("should support cascading deletion of custom resources", func() {
ginkgo.It("should support cascading deletion of custom resources", func(ctx context.Context) {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
@ -1037,7 +1037,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
})
ginkgo.It("should support orphan deletion of custom resources", func() {
ginkgo.It("should support orphan deletion of custom resources", func(ctx context.Context) {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)
@ -1142,7 +1142,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
})
ginkgo.It("should delete jobs and pods created by cronjob", func() {
ginkgo.It("should delete jobs and pods created by cronjob", func(ctx context.Context) {
ginkgo.By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")

View File

@ -102,7 +102,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func(ctx context.Context) {
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
ginkgo.By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
@ -216,7 +216,7 @@ var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("should create v1 cronJobs, delete cronJobs, watch cronJobs", func() {
ginkgo.It("should create v1 cronJobs, delete cronJobs, watch cronJobs", func(ctx context.Context) {
cronJobClient := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name)
ginkgo.By("constructing the cronJob")
name := "cronjob" + string(uuid.NewUUID())

View File

@ -119,7 +119,7 @@ var _ = SIGDescribe("health handlers", func() {
f := framework.NewDefaultFramework("health")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("should contain necessary checks", func() {
ginkgo.It("should contain necessary checks", func(ctx context.Context) {
ginkgo.By("/health")
err := testPath(f.ClientSet, "/healthz?verbose=1", requiredHealthzChecks)
framework.ExpectNoError(err)

View File

@ -265,7 +265,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
The Namespace is patched.
The Namespace and MUST now include the new Label.
*/
framework.ConformanceIt("should patch a Namespace", func() {
framework.ConformanceIt("should patch a Namespace", func(ctx context.Context) {
ginkgo.By("creating a Namespace")
namespaceName := "nspatchtest-" + string(uuid.NewUUID())
ns, err := f.CreateNamespace(namespaceName, nil)
@ -296,7 +296,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
equal the new values. Given the updating of the namespace status, the fields MUST
equal the new values.
*/
framework.ConformanceIt("should apply changes to a namespace status", func() {
framework.ConformanceIt("should apply changes to a namespace status", func(ctx context.Context) {
ns := f.Namespace.Name
dc := f.DynamicClient
nsResource := v1.SchemeGroupVersion.WithResource("namespaces")
@ -363,7 +363,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
Description: When updating the namespace it MUST
succeed and the field MUST equal the new value.
*/
framework.ConformanceIt("should apply an update to a Namespace", func() {
framework.ConformanceIt("should apply an update to a Namespace", func(ctx context.Context) {
var err error
var updatedNamespace *v1.Namespace
ns := f.Namespace.Name
@ -391,7 +391,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
fake finalizer MUST be found. Removing the fake finalizer from
the namespace MUST succeed and MUST NOT be found.
*/
framework.ConformanceIt("should apply a finalizer to a Namespace", func() {
framework.ConformanceIt("should apply a finalizer to a Namespace", func(ctx context.Context) {
fakeFinalizer := v1.FinalizerName("e2e.example.com/fakeFinalizer")
var updatedNamespace *v1.Namespace

View File

@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"io"
"net/http"
"strings"
@ -35,7 +36,7 @@ var _ = SIGDescribe("Server request timeout", func() {
f := framework.NewDefaultFramework("request-timeout")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("should return HTTP status code 400 if the user specifies an invalid timeout in the request URL", func() {
ginkgo.It("should return HTTP status code 400 if the user specifies an invalid timeout in the request URL", func(ctx context.Context) {
rt := getRoundTripper(f)
req := newRequest(f, "invalid")
@ -53,7 +54,7 @@ var _ = SIGDescribe("Server request timeout", func() {
}
})
ginkgo.It("the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed", func() {
ginkgo.It("the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed", func(ctx context.Context) {
rt := getRoundTripper(f)
// Choose a timeout that exceeds the default timeout (60s) enforced by the apiserver
req := newRequest(f, "3m")
@ -67,7 +68,7 @@ var _ = SIGDescribe("Server request timeout", func() {
}
})
ginkgo.It("default timeout should be used if the specified timeout in the request URL is 0s", func() {
ginkgo.It("default timeout should be used if the specified timeout in the request URL is 0s", func(ctx context.Context) {
rt := getRoundTripper(f)
req := newRequest(f, "0s")

View File

@ -72,7 +72,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Testname: ResourceQuota, object count quota, resourcequotas
Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
*/
framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func() {
framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -97,7 +97,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a Service. Its creation MUST be successful and resource usage count against the Service object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the Service. Deletion MUST succeed and resource usage count against the Service object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -157,7 +157,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a Secret. Its creation MUST be successful and resource usage count against the Secret object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the Secret. Deletion MUST succeed and resource usage count against the Secret object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func(ctx context.Context) {
ginkgo.By("Discovering how many secrets are in namespace by default")
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
@ -227,7 +227,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Update the successfully created pod's resource requests. Updation MUST fail as a Pod can not dynamically update its resource requirements.
Delete the successfully created Pod. Pod Deletion MUST be scuccessful and it MUST release the allocated resource counts from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -323,7 +323,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a ConfigMap. Its creation MUST be successful and resource usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ConfigMap. Deletion MUST succeed and resource usage count against the ConfigMap object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func(ctx context.Context) {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
@ -389,7 +389,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a ReplicationController. Its creation MUST be successful and resource usage count against the ReplicationController object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ReplicationController. Deletion MUST succeed and resource usage count against the ReplicationController object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -445,7 +445,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a ReplicaSet. Its creation MUST be successful and resource usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
Delete the ReplicaSet. Deletion MUST succeed and resource usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus of the ResourceQuota.
*/
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func() {
framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -492,7 +492,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Delete the PVC. Deletion MUST succeed and resource usage count against its PVC and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
[NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func() {
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -542,7 +542,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Delete the PVC. Deletion MUST succeed and resource usage count against PVC, storageClass and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
[NotConformancePromotable] as test suite do not have any e2e at this moment which are explicitly verifying PV and PVC behaviour.
*/
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func() {
ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func(ctx context.Context) {
ginkgo.By("Counting existing ResourceQuota")
c, err := countResourceQuota(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
@ -594,7 +594,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func() {
ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func(ctx context.Context) {
ginkgo.By("Creating a Custom Resource Definition")
testcrd, err := crd.CreateTestCRD(f)
framework.ExpectNoError(err)
@ -687,7 +687,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a pod with specified activeDeadlineSeconds and resourceRequirements for CPU and Memory fall within quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'Terminating' scoped ResourceQuota but MUST NOT in 'NotTerminating' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'Terminating' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func() {
framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func(ctx context.Context) {
ginkgo.By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
@ -800,7 +800,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
Create a 'NotBestEffort' Pod by explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota but MUST NOT in 'BestEffort' scoped ResourceQuota.
Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of 'NotBestEffort' scoped ResourceQuota.
*/
framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func() {
framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func(ctx context.Context) {
ginkgo.By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
framework.ExpectNoError(err)
@ -881,7 +881,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
When ResourceQuota is updated to modify CPU and Memory quota limits, update MUST succeed with updated values for CPU and Memory limits.
When ResourceQuota is deleted, it MUST not be available in the namespace.
*/
framework.ConformanceIt("should be able to update and delete ResourceQuota.", func() {
framework.ConformanceIt("should be able to update and delete ResourceQuota.", func(ctx context.Context) {
client := f.ClientSet
ns := f.Namespace.Name
@ -940,7 +940,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
the new values. It MUST succeed at deleting a collection of
ResourceQuota via a label selector.
*/
framework.ConformanceIt("should manage the lifecycle of a ResourceQuota", func() {
framework.ConformanceIt("should manage the lifecycle of a ResourceQuota", func(ctx context.Context) {
client := f.ClientSet
ns := f.Namespace.Name
@ -1007,7 +1007,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
values MUST succeed. The spec spec MUST NOT be changed when
patching /status.
*/
framework.ConformanceIt("should apply changes to a resourcequota status", func() {
framework.ConformanceIt("should apply changes to a resourcequota status", func(ctx context.Context) {
ns := f.Namespace.Name
rqClient := f.ClientSet.CoreV1().ResourceQuotas(ns)
rqName := "e2e-rq-status-" + utilrand.String(5)
@ -1063,7 +1063,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err, "Failed to update resourceQuota")
ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName))
ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort)
ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
defer cancel()
_, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {
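
The hunk above shows the substantive pattern behind most of this commit: instead of starting from context.Background(), derived timeouts now chain off the ctx that Ginkgo injects into the spec, so a spec timeout or interrupt also cancels any pending watch or poll. A minimal, self-contained sketch of the idea (the helper name and polling loop are illustrative, not from the diff):

```go
package example

import (
	"context"
	"time"
)

// waitWithBudget derives a bounded sub-context from the spec's ctx rather than
// from context.Background(): if the enclosing spec is interrupted or times out,
// the wait below is cancelled immediately instead of running to its own deadline.
func waitWithBudget(ctx context.Context, budget time.Duration, poll func(context.Context) (bool, error)) error {
	ctx, cancel := context.WithTimeout(ctx, budget)
	defer cancel()
	for {
		done, err := poll(ctx)
		if err != nil || done {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}
```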
@ -1202,7 +1202,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
f := framework.NewDefaultFramework("scope-selectors")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func() {
ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) {
ginkgo.By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
framework.ExpectNoError(err)
@ -1275,7 +1275,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func() {
ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func(ctx context.Context) {
ginkgo.By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
@ -1384,7 +1384,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
f := framework.NewDefaultFramework("resourcequota-priorityclass")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1425,7 +1425,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1472,7 +1472,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1521,7 +1521,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("unexpected error while creating priority class: %v", err)
@ -1579,7 +1579,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1615,7 +1615,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1656,7 +1656,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
@ -1725,7 +1725,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
var _ = SIGDescribe("ResourceQuota", func() {
f := framework.NewDefaultFramework("cross-namespace-pod-affinity")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func() {
ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func(ctx context.Context) {
ginkgo.By("Creating a ResourceQuota with cross namespace pod affinity scope")
quota, err := createResourceQuota(
f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-cross-namespace-pod-affinity", v1.ResourceQuotaScopeCrossNamespacePodAffinity))
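
The recurring change in this diff is the spec signature: `func() {` becomes `func(ctx context.Context) {`, so each body can hand Ginkgo's per-spec context to API calls instead of `context.TODO()`/`context.Background()`. Below is a minimal sketch of that shape; the Describe/It text, namespace name, and ConfigMap call are illustrative and not taken from any file in this diff.

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("per-spec context (sketch)", func() {
	f := framework.NewDefaultFramework("ctx-sketch")

	// The body now accepts the context supplied by Ginkgo; it is cancelled
	// when the spec times out or the suite is interrupted.
	ginkgo.It("should pass the spec's context to API calls", func(ctx context.Context) {
		cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
		// Use ctx instead of context.TODO()/context.Background() so the
		// request is aborted together with the spec.
		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, cm, metav1.CreateOptions{})
		framework.ExpectNoError(err)
	})
})
```
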

View File

@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"regexp"
"k8s.io/apimachinery/pkg/version"
@ -36,7 +37,7 @@ var _ = SIGDescribe("server version", func() {
Description: Ensure that an API server version can be retrieved.
Both the major and minor versions MUST be integers.
*/
framework.ConformanceIt("should find the server version", func() {
framework.ConformanceIt("should find the server version", func(ctx context.Context) {
ginkgo.By("Request ServerVersion")

View File

@ -40,7 +40,7 @@ var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func
f := framework.NewDefaultFramework("storage-version")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("storage version with non-existing id should be GC'ed", func() {
ginkgo.It("storage version with non-existing id should be GC'ed", func(ctx context.Context) {
client := f.ClientSet
sv := &apiserverinternalv1alpha1.StorageVersion{
ObjectMeta: metav1.ObjectMeta{

View File

@ -50,7 +50,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
e2eskipper.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery())
})
ginkgo.It("should return pod details", func() {
ginkgo.It("should return pod details", func(ctx context.Context) {
ns := f.Namespace.Name
c := f.ClientSet
@ -77,7 +77,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
framework.Logf("Table:\n%s", out)
})
ginkgo.It("should return chunks of table results for list calls", func() {
ginkgo.It("should return chunks of table results for list calls", func(ctx context.Context) {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
@ -126,7 +126,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
framework.ExpectEqual(pagedTable.Rows[0].Cells[0], "template-0002")
})
ginkgo.It("should return generic metadata details across all namespaces for nodes", func() {
ginkgo.It("should return generic metadata details across all namespaces for nodes", func(ctx context.Context) {
c := f.ClientSet
table := &metav1beta1.Table{}
@ -151,7 +151,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
Description: Issue an HTTP request to the API.
The HTTP request MUST return an HTTP status code of 406.
*/
framework.ConformanceIt("should return a 406 for a backend which does not implement metadata", func() {
framework.ConformanceIt("should return a 406 for a backend which does not implement metadata", func(ctx context.Context) {
c := f.ClientSet
table := &metav1beta1.Table{}

View File

@ -57,7 +57,7 @@ var _ = SIGDescribe("Watchers", func() {
update, and delete notifications on configmaps that match a label selector and do
not receive notifications for configmaps which do not match that label selector.
*/
framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func() {
framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func(ctx context.Context) {
c := f.ClientSet
ns := f.Namespace.Name
@ -139,7 +139,7 @@ var _ = SIGDescribe("Watchers", func() {
Description: Ensure that a watch can be opened from a particular resource version
in the past and only notifications happening after that resource version are observed.
*/
framework.ConformanceIt("should be able to start watching from a specific resource version", func() {
framework.ConformanceIt("should be able to start watching from a specific resource version", func(ctx context.Context) {
c := f.ClientSet
ns := f.Namespace.Name
@ -188,7 +188,7 @@ var _ = SIGDescribe("Watchers", func() {
observed by the previous watch, and it will continue delivering notifications from
that point in time.
*/
framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func() {
framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func(ctx context.Context) {
c := f.ClientSet
ns := f.Namespace.Name
@ -254,7 +254,7 @@ var _ = SIGDescribe("Watchers", func() {
a watch's selector, the watch will observe a delete, and will not observe
notifications for that object until it meets the selector's requirements again.
*/
framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func() {
framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func(ctx context.Context) {
c := f.ClientSet
ns := f.Namespace.Name
@ -331,7 +331,7 @@ var _ = SIGDescribe("Watchers", func() {
for events received from the first watch, initiated at the resource version of the event, and checking that all
resource versions of all events match. Events are produced from writes on a background goroutine.
*/
framework.ConformanceIt("should receive events on concurrent watches in same order", func() {
framework.ConformanceIt("should receive events on concurrent watches in same order", func(ctx context.Context) {
c := f.ClientSet
ns := f.Namespace.Name

View File

@ -114,7 +114,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
The mutatingwebhookconfigurations and validatingwebhookconfigurations resources MUST exist in the
/apis/admissionregistration.k8s.io/v1 discovery document.
*/
framework.ConformanceIt("should include webhook resources in discovery documents", func() {
framework.ConformanceIt("should include webhook resources in discovery documents", func(ctx context.Context) {
{
ginkgo.By("fetching the /apis discovery document")
apiGroupList := &metav1.APIGroupList{}
@ -194,7 +194,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
and the pod creation MUST be denied. An attempt to create a non-compliant configmap in a whitelisted
namespace based on the webhook namespace selector MUST be allowed.
*/
framework.ConformanceIt("should be able to deny pod and configmap creation", func() {
framework.ConformanceIt("should be able to deny pod and configmap creation", func(ctx context.Context) {
webhookCleanup := registerWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testWebhook(f)
@ -206,7 +206,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register an admission webhook configuration that denies connecting to a pod's attach sub-resource.
Attempts to attach MUST be denied.
*/
framework.ConformanceIt("should be able to deny attaching pod", func() {
framework.ConformanceIt("should be able to deny attaching pod", func(ctx context.Context) {
webhookCleanup := registerWebhookForAttachingPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testAttachingPodWebhook(f)
@ -218,7 +218,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register an admission webhook configuration that denies creation, update and deletion of
custom resources. Attempts to create, update and delete custom resources MUST be denied.
*/
framework.ConformanceIt("should be able to deny custom resource creation, update and deletion", func() {
framework.ConformanceIt("should be able to deny custom resource creation, update and deletion", func(ctx context.Context) {
testcrd, err := crd.CreateTestCRD(f)
if err != nil {
return
@ -236,7 +236,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register a webhook with a fail closed policy and without CA bundle so that it cannot be called.
Attempt operations that require the admission webhook; all MUST be denied.
*/
framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func() {
framework.ConformanceIt("should unconditionally reject operations on fail closed webhook", func(ctx context.Context) {
webhookCleanup := registerFailClosedWebhook(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testFailClosedWebhook(f)
@ -249,7 +249,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
adds a data key if the configmap already has a specific key, and another that adds a key if the key added by
the first webhook is present. Attempt to create a config map; both keys MUST be added to the config map.
*/
framework.ConformanceIt("should mutate configmap", func() {
framework.ConformanceIt("should mutate configmap", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForConfigMap(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testMutatingConfigMapWebhook(f)
@ -261,7 +261,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register a mutating webhook that adds an InitContainer to pods. Attempt to create a pod;
the InitContainer MUST be added and the TerminationMessagePolicy MUST be defaulted.
*/
framework.ConformanceIt("should mutate pod and apply defaults after mutation", func() {
framework.ConformanceIt("should mutate pod and apply defaults after mutation", func(ctx context.Context) {
webhookCleanup := registerMutatingWebhookForPod(f, f.UniqueName, certCtx, servicePort)
defer webhookCleanup()
testMutatingPodWebhook(f)
@ -274,7 +274,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
and delete a webhook configuration object; both operations MUST be allowed and the webhook configuration object
MUST NOT be mutated by the webhooks.
*/
framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func() {
framework.ConformanceIt("should not be able to mutate or prevent deletion of webhook configuration objects", func(ctx context.Context) {
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
defer validatingWebhookCleanup()
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, f.UniqueName+"blocking", certCtx, servicePort)
@ -288,7 +288,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register a webhook that mutates a custom resource. Attempt to create custom resource object;
the custom resource MUST be mutated.
*/
framework.ConformanceIt("should mutate custom resource", func() {
framework.ConformanceIt("should mutate custom resource", func(ctx context.Context) {
testcrd, err := crd.CreateTestCRD(f)
if err != nil {
return
@ -305,7 +305,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
Description: Register a webhook that denies custom resource definition create. Attempt to create a
custom resource definition; the create request MUST be denied.
*/
framework.ConformanceIt("should deny crd creation", func() {
framework.ConformanceIt("should deny crd creation", func(ctx context.Context) {
crdWebhookCleanup := registerValidatingWebhookForCRD(f, f.UniqueName, certCtx, servicePort)
defer crdWebhookCleanup()
@ -320,7 +320,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the stored version. Attempt to patch the custom resource with a new field and value; the patch MUST be applied
successfully.
*/
framework.ConformanceIt("should mutate custom resource with different stored version", func() {
framework.ConformanceIt("should mutate custom resource with different stored version", func(ctx context.Context) {
testcrd, err := createAdmissionWebhookMultiVersionTestCRDWithV1Storage(f)
if err != nil {
return
@ -338,7 +338,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
with a schema that includes only one of the data keys added by the webhooks. Attempt to create a custom resource;
the fields included in the schema MUST be present and the field not included in the schema MUST NOT be present.
*/
framework.ConformanceIt("should mutate custom resource with pruning", func() {
framework.ConformanceIt("should mutate custom resource with pruning", func(ctx context.Context) {
const prune = true
testcrd, err := createAdmissionWebhookMultiVersionTestCRDWithV1Storage(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
crd.Spec.PreserveUnknownFields = false
@ -378,7 +378,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
the failure policy is ignore. Requests MUST NOT time out if the configured webhook timeout is 10 seconds (much longer
than the webhook wait duration).
*/
framework.ConformanceIt("should honor timeout", func() {
framework.ConformanceIt("should honor timeout", func(ctx context.Context) {
policyFail := admissionregistrationv1.Fail
policyIgnore := admissionregistrationv1.Ignore
@ -410,7 +410,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
operation and attempt to create an object; the webhook MUST NOT deny the create. Patch the webhook to apply to the
create operation again and attempt to create an object; the webhook MUST deny the create.
*/
framework.ConformanceIt("patching/updating a validating webhook should work", func() {
framework.ConformanceIt("patching/updating a validating webhook should work", func(ctx context.Context) {
client := f.ClientSet
admissionClient := client.AdmissionregistrationV1()
@ -505,7 +505,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
operation and attempt to create an object; the webhook MUST NOT mutate the object. Patch the webhook to apply to the
create operation again and attempt to create an object; the webhook MUST mutate the object.
*/
framework.ConformanceIt("patching/updating a mutating webhook should work", func() {
framework.ConformanceIt("patching/updating a mutating webhook should work", func(ctx context.Context) {
client := f.ClientSet
admissionClient := client.AdmissionregistrationV1()
@ -579,7 +579,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
object; the create MUST be denied. Attempt to remove the webhook configurations matching the label with deletecollection;
all webhook configurations MUST be deleted. Attempt to create an object; the create MUST NOT be denied.
*/
framework.ConformanceIt("listing validating webhooks should work", func() {
framework.ConformanceIt("listing validating webhooks should work", func(ctx context.Context) {
testListSize := 10
testUUID := string(uuid.NewUUID())
@ -653,7 +653,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
object; the object MUST be mutated. Attempt to remove the webhook configurations matching the label with deletecollection;
all webhook configurations MUST be deleted. Attempt to create an object; the object MUST NOT be mutated.
*/
framework.ConformanceIt("listing mutating webhooks should work", func() {
framework.ConformanceIt("listing mutating webhooks should work", func(ctx context.Context) {
testListSize := 10
testUUID := string(uuid.NewUUID())

View File

@ -121,7 +121,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
Listing the ControllerRevisions by label selector MUST find only one.
The current ControllerRevision revision MUST be 3.
*/
framework.ConformanceIt("should manage the lifecycle of a ControllerRevision", func() {
framework.ConformanceIt("should manage the lifecycle of a ControllerRevision", func(ctx context.Context) {
csAppsV1 := f.ClientSet.AppsV1()
dsLabel := map[string]string{"daemonset-name": dsName}

View File

@ -66,7 +66,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob AllowConcurrent
Description: CronJob MUST support the AllowConcurrent policy, allowing multiple jobs to run at the same time.
*/
framework.ConformanceIt("should schedule multiple jobs concurrently", func() {
framework.ConformanceIt("should schedule multiple jobs concurrently", func(ctx context.Context) {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil)
@ -93,7 +93,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob Suspend
Description: CronJob MUST support suspension, which suppresses creation of new jobs.
*/
framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func() {
framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func(ctx context.Context) {
ginkgo.By("Creating a suspended cronjob")
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil)
@ -121,7 +121,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob ForbidConcurrent
Description: CronJob MUST support the ForbidConcurrent policy, allowing only a single, previously started job to run at a time.
*/
framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func(ctx context.Context) {
ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
@ -157,7 +157,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob ReplaceConcurrent
Description: CronJob MUST support the ReplaceConcurrent policy, allowing only a single, newer job to run at a time.
*/
framework.ConformanceIt("should replace jobs when ReplaceConcurrent", func() {
framework.ConformanceIt("should replace jobs when ReplaceConcurrent", func(ctx context.Context) {
ginkgo.By("Creating a ReplaceConcurrent cronjob")
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent,
sleepCommand, nil, nil)
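
The three CronJob specs above exercise the concurrency policies named in their descriptions. As a rough illustration of where that knob sits in the batch/v1 API; the object name, schedule, and the omitted job template below are placeholders, not part of this diff.

```go
package example

import (
	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleCronJob() *batchv1.CronJob {
	return &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv1.CronJobSpec{
			Schedule: "*/1 * * * *",
			// AllowConcurrent, ForbidConcurrent or ReplaceConcurrent:
			// the three behaviours the specs above verify.
			ConcurrencyPolicy: batchv1.ForbidConcurrent,
			// A real object also needs a pod template here.
			JobTemplate: batchv1.JobTemplateSpec{},
		},
	}
}
```
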
@ -188,7 +188,7 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
ginkgo.It("should be able to schedule after more than 100 missed schedule", func() {
ginkgo.It("should be able to schedule after more than 100 missed schedule", func(ctx context.Context) {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
@ -215,7 +215,7 @@ var _ = SIGDescribe("CronJob", func() {
})
// shouldn't give us unexpected warnings
ginkgo.It("should not emit unexpected warnings", func() {
ginkgo.It("should not emit unexpected warnings", func(ctx context.Context) {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
nil, nil, nil)
@ -238,7 +238,7 @@ var _ = SIGDescribe("CronJob", func() {
})
// deleted jobs should be removed from the active list
ginkgo.It("should remove from active list jobs that have been deleted", func() {
ginkgo.It("should remove from active list jobs that have been deleted", func(ctx context.Context) {
ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
@ -277,7 +277,7 @@ var _ = SIGDescribe("CronJob", func() {
})
// cleanup of successful finished jobs, with limit of one successful job
ginkgo.It("should delete successful finished jobs with limit of one successful job", func() {
ginkgo.It("should delete successful finished jobs with limit of one successful job", func(ctx context.Context) {
ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit")
successLimit := int32(1)
failedLimit := int32(0)
@ -288,7 +288,7 @@ var _ = SIGDescribe("CronJob", func() {
})
// cleanup of failed finished jobs, with limit of one failed job
ginkgo.It("should delete failed finished jobs with limit of one job", func() {
ginkgo.It("should delete failed finished jobs with limit of one job", func(ctx context.Context) {
ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit")
successLimit := int32(0)
failedLimit := int32(1)
@ -298,7 +298,7 @@ var _ = SIGDescribe("CronJob", func() {
ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
})
ginkgo.It("should support timezone", func() {
ginkgo.It("should support timezone", func(ctx context.Context) {
ginkgo.By("Creating a cronjob with TimeZone")
cronJob := newTestCronJob("cronjob-with-timezone", "*/1 * * * ?", batchv1.AllowConcurrent,
failureCommand, nil, nil)
@ -316,7 +316,7 @@ var _ = SIGDescribe("CronJob", func() {
CronJob MUST support create, get, list, watch, update, patch, delete, and deletecollection.
CronJob/status MUST support get, update and patch.
*/
framework.ConformanceIt("should support CronJob API operations", func() {
framework.ConformanceIt("should support CronJob API operations", func(ctx context.Context) {
ginkgo.By("Creating a cronjob")
successLimit := int32(1)
failedLimit := int32(0)

View File

@ -270,7 +270,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
close(stopCh)
})
ginkgo.It("Controller Manager should not create/delete replicas across restart", func() {
ginkgo.It("Controller Manager should not create/delete replicas across restart", func(ctx context.Context) {
// Requires master ssh access.
e2eskipper.SkipUnlessProviderIs("gce", "aws")
@ -301,7 +301,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
}
})
ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func() {
ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func(ctx context.Context) {
// Requires master ssh access.
e2eskipper.SkipUnlessProviderIs("gce", "aws")
@ -319,7 +319,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
})
ginkgo.It("Kubelet should not restart containers across restart", func() {
ginkgo.It("Kubelet should not restart containers across restart", func(ctx context.Context) {
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
@ -341,7 +341,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
}
})
ginkgo.It("Kube-proxy should recover after being killed accidentally", func() {
ginkgo.It("Kube-proxy should recover after being killed accidentally", func(ctx context.Context) {
nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)

View File

@ -163,7 +163,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Description: A conformant Kubernetes distribution MUST support the creation of DaemonSets. When a DaemonSet
Pod is deleted, the DaemonSet controller MUST create a replacement Pod.
*/
framework.ConformanceIt("should run and stop simple daemon", func() {
framework.ConformanceIt("should run and stop simple daemon", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
@ -191,7 +191,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Description: A conformant Kubernetes distribution MUST support DaemonSet Pod node selection via label
selectors.
*/
framework.ConformanceIt("should run and stop complex daemon", func() {
framework.ConformanceIt("should run and stop complex daemon", func(ctx context.Context) {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon %q with a node selector", dsName)
@ -238,7 +238,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
// default scheduler.
ginkgo.It("should run and stop complex daemon with node affinity", func() {
ginkgo.It("should run and stop complex daemon with node affinity", func(ctx context.Context) {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon %q with a node affinity", dsName)
@ -291,7 +291,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Testname: DaemonSet-FailedPodCreation
Description: A conformant Kubernetes distribution MUST create new DaemonSet Pods when they fail.
*/
framework.ConformanceIt("should retry creating failed daemon pods", func() {
framework.ConformanceIt("should retry creating failed daemon pods", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
@ -321,7 +321,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// This test should not be added to conformance. We will consider deprecating OnDelete when the
// extensions/v1beta1 and apps/v1beta1 are removed.
ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() {
ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
@ -371,7 +371,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Testname: DaemonSet-RollingUpdate
Description: A conformant Kubernetes distribution MUST support DaemonSet RollingUpdates.
*/
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
@ -429,7 +429,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Description: A conformant Kubernetes distribution MUST support automated, minimally disruptive
rollback of updates to a DaemonSet.
*/
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
framework.ConformanceIt("should rollback without unnecessary restarts", func(ctx context.Context) {
schedulableNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
@ -501,7 +501,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
})
// TODO: This test is expected to be promoted to conformance after the feature is promoted
ginkgo.It("should surge pods onto nodes when spec was updated and update strategy is RollingUpdate", func() {
ginkgo.It("should surge pods onto nodes when spec was updated and update strategy is RollingUpdate", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating surge daemon set %s", dsName)
@ -820,7 +820,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
MUST succeed when listing DaemonSets via a label selector. It
MUST succeed when deleting the DaemonSet via deleteCollection.
*/
framework.ConformanceIt("should list and delete a collection of DaemonSets", func() {
framework.ConformanceIt("should list and delete a collection of DaemonSets", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
labelSelector := labels.SelectorFromSet(label).String()
@ -859,7 +859,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Attempt to read, update and patch its status sub-resource; all
mutating sub-resource operations MUST be visible to subsequent reads.
*/
framework.ConformanceIt("should verify changes to a daemon set status", func() {
framework.ConformanceIt("should verify changes to a daemon set status", func(ctx context.Context) {
label := map[string]string{daemonsetNameLabel: dsName}
labelSelector := labels.SelectorFromSet(label).String()
@ -919,7 +919,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
ginkgo.By("watching for the daemon set status to be updated")
ctx, cancel := context.WithTimeout(context.Background(), dsRetryTimeout)
ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout)
defer cancel()
_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
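
Watches that previously built their timeout on top of context.Background() now derive it from the spec's ctx, so the watch also stops as soon as the spec is cancelled. A sketch of that pattern, assuming a cache.Watcher; the helper name, timeout, and condition are illustrative.

```go
package example

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForDaemonSetStatus derives a bounded context from the spec's ctx
// instead of context.Background(), then waits for a status update.
func waitForDaemonSetStatus(ctx context.Context, lw cache.Watcher, resourceVersion string) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err := watchtools.Until(ctx, resourceVersion, lw, func(event watch.Event) (bool, error) {
		ds, ok := event.Object.(*appsv1.DaemonSet)
		return ok && ds.Status.ObservedGeneration > 0, nil
	})
	return err
}
```
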

View File

@ -94,7 +94,7 @@ var _ = SIGDescribe("Deployment", func() {
dc = f.DynamicClient
})
ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() {
ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) {
testDeleteDeployment(f)
})
/*
@ -102,7 +102,7 @@ var _ = SIGDescribe("Deployment", func() {
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) {
testRollingUpdateDeployment(f)
})
/*
@ -110,7 +110,7 @@ var _ = SIGDescribe("Deployment", func() {
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) {
testRecreateDeployment(f)
})
/*
@ -119,7 +119,7 @@ var _ = SIGDescribe("Deployment", func() {
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
framework.ConformanceIt("deployment should delete old replica sets", func() {
framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) {
testDeploymentCleanUpPolicy(f)
})
/*
@ -129,13 +129,13 @@ var _ = SIGDescribe("Deployment", func() {
i.e. allow an arbitrary number of changes to the desired state during a rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) {
testRolloverDeployment(f)
})
ginkgo.It("iterative rollouts should eventually progress", func() {
ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) {
testIterativeDeployments(f)
})
ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) {
testDeploymentsControllerRef(f)
})
@ -147,7 +147,7 @@ var _ = SIGDescribe("Deployment", func() {
The Deployment MUST update and verify the scale subresource. The Deployment MUST patch and verify
a scale subresource.
*/
framework.ConformanceIt("Deployment should have a working scale subresource", func() {
framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) {
testDeploymentSubresources(f)
})
/*
@ -157,10 +157,10 @@ var _ = SIGDescribe("Deployment", func() {
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
framework.ConformanceIt("deployment should support proportional scaling", func() {
framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) {
testProportionalScalingDeployment(f)
})
ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func() {
ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
e2eskipper.SkipIfIPv6("aws")
nodes, err := e2enode.GetReadySchedulableNodes(c)
@ -182,7 +182,7 @@ var _ = SIGDescribe("Deployment", func() {
When fetching and patching the DeploymentStatus it MUST succeed. It MUST succeed when deleting
the Deployment.
*/
framework.ConformanceIt("should run the lifecycle of a Deployment", func() {
framework.ConformanceIt("should run the lifecycle of a Deployment", func(ctx context.Context) {
one := int64(1)
deploymentResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
testNamespaceName := f.Namespace.Name
@ -215,7 +215,7 @@ var _ = SIGDescribe("Deployment", func() {
framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
ginkgo.By("waiting for Deployment to be created")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
switch event.Type {
@ -476,7 +476,7 @@ var _ = SIGDescribe("Deployment", func() {
Attempt to read, update and patch its status sub-resource; all
mutating sub-resource operations MUST be visible to subsequent reads.
*/
framework.ConformanceIt("should validate Deployment Status endpoints", func() {
framework.ConformanceIt("should validate Deployment Status endpoints", func(ctx context.Context) {
dClient := c.AppsV1().Deployments(ns)
dName := "test-deployment-" + utilrand.String(5)
labelSelector := "e2e=testing"
@ -542,7 +542,7 @@ var _ = SIGDescribe("Deployment", func() {
framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
ginkgo.By("watching for the Deployment status to be updated")
ctx, cancel := context.WithTimeout(context.Background(), dRetryTimeout)
ctx, cancel := context.WithTimeout(ctx, dRetryTimeout)
defer cancel()
_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {

View File

@ -84,7 +84,7 @@ var _ = SIGDescribe("DisruptionController", func() {
Testname: PodDisruptionBudget: list and delete collection
Description: PodDisruptionBudget API must support list and deletecollection operations.
*/
framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func() {
framework.ConformanceIt("should list and delete a collection of PodDisruptionBudgets", func(ctx context.Context) {
specialLabels := map[string]string{"foo_pdb": "bar_pdb"}
labelSelector := labels.SelectorFromSet(specialLabels).String()
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels)
@ -105,7 +105,7 @@ var _ = SIGDescribe("DisruptionController", func() {
Testname: PodDisruptionBudget: create, update, patch, and delete object
Description: PodDisruptionBudget API must support create, update, patch, and delete operations.
*/
framework.ConformanceIt("should create a PodDisruptionBudget", func() {
framework.ConformanceIt("should create a PodDisruptionBudget", func(ctx context.Context) {
ginkgo.By("creating the pdb")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels)
@ -138,7 +138,7 @@ var _ = SIGDescribe("DisruptionController", func() {
Description: Disruption controller MUST update the PDB status with
how many disruptions are allowed.
*/
framework.ConformanceIt("should observe PodDisruptionBudget status updated", func() {
framework.ConformanceIt("should observe PodDisruptionBudget status updated", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
createPodsOrDie(cs, ns, 3)
@ -161,7 +161,7 @@ var _ = SIGDescribe("DisruptionController", func() {
Testname: PodDisruptionBudget: update and patch status
Description: PodDisruptionBudget API must support update and patch operations on status subresource.
*/
framework.ConformanceIt("should update/patch PodDisruptionBudget status", func() {
framework.ConformanceIt("should update/patch PodDisruptionBudget status", func(ctx context.Context) {
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
ginkgo.By("Updating PodDisruptionBudget status")
@ -287,7 +287,7 @@ var _ = SIGDescribe("DisruptionController", func() {
if c.exclusive {
serial = " [Serial]"
}
ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func() {
ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func(ctx context.Context) {
if c.skipForBigClusters {
e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
}
@ -344,7 +344,7 @@ var _ = SIGDescribe("DisruptionController", func() {
Testname: PodDisruptionBudget: block an eviction until the PDB is updated to allow it
Description: Eviction API must block an eviction until the PDB is updated to allow it
*/
framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func() {
framework.ConformanceIt("should block an eviction until the PDB is updated to allow it", func(ctx context.Context) {
ginkgo.By("Creating a pdb that targets all three pods in a test replica set")
createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels)
createReplicaSetOrDie(cs, ns, 3, false)
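
The DisruptionController specs rely on test helpers such as createPDBMinAvailableOrDie. A rough sketch of the policy/v1 object such a helper would build; the minAvailable value is a placeholder, and the selector reuses the specialLabels pair shown above for illustration only.

```go
package example

import (
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func examplePDB(ns string) *policyv1.PodDisruptionBudget {
	minAvailable := intstr.FromInt(2)
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ns},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			// Pods counted against the budget; the label pair is a placeholder.
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo_pdb": "bar_pdb"}},
		},
	}
}
```
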

View File

@ -79,7 +79,7 @@ var _ = SIGDescribe("Job", func() {
backoffLimit := int32(6) // default value
// Simplest case: N pods succeed
ginkgo.It("should run a job to completion when tasks succeed", func() {
ginkgo.It("should run a job to completion when tasks succeed", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
@ -101,7 +101,7 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes)
})
ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func() {
ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func(ctx context.Context) {
// We fail the Job's pod only once to ensure the backoffLimit is not
// reached and thus the job is failed due to the pod failure policy
@ -134,7 +134,7 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to ensure job failure in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should allow to use the pod failure policy to not count the failure towards the backoffLimit", func() {
ginkgo.It("should allow to use the pod failure policy to not count the failure towards the backoffLimit", func(ctx context.Context) {
// We set the backoffLimit to 0 so that any pod failure would trigger
// job failure if not for the pod failure policy to ignore the failed
@ -272,7 +272,7 @@ var _ = SIGDescribe("Job", func() {
}),
)
ginkgo.It("should not create pods when created in suspend state", func() {
ginkgo.It("should not create pods when created in suspend state", func(ctx context.Context) {
ginkgo.By("Creating a job with suspend=true")
job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.BoolPtr(true)
@ -310,7 +310,7 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should delete pods when suspended", func() {
ginkgo.It("should delete pods when suspended", func(ctx context.Context) {
ginkgo.By("Creating a job with suspend=false")
job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.Suspend = pointer.Bool(false)
@ -363,7 +363,7 @@ var _ = SIGDescribe("Job", func() {
Description: Create an Indexed job. Job MUST complete successfully.
Ensure that created pods have completion index annotation and environment variable.
*/
framework.ConformanceIt("should create pods for an Indexed job with completion indexes and specified hostname", func() {
framework.ConformanceIt("should create pods for an Indexed job with completion indexes and specified hostname", func(ctx context.Context) {
ginkgo.By("Creating Indexed job")
job := e2ejob.NewTestJob("succeed", "indexed-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
mode := batchv1.IndexedCompletion
@ -398,7 +398,7 @@ var _ = SIGDescribe("Job", func() {
Description: Create a job and ensure the associated pod count is equal to the parallelism count. Delete the
job and ensure that the pods associated with the job have been removed.
*/
ginkgo.It("should remove pods when job is deleted", func() {
ginkgo.It("should remove pods when job is deleted", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
@ -423,7 +423,7 @@ var _ = SIGDescribe("Job", func() {
Description: Explicitly cause the tasks to fail once initially. After restarting, the Job MUST
execute to completion.
*/
framework.ConformanceIt("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
framework.ConformanceIt("should run a job to completion when tasks sometimes fail and are locally restarted", func(ctx context.Context) {
ginkgo.By("Creating a job")
// One failure, then a success, local restarts.
// We can't use the random failure approach, because kubelet will
@ -440,7 +440,7 @@ var _ = SIGDescribe("Job", func() {
})
// Pods sometimes fail, but eventually succeed, after pod restarts
ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func(ctx context.Context) {
// One failure, then a success, no local restarts.
// We can't use the random failure approach, because JobController
// will throttle frequently failing Pods of a given Job, ramping
@ -462,7 +462,7 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should fail when exceeds active deadline", func() {
ginkgo.It("should fail when exceeds active deadline", func(ctx context.Context) {
ginkgo.By("Creating a job")
var activeDeadlineSeconds int64 = 1
job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
@ -478,7 +478,7 @@ var _ = SIGDescribe("Job", func() {
Testname: Jobs, active pods, graceful termination
Description: Create a job. Ensure the active pods reflect parallelism in the namespace and delete the job. Job MUST be deleted successfully.
*/
framework.ConformanceIt("should delete a job", func() {
framework.ConformanceIt("should delete a job", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
@ -504,7 +504,7 @@ var _ = SIGDescribe("Job", func() {
Orphan a Pod by modifying its owner reference. The Job MUST re-adopt the orphan pod.
Modify the labels of one of the Job's Pods. The Job MUST release the Pod.
*/
framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func() {
framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
// Replace job with the one returned from Create() so it has the UID.
@ -558,7 +558,7 @@ var _ = SIGDescribe("Job", func() {
)).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name)
})
ginkgo.It("should fail to exceed backoffLimit", func() {
ginkgo.It("should fail to exceed backoffLimit", func(ctx context.Context) {
ginkgo.By("Creating a job")
backoff := 1
job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
@ -578,7 +578,7 @@ var _ = SIGDescribe("Job", func() {
}
})
ginkgo.It("should run a job to completion with CPU requests [Serial]", func() {
ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) {
ginkgo.By("Creating a job that with CPU requests")
testNodeName := scheduling.GetNodeThatCanRunPod(f)
@ -633,7 +633,7 @@ var _ = SIGDescribe("Job", func() {
Attempt to replace the job status with a new start time which MUST
succeed. Attempt to read its status sub-resource which MUST succeed
*/
framework.ConformanceIt("should apply changes to a job status", func() {
framework.ConformanceIt("should apply changes to a job status", func(ctx context.Context) {
ns := f.Namespace.Name
jClient := f.ClientSet.BatchV1().Jobs(ns)
@ -700,7 +700,7 @@ var _ = SIGDescribe("Job", func() {
succeed. One list MUST be found. It MUST succeed at deleting a
collection of jobs via a label selector.
*/
framework.ConformanceIt("should manage the lifecycle of a job", func() {
framework.ConformanceIt("should manage the lifecycle of a job", func(ctx context.Context) {
jobName := "e2e-" + utilrand.String(5)
label := map[string]string{"e2e-job-label": jobName}
labelSelector := labels.SelectorFromSet(label).String()

View File

@ -64,11 +64,11 @@ var _ = SIGDescribe("ReplicationController", func() {
Testname: Replication Controller, run basic image
Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
*/
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
ginkgo.It("should serve a basic image on each replica with a private image", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
// requires private images
e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
@ -80,7 +80,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Testname: Replication Controller, check for issues like exceeding allocated quota
Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail
*/
framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func() {
framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
testReplicationControllerConditionCheck(f)
})
@ -89,7 +89,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Testname: Replication Controller, adopt matching pods
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
*/
framework.ConformanceIt("should adopt matching pods on creation", func() {
framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) {
testRCAdoptMatchingOrphans(f)
})
@ -98,7 +98,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Testname: Replication Controller, release pods
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
*/
framework.ConformanceIt("should release no longer matching pods", func() {
framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) {
testRCReleaseControlledNotMatching(f)
})
@ -107,7 +107,7 @@ var _ = SIGDescribe("ReplicationController", func() {
Testname: Replication Controller, lifecycle
Description: A Replication Controller (RC) is created, read, patched, and deleted with verification.
*/
framework.ConformanceIt("should test the lifecycle of a ReplicationController", func() {
framework.ConformanceIt("should test the lifecycle of a ReplicationController", func(ctx context.Context) {
testRcName := "rc-test"
testRcNamespace := ns
testRcInitialReplicaCount := int32(1)
@ -399,7 +399,7 @@ var _ = SIGDescribe("ReplicationController", func() {
succeed when reading the ReplicationController scale. When updating the
ReplicationController scale it MUST succeed and the field MUST equal the new value.
*/
framework.ConformanceIt("should get and update a ReplicationController scale", func() {
framework.ConformanceIt("should get and update a ReplicationController scale", func(ctx context.Context) {
rcClient := f.ClientSet.CoreV1().ReplicationControllers(ns)
rcName := "e2e-rc-" + utilrand.String(5)
initialRCReplicaCount := int32(1)

View File

@ -108,18 +108,18 @@ var _ = SIGDescribe("ReplicaSet", func() {
Testname: Replica Set, run basic image
Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
*/
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
ginkgo.It("should serve a basic image on each replica with a private image", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
// requires private images
e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
})
ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func() {
ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
testReplicaSetConditionCheck(f)
})
@ -128,7 +128,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
Testname: Replica Set, adopt matching pods and release non matching pods
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
*/
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) {
testRSAdoptMatchingAndReleaseNotMatching(f)
})
@ -140,7 +140,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
The RS MUST update and verify the scale subresource. The RS MUST patch and verify
a scale subresource.
*/
framework.ConformanceIt("Replicaset should have a working scale subresource", func() {
framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) {
testRSScaleSubresources(f)
})
@ -151,7 +151,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
that it is running. The RS MUST scale to two replicas and verify the scale count.
The RS MUST be patched and verify that the patch succeeded.
*/
framework.ConformanceIt("Replace and Patch tests", func() {
framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) {
testRSLifeCycle(f)
})
@ -162,7 +162,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
MUST succeed when listing ReplicaSets via a label selector. It
MUST succeed when deleting the ReplicaSet via deleteCollection.
*/
framework.ConformanceIt("should list and delete a collection of ReplicaSets", func() {
framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) {
listRSDeleteCollection(f)
})
@ -173,7 +173,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
Attempt to read, update and patch its status sub-resource; all
mutating sub-resource operations MUST be visible to subsequent reads.
*/
framework.ConformanceIt("should validate Replicaset Status endpoints", func() {
framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) {
testRSStatus(f)
})
})
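
Several of the specs above verify the scale subresource. A sketch of the read-modify-update flow they describe, passing the spec's ctx through the typed client; the function name and replica count are illustrative.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleReplicaSet reads the scale subresource, sets a new replica count and
// writes it back, passing the spec's ctx through to both calls.
func scaleReplicaSet(ctx context.Context, c kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}
```
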

View File

@ -131,7 +131,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
ginkgo.It("should provide basic identity", func() {
ginkgo.It("should provide basic identity", func(ctx context.Context) {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
@ -170,7 +170,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
ginkgo.It("should adopt matching orphans and release non-matching pods", func() {
ginkgo.It("should adopt matching orphans and release non-matching pods", func(ctx context.Context) {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 1
@ -255,7 +255,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
ginkgo.It("should not deadlock when a pod's predecessor fails", func() {
ginkgo.It("should not deadlock when a pod's predecessor fails", func(ctx context.Context) {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 2
@ -291,7 +291,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func(ctx context.Context) {
ginkgo.By("Creating a new StatefulSet with PVCs")
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
@ -303,7 +303,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Rolling Update
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func(ctx context.Context) {
ginkgo.By("Creating a new StatefulSet")
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ss)
@ -314,7 +314,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Rolling Update with Partition
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func(ctx context.Context) {
ginkgo.By("Creating a new StatefulSet")
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
@ -506,7 +506,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Do not mark this as Conformance.
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() {
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func(ctx context.Context) {
ginkgo.By("Creating a new StatefulSet")
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
@ -584,7 +584,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Scaling
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func() {
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func(ctx context.Context) {
psLabels := klabels.Set(labels)
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
@ -694,7 +694,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Burst Scaling
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func() {
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func(ctx context.Context) {
psLabels := klabels.Set(labels)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
@ -736,7 +736,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Recreate Failed Pod
Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state, such as when they are rejected or evicted by a Node. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Should recreate evicted statefulset", func() {
framework.ConformanceIt("Should recreate evicted statefulset", func(ctx context.Context) {
podName := "test-pod"
statefulPodName := ssName + "-0"
ginkgo.By("Looking for a node to schedule stateful set and pod")
@ -796,7 +796,7 @@ var _ = SIGDescribe("StatefulSet", func() {
return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options)
},
}
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulPodTimeout)
defer cancel()
// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
_, err = watchtools.Until(ctx, pl.ResourceVersion, lw, func(event watch.Event) (bool, error) {
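
The two context lines in the hunk above show the other recurring change: timeouts are layered on top of the ctx supplied by Ginkgo rather than on context.Background(), so an interrupted or timed-out spec also cancels the watch. A standalone sketch of that layering (the helper name is illustrative; ContextWithOptionalTimeout is the same function used in the hunk):

```
package example

import (
	"context"
	"time"

	watchtools "k8s.io/client-go/tools/watch"
)

// specScopedTimeout derives the watch deadline from the Ginkgo-provided ctx,
// so both the timeout and a spec interrupt stop the wait.
func specScopedTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	return watchtools.ContextWithOptionalTimeout(ctx, timeout)
}
```
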
@ -845,7 +845,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Newly created StatefulSet resource MUST have a scale of one.
Bring the scale of the StatefulSet resource up to two. StatefulSet scale MUST be at two replicas.
*/
framework.ConformanceIt("should have a working scale subresource", func() {
framework.ConformanceIt("should have a working scale subresource", func(ctx context.Context) {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
setHTTPProbe(ss)
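
As background for the scale-subresource description above, the typed client exposes that subresource through GetScale/UpdateScale; a hedged sketch of bumping a StatefulSet's replica count (helper name and error handling are illustrative):

```
package example

import (
	"context"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleStatefulSet reads the scale subresource and writes back a new replica
// count, which is essentially what the conformance test exercises.
func scaleStatefulSet(ctx context.Context, c kubernetes.Interface, ns, name string, replicas int32) (*autoscalingv1.Scale, error) {
	scale, err := c.AppsV1().StatefulSets(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return c.AppsV1().StatefulSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
}
```
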
@ -905,7 +905,7 @@ var _ = SIGDescribe("StatefulSet", func() {
MUST succeed when patching a StatefulSet. It MUST succeed when
deleting the StatefulSet via deleteCollection.
*/
framework.ConformanceIt("should list, patch and delete a collection of StatefulSets", func() {
framework.ConformanceIt("should list, patch and delete a collection of StatefulSets", func(ctx context.Context) {
ssPatchReplicas := int32(2)
ssPatchImage := imageutils.GetE2EImage(imageutils.Pause)
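
The deleteCollection step from the description above maps to a single client call filtered by a label selector; a minimal sketch, with the helper name and selector value being illustrative:

```
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteStatefulSetsByLabel removes every StatefulSet in the namespace that
// matches the selector in one request.
func deleteStatefulSetsByLabel(ctx context.Context, c kubernetes.Interface, ns, selector string) error {
	return c.AppsV1().StatefulSets(ns).DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: selector})
}
```
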
@ -974,7 +974,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Attempt to read, update and patch its status sub-resource; all
mutating sub-resource operations MUST be visible to subsequent reads.
*/
framework.ConformanceIt("should validate Statefulset Status endpoints", func() {
framework.ConformanceIt("should validate Statefulset Status endpoints", func(ctx context.Context) {
ssClient := c.AppsV1().StatefulSets(ns)
labelSelector := "e2e=testing"
@ -1034,7 +1034,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("watching for the statefulset status to be updated")
ctx, cancel := context.WithTimeout(context.Background(), statefulSetTimeout)
ctx, cancel := context.WithTimeout(ctx, statefulSetTimeout)
defer cancel()
_, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) {
@ -1118,7 +1118,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working zookeeper cluster", func() {
ginkgo.It("should creating a working zookeeper cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
appTester.statefulPod = &zookeeperTester{client: c}
appTester.run()
@ -1126,7 +1126,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working redis cluster", func() {
ginkgo.It("should creating a working redis cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
appTester.statefulPod = &redisTester{client: c}
appTester.run()
@ -1134,7 +1134,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working mysql cluster", func() {
ginkgo.It("should creating a working mysql cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
appTester.statefulPod = &mysqlGaleraTester{client: c}
appTester.run()
@ -1142,7 +1142,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working CockroachDB cluster", func() {
ginkgo.It("should creating a working CockroachDB cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
appTester.statefulPod = &cockroachDBTester{client: c}
appTester.run()
@ -1151,7 +1151,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Make sure minReadySeconds is honored
// Don't mark it as conformance yet
ginkgo.It("MinReadySeconds should be honored when enabled", func() {
ginkgo.It("MinReadySeconds should be honored when enabled", func(ctx context.Context) {
ssName := "test-ss"
headlessSvcName := "test"
// Define StatefulSet Labels
@ -1166,7 +1166,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2estatefulset.WaitForStatusAvailableReplicas(c, ss, 1)
})
ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func() {
ginkgo.It("AvailableReplicas should get updated accordingly when MinReadySeconds is enabled", func(ctx context.Context) {
ssName := "test-ss"
headlessSvcName := "test"
// Define StatefulSet Labels
@ -1239,7 +1239,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2estatefulset.DeleteAllStatefulSets(c, ns)
})
ginkgo.It("should delete PVCs with a WhenDeleted policy", func() {
ginkgo.It("should delete PVCs with a WhenDeleted policy", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
@ -1262,7 +1262,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs with a OnScaledown policy", func() {
ginkgo.It("should delete PVCs with a OnScaledown policy", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
@ -1285,7 +1285,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func() {
ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
@ -1316,7 +1316,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func() {
ginkgo.It("should delete PVCs after adopting pod (WhenScaled) [Feature:StatefulSetAutoDeletePVC]", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3

View File

@ -45,7 +45,7 @@ var _ = SIGDescribe("TTLAfterFinished", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("job should be deleted once it finishes after TTL seconds", func() {
ginkgo.It("job should be deleted once it finishes after TTL seconds", func(ctx context.Context) {
testFinishedJob(f)
})
})

View File

@ -17,6 +17,7 @@ limitations under the License.
package architecture
import (
"context"
"time"
"github.com/onsi/ginkgo/v2"
@ -35,7 +36,7 @@ var _ = SIGDescribe("Conformance Tests", func() {
Testname: Conformance tests minimum number of nodes.
Description: Conformance tests requires at least two untainted nodes where pods can be scheduled.
*/
framework.ConformanceIt("should have at least two untainted nodes", func() {
framework.ConformanceIt("should have at least two untainted nodes", func(ctx context.Context) {
ginkgo.By("Getting node addresses")
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(f.ClientSet, 10*time.Minute))
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)

View File

@ -56,7 +56,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
The certificatesigningrequests resource must accept a request for a certificate signed by kubernetes.io/kube-apiserver-client.
The issued certificate must be valid as a client certificate used to authenticate to the kube-apiserver.
*/
ginkgo.It("should support building a client with a CSR", func() {
ginkgo.It("should support building a client with a CSR", func(ctx context.Context) {
const commonName = "tester-csr"
csrClient := f.ClientSet.CertificatesV1().CertificateSigningRequests()
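
To make the description above concrete, the spec of a CSR aimed at the kube-apiserver-client signer looks roughly like this; the PEM-encoded request, expiration, and helper name are illustrative, and the exact usages a signer requires may differ:

```
package example

import (
	certificatesv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// clientCertCSR builds a CertificateSigningRequest whose issued certificate
// can be used as a client certificate against the kube-apiserver.
func clientCertCSR(commonName string, csrPEM []byte) *certificatesv1.CertificateSigningRequest {
	expiration := int32(3600)
	return &certificatesv1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{GenerateName: commonName + "-"},
		Spec: certificatesv1.CertificateSigningRequestSpec{
			Request:           csrPEM,
			SignerName:        certificatesv1.KubeAPIServerClientSignerName,
			ExpirationSeconds: &expiration,
			Usages:            []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
		},
	}
}
```
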
@ -197,7 +197,7 @@ var _ = SIGDescribe("Certificates API [Privileged:ClusterAdmin]", func() {
The certificatesigningrequests/approval resource must support get, update, patch.
The certificatesigningrequests/status resource must support get, update, patch.
*/
framework.ConformanceIt("should support CSR API operations", func() {
framework.ConformanceIt("should support CSR API operations", func(ctx context.Context) {
// Setup
csrVersion := "v1"

View File

@ -56,7 +56,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
framework.ExpectNotEqual(len(nodeIPs), 0)
})
ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func() {
ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func(ctx context.Context) {
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
@ -66,7 +66,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
}
})
ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func(ctx context.Context) {
ginkgo.By("create a new ServiceAccount for authentication")
trueValue := true
newSA := &v1.ServiceAccount{

View File

@ -68,14 +68,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config)
})
ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) {
_, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err)
}
})
ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() {
ginkgo.It("Getting an existing secret should exit with the Forbidden error", func(ctx context.Context) {
ginkgo.By("Create a secret for testing")
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@ -92,14 +92,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}
})
ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func(ctx context.Context) {
_, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if !apierrors.IsForbidden(err) {
framework.Failf("should be a forbidden error, got %#v", err)
}
})
ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() {
ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func(ctx context.Context) {
ginkgo.By("Create a configmap for testing")
configmap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@ -118,7 +118,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}
})
ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() {
ginkgo.It("Getting a secret for a workload the node has access to should succeed", func(ctx context.Context) {
ginkgo.By("Create a secret for testing")
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@ -181,7 +181,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
framework.ExpectNoError(err, "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name)
})
ginkgo.It("A node shouldn't be able to create another node", func() {
ginkgo.It("A node shouldn't be able to create another node", func(ctx context.Context) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
TypeMeta: metav1.TypeMeta{
@ -203,7 +203,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}
})
ginkgo.It("A node shouldn't be able to delete another node", func() {
ginkgo.It("A node shouldn't be able to delete another node", func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", metav1.DeleteOptions{})
if !apierrors.IsForbidden(err) {

View File

@ -41,7 +41,7 @@ var _ = SIGDescribe("SelfSubjectReview [Feature:APISelfSubjectReview]", func() {
The selfsubjectreviews resource MUST exist in the /apis/authentication.k8s.io/v1alpha1 discovery document.
The selfsubjectreviews resource must support create.
*/
ginkgo.It("should support SelfSubjectReview API operations", func() {
ginkgo.It("should support SelfSubjectReview API operations", func(ctx context.Context) {
// Setup
ssarAPIVersion := "v1alpha1"
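
For context on the v1alpha1 API named above, creating a SelfSubjectReview is a single create call whose status echoes back the caller's user info. A hedged sketch, assuming the generated v1alpha1 client is available in the clientset; the helper name is illustrative:

```
package example

import (
	"context"

	authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// whoAmI asks the API server which user the client's credentials map to.
func whoAmI(ctx context.Context, c kubernetes.Interface) (*authenticationv1alpha1.SelfSubjectReview, error) {
	return c.AuthenticationV1alpha1().SelfSubjectReviews().Create(ctx,
		&authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
}
```
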

View File

@ -56,7 +56,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
f := framework.NewDefaultFramework("svcaccounts")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.It("no secret-based service account token should be auto-generated", func() {
ginkgo.It("no secret-based service account token should be auto-generated", func(ctx context.Context) {
{
ginkgo.By("ensuring no secret-based service account token exists")
time.Sleep(10 * time.Second)
@ -75,7 +75,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Token Mount path. All these three files MUST exist and the Service
Account mount path MUST be auto mounted to the Container.
*/
framework.ConformanceIt("should mount an API token into pods ", func() {
framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) {
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{})
framework.ExpectNoError(err)
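
As a reminder of what the three files in the description above are, a container can read them from the conventional projected mount path; a minimal sketch (the path and file names are the standard ones, the helper name is illustrative):

```
package example

import (
	"os"
	"path/filepath"
)

const serviceAccountMount = "/var/run/secrets/kubernetes.io/serviceaccount"

// readServiceAccountFiles loads the token, CA bundle and namespace that the
// kubelet projects into every container unless automount is disabled.
func readServiceAccountFiles() (token, caCert, namespace []byte, err error) {
	if token, err = os.ReadFile(filepath.Join(serviceAccountMount, "token")); err != nil {
		return
	}
	if caCert, err = os.ReadFile(filepath.Join(serviceAccountMount, "ca.crt")); err != nil {
		return
	}
	namespace, err = os.ReadFile(filepath.Join(serviceAccountMount, "namespace"))
	return
}
```
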
@ -158,7 +158,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
include test cases 1a,1b,2a,2b and 2c.
In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted.
*/
framework.ConformanceIt("should allow opting out of API token automount ", func() {
framework.ConformanceIt("should allow opting out of API token automount ", func(ctx context.Context) {
var err error
trueValue := true
@ -272,7 +272,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Testname: TokenRequestProjection should mount a projected volume with token using TokenRequest API.
Description: Ensure that projected service account token is mounted.
*/
framework.ConformanceIt("should mount projected service account token", func() {
framework.ConformanceIt("should mount projected service account token", func(ctx context.Context) {
var (
podName = "test-pod-" + string(uuid.NewUUID())
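
The TokenRequest-backed projection in the description above is expressed as a projected volume source; a rough sketch of the volume definition, with the volume name, audience and expiry being illustrative:

```
package example

import v1 "k8s.io/api/core/v1"

// projectedTokenVolume returns a volume whose "token" file is minted through
// the TokenRequest API instead of coming from a long-lived Secret.
func projectedTokenVolume(audience string) v1.Volume {
	expiry := int64(3600)
	return v1.Volume{
		Name: "sa-token",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{{
					ServiceAccountToken: &v1.ServiceAccountTokenProjection{
						Path:              "token",
						Audience:          audience,
						ExpirationSeconds: &expiry,
					},
				}},
			},
		},
	}
}
```
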
@ -333,7 +333,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Containers MUST verify that the projected service account token can be
read and has correct file mode set including ownership and permission.
*/
ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
e2eskipper.SkipIfNodeOSDistroIs("windows")
var (
@ -429,7 +429,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})
ginkgo.It("should support InClusterConfig with token rotation [Slow]", func() {
ginkgo.It("should support InClusterConfig with token rotation [Slow]", func(ctx context.Context) {
tenMin := int64(10 * 60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
@ -528,7 +528,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
endpoints by deploying a Pod that verifies its own
token against these endpoints.
*/
framework.ConformanceIt("ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer", func() {
framework.ConformanceIt("ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer", func(ctx context.Context) {
// Allow the test pod access to the OIDC discovery non-resource URLs.
// The role should have already been automatically created as part of the
@ -646,7 +646,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Listing the ServiceAccounts MUST return the test ServiceAccount with it's patched values.
ServiceAccount will be deleted and MUST find a deleted watch event.
*/
framework.ConformanceIt("should run through the lifecycle of a ServiceAccount", func() {
framework.ConformanceIt("should run through the lifecycle of a ServiceAccount", func(ctx context.Context) {
testNamespaceName := f.Namespace.Name
testServiceAccountName := "testserviceaccount"
testServiceAccountStaticLabels := map[string]string{"test-serviceaccount-static": "true"}
@ -739,7 +739,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
2. Recreated if deleted
3. Reconciled if modified
*/
framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func() {
framework.ConformanceIt("should guarantee kube-root-ca.crt exist in any namespace", func(ctx context.Context) {
framework.ExpectNoError(wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), rootCAConfigMapName, metav1.GetOptions{})
if err == nil {
@ -807,7 +807,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
updating the ServiceAccount it MUST succeed and the field MUST equal
the new value.
*/
framework.ConformanceIt("should update a ServiceAccount", func() {
framework.ConformanceIt("should update a ServiceAccount", func(ctx context.Context) {
saClient := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name)
saName := "e2e-sa-" + utilrand.String(5)

View File

@ -86,7 +86,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
})
ginkgo.It("takes less than 15 minutes", func() {
ginkgo.It("takes less than 15 minutes", func(ctx context.Context) {
// Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
const timeToWait = 15 * time.Minute

View File

@ -137,7 +137,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func(ctx context.Context) {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
@ -160,7 +160,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
defer testCleanup()
})
ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func(ctx context.Context) {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
@ -209,7 +209,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
klog.Infof("Scaled up twice")
})
ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func(ctx context.Context) {
perNodeReservation := int(float64(memCapacityMb) * 0.7)
replicas := int(math.Ceil(maxNodes * 0.7))
totalNodes := maxNodes
@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
}, scaleDownTimeout))
})
ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func(ctx context.Context) {
perPodReservation := int(float64(memCapacityMb) * 0.01)
// underutilizedNodes are 10% full
underutilizedPerNodeReplicas := 10
@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() {
}, timeout))
})
ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func(ctx context.Context) {
fullReservation := int(float64(memCapacityMb) * 0.9)
hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
totalNodes := maxNodes

View File

@ -165,7 +165,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
ginkgo.By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
@ -210,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
gpuType := os.Getenv("TESTED_GPU_TYPE")
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)
})
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@ -267,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 2)
})
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 0)
})
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@ -331,7 +331,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
})
ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
// Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.ready == s.target && s.ready <= nodeCount
@ -371,7 +371,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes)
})
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
@ -405,7 +405,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
@ -418,7 +418,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
@ -427,7 +427,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
pods := nodeCount
newPods := 2
labels := map[string]string{
@ -446,7 +446,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
ginkgo.By("creating pods")
pods := nodeCount
newPods := 1
@ -467,7 +467,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
@ -539,7 +539,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
labelKey := "cluster-autoscaling-test.special-node"
labelValue := "true"
@ -639,7 +639,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
})
ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
@ -694,7 +694,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
})
ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke")
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
@ -716,7 +716,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
})
ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@ -724,7 +724,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
})
ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
ginkgo.By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
@ -734,7 +734,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
})
ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@ -742,7 +742,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
})
ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
ginkgo.By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@ -750,7 +750,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
})
ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
// Provider-specific setup
if framework.ProviderIs("gke") {
// GKE-specific setup
@ -874,7 +874,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(newSize, 0)
}
ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
gkeScaleToZero()
} else if len(originalSizes) >= 2 {
@ -884,7 +884,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
})
ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessSSHKeyPresent()
clusterSize := nodeCount
@ -948,7 +948,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
@ -960,7 +960,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size == nodeCount }, time.Second))
})
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@ -970,7 +970,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size > nodeCount }, time.Second))
})
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
defer createPriorityClasses(f)()
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@ -982,7 +982,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size == nodeCount }, time.Second))
})
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@ -993,7 +993,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
})
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) {
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.

View File

@ -61,7 +61,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.Describe("with Custom Metric of type Pod from Stackdriver", func() {
ginkgo.It("should scale down", func() {
ginkgo.It("should scale down", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@ -80,7 +80,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale up with two metrics", func() {
ginkgo.It("should scale up with two metrics", func(ctx context.Context) {
initialReplicas := 1
// metric 1 would cause a scale down, if not for metric 2
metric1Value := int64(100)
@ -115,7 +115,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale down with Prometheus", func() {
ginkgo.It("should scale down with Prometheus", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@ -136,7 +136,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
})
ginkgo.Describe("with Custom Metric of type Object from Stackdriver", func() {
ginkgo.It("should scale down", func() {
ginkgo.It("should scale down", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@ -157,7 +157,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale down to 0", func() {
ginkgo.It("should scale down to 0", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(0)
@ -180,7 +180,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
})
ginkgo.Describe("with External Metric from Stackdriver", func() {
ginkgo.It("should scale down with target value", func() {
ginkgo.It("should scale down with target value", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
@ -204,7 +204,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale down with target average value", func() {
ginkgo.It("should scale down with target average value", func(ctx context.Context) {
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
@ -228,7 +228,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale up with two metrics", func() {
ginkgo.It("should scale up with two metrics", func(ctx context.Context) {
initialReplicas := 1
// metric 1 would cause a scale down, if not for metric 2
metric1Value := externalMetricValue
@ -271,7 +271,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
})
ginkgo.Describe("with multiple metrics of different types", func() {
ginkgo.It("should scale up when one metric is missing (Pod and External metrics)", func() {
ginkgo.It("should scale up when one metric is missing (Pod and External metrics)", func(ctx context.Context) {
initialReplicas := 1
// First metric a pod metric which is missing.
// Second metric is external metric which is present, it should cause scale up.
@ -300,7 +300,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func() {
ginkgo.It("should scale up when one metric is missing (Resource and Object metrics)", func(ctx context.Context) {
initialReplicas := 1
metricValue := int64(100)
// First metric a resource metric which is missing (no consumption).
@ -320,7 +320,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func() {
ginkgo.It("should not scale down when one metric is missing (Container Resource and External Metrics)", func(ctx context.Context) {
initialReplicas := 2
// First metric a container resource metric which is missing.
// Second metric is external metric which is present, it should cause scale down if the first metric wasn't missing.
@ -350,7 +350,7 @@ var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod aut
tc.Run()
})
ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func() {
ginkgo.It("should not scale down when one metric is missing (Pod and Object Metrics)", func(ctx context.Context) {
initialReplicas := 2
metricValue := int64(100)
// First metric an object metric which is missing.

View File

@ -104,7 +104,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// This test is separated because it is slow and need to run serially.
// Will take around 5 minutes to run on a 4 nodes cluster.
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) {
numNodes, err := e2enode.TotalRegistered(c)
framework.ExpectNoError(err)
@ -168,7 +168,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
framework.ExpectNoError(err)
})
ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
ginkgo.It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func(ctx context.Context) {
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))

View File

@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"time"
"github.com/onsi/ginkgo/v2"
@ -46,47 +47,47 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleDown+titleAverageUtilization, func() {
ginkgo.It(titleDown+titleAverageUtilization, func(ctx context.Context) {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f)
})
})
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f)
})
})
ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
ginkgo.It(titleUp, func() {
ginkgo.It(titleUp, func(ctx context.Context) {
scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleDown, func() {
ginkgo.It(titleDown, func(ctx context.Context) {
scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
})
// These tests take ~20 minutes each.
ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
ginkgo.It(titleUp+" and verify decision stability", func() {
ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) {
scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
})
ginkgo.It(titleDown+" and verify decision stability", func() {
ginkgo.It(titleDown+" and verify decision stability", func(ctx context.Context) {
scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
})
})
ginkgo.Describe("ReplicationController light", func() {
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
ginkgo.It("Should scale from 1 pod to 2 pods", func(ctx context.Context) {
st := &HPAScaleTest{
initPods: 1,
initCPUTotal: 150,
@ -100,7 +101,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
}
st.run("rc-light", e2eautoscaling.KindRC, f)
})
ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func() {
ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) {
st := &HPAScaleTest{
initPods: 2,
initCPUTotal: 50,
@ -118,18 +119,18 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
// ContainerResource CPU autoscaling on idle sidecar
ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() {
ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) {
scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
// ContainerResource CPU autoscaling on busy sidecar
ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() {
ginkgo.It("Should not scale up on a busy sidecar with an idle application", func(ctx context.Context) {
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f)
})
})
ginkgo.Describe("CustomResourceDefinition", func() {
ginkgo.It("Should scale with a CRD targetRef", func() {
ginkgo.It("Should scale with a CRD targetRef", func(ctx context.Context) {
scaleTest := &HPAScaleTest{
initPods: 1,
initCPUTotal: 150,
@ -151,19 +152,19 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: M
f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f)
})
})
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
ginkgo.It(titleUp+titleAverageValue, func(ctx context.Context) {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f)
})
})

View File

@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"time"
autoscalingv2 "k8s.io/api/autoscaling/v2"
@ -53,7 +54,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
waitBuffer := 1 * time.Minute
ginkgo.Describe("with short downscale stabilization window", func() {
ginkgo.It("should scale down soon after the stabilization period", func() {
ginkgo.It("should scale down soon after the stabilization period", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
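
The stabilization window exercised by this group of tests is part of the HPA v2 behavior stanza; a hedged sketch of a short downscale window, where the 60-second value and helper name are illustrative:

```
package example

import autoscalingv2 "k8s.io/api/autoscaling/v2"

// shortDownscaleWindow lets the HPA act on a scale-down recommendation after
// 60 seconds instead of the default five minutes.
func shortDownscaleWindow() *autoscalingv2.HorizontalPodAutoscalerBehavior {
	window := int32(60)
	return &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2.HPAScalingRules{
			StabilizationWindowSeconds: &window,
		},
	}
}
```
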
@ -94,7 +95,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
})
ginkgo.Describe("with long upscale stabilization window", func() {
ginkgo.It("should scale up only after the stabilization period", func() {
ginkgo.It("should scale up only after the stabilization period", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
@ -135,7 +136,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
})
ginkgo.Describe("with autoscaling disabled", func() {
ginkgo.It("shouldn't scale up", func() {
ginkgo.It("shouldn't scale up", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
@ -170,7 +171,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
})
ginkgo.It("shouldn't scale down", func() {
ginkgo.It("shouldn't scale down", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica
@ -213,7 +214,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
targetCPUUtilizationPercent := 25
usageForSingleReplica := 45
ginkgo.It("should scale up no more than given number of Pods per minute", func() {
ginkgo.It("should scale up no more than given number of Pods per minute", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
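
The per-minute pod limit in these tests corresponds to a Pods-type scaling policy in the same behavior stanza; a minimal sketch capping scale-up at one Pod per minute, with the values and helper name being illustrative:

```
package example

import autoscalingv2 "k8s.io/api/autoscaling/v2"

// onePodPerMinuteUp caps scale-up so the HPA may add at most one Pod in any
// 60-second period.
func onePodPerMinuteUp() *autoscalingv2.HorizontalPodAutoscalerBehavior {
	return &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Policies: []autoscalingv2.HPAScalingPolicy{{
				Type:          autoscalingv2.PodsScalingPolicy,
				Value:         1,
				PeriodSeconds: 60,
			}},
		},
	}
}
```
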
@ -255,7 +256,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
})
ginkgo.It("should scale down no more than given number of Pods per minute", func() {
ginkgo.It("should scale down no more than given number of Pods per minute", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica
@ -303,7 +304,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
targetCPUUtilizationPercent := 25
usageForSingleReplica := 45
ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() {
ginkgo.It("should scale up no more than given percentage of current Pods per minute", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
@ -346,7 +347,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
})
ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() {
ginkgo.It("should scale down no more than given percentage of current Pods per minute", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 7
initCPUUsageTotal := initPods * usageForSingleReplica
@ -393,7 +394,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.Describe("with both scale up and down controls configured", func() {
waitBuffer := 2 * time.Minute
ginkgo.It("should keep recommendation within the range over two stabilization windows", func() {
ginkgo.It("should keep recommendation within the range over two stabilization windows", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
@ -444,7 +445,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaited < waitDeadline, true, "waited %s, wanted less than %s", timeWaited, waitDeadline)
})
ginkgo.It("should keep recommendation within the range with stabilization window and pod limit rate", func() {
ginkgo.It("should keep recommendation within the range with stabilization window and pod limit rate", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica

View File

@ -241,7 +241,7 @@ var _ = SIGDescribe("Addon update", func() {
})
// WARNING: the test is not parallel-friendly!
ginkgo.It("should propagate add-on file changes [Slow]", func() {
ginkgo.It("should propagate add-on file changes [Slow]", func(ctx context.Context) {
// This test requires:
// - SSH
// - master access

View File

@ -17,6 +17,8 @@ limitations under the License.
package apps
import (
"context"
"k8s.io/kubernetes/test/e2e/cloud/gcp/common"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@ -40,7 +42,7 @@ var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() {
testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
ginkgo.Describe("stateful upgrade", func() {
ginkgo.It("should maintain a functioning cluster", func() {
ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(f.ClientSet)
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)

View File

@ -17,6 +17,8 @@ limitations under the License.
package auth
import (
"context"
"k8s.io/kubernetes/test/e2e/cloud/gcp/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
@ -37,7 +39,7 @@ var _ = SIGDescribe("ServiceAccount admission controller migration [Feature:Boun
testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
ginkgo.Describe("master upgrade", func() {
ginkgo.It("should maintain a functioning cluster", func() {
ginkgo.It("should maintain a functioning cluster", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)

View File

@ -17,6 +17,8 @@ limitations under the License.
package gcp
import (
"context"
"k8s.io/kubernetes/test/e2e/cloud/gcp/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
@ -58,7 +60,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
// Create the frameworks here because we can only create them
// in a "Describe".
ginkgo.Describe("master upgrade", func() {
ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)
@ -75,7 +77,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
})
ginkgo.Describe("cluster upgrade", func() {
ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)
@ -95,7 +97,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
ginkgo.Describe("cluster downgrade", func() {
ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)

View File

@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"os/exec"
@ -37,7 +38,7 @@ var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
})
ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() {
ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) {
framework.Logf("Start create node pool test")
testCreateDeleteNodePool(f, "test-pool")
})

View File

@ -227,7 +227,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
verifyRCs(c, ns, existingRCs)
}
ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func() {
ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) {
zone := framework.TestContext.CloudConfig.Zone
step(None, "")
numAdditionalReplicas := 2
@ -239,7 +239,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
}
})
ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func() {
ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func(ctx context.Context) {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)
@ -257,7 +257,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
}
})
ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() {
ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func(ctx context.Context) {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)

View File

@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"net"
"net/http"
@ -47,7 +48,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
})
// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func(ctx context.Context) {
result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
framework.ExpectNoError(err)
@ -55,7 +56,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
result.StatusCode(&statusCode)
framework.ExpectNotEqual(statusCode, http.StatusOK)
})
ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func(ctx context.Context) {
result, err := e2ekubelet.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
framework.ExpectNoError(err)
@ -68,7 +69,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
for _, port := range disabledPorts {
port := port
ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func(ctx context.Context) {
portClosedTest(f, node, port)
})
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package network
import (
"context"
"fmt"
"k8s.io/kubernetes/test/e2e/cloud/gcp/common"
@ -55,7 +56,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
})
ginkgo.Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() {
ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)
@ -73,7 +74,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
})
ginkgo.Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() {
ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)

View File

@ -17,6 +17,8 @@ limitations under the License.
package node
import (
"context"
"k8s.io/kubernetes/test/e2e/cloud/gcp/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
@ -37,7 +39,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
ginkgo.Describe("master upgrade", func() {
ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)
@ -50,7 +52,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
})
})
ginkgo.Describe("cluster upgrade", func() {
ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)
@ -63,7 +65,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
})
})
ginkgo.Describe("cluster downgrade", func() {
ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func(ctx context.Context) {
upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery())
framework.ExpectNoError(err)

View File

@ -101,7 +101,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
framework.ExpectNoError(err)
})
ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
ginkgo.It("node lease should be deleted when corresponding node is deleted", func(ctx context.Context) {
leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
framework.ExpectNoError(err)

View File

@ -94,25 +94,25 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
f = framework.NewDefaultFramework("reboot")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func() {
ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func(ctx context.Context) {
// clean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil)
})
ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func() {
ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func(ctx context.Context) {
// unclean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func() {
ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func(ctx context.Context) {
// kernel panic
// We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func() {
ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func(ctx context.Context) {
// switch the network interface off for a while to simulate a network outage
// We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
cmd := "nohup sh -c '" +
@ -133,7 +133,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
testReboot(f.ClientSet, cmd, nil)
})
ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func(ctx context.Context) {
// tell the firewall to drop all inbound packets for a while
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
// We still accept packages send from localhost to prevent monit from restarting kubelet.
@ -141,7 +141,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath))
})
ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func(ctx context.Context) {
// tell the firewall to drop all outbound packets for a while
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
// We still accept packages send to localhost to prevent monit from restarting kubelet.

View File

@ -91,7 +91,7 @@ var _ = SIGDescribe("Recreate [Feature:Recreate]", func() {
}
})
ginkgo.It("recreate nodes and ensure they function upon restart", func() {
ginkgo.It("recreate nodes and ensure they function upon restart", func(ctx context.Context) {
testRecreate(f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames)
})
})

View File

@ -112,7 +112,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
})
ginkgo.It("should be able to delete nodes", func() {
ginkgo.It("should be able to delete nodes", func(ctx context.Context) {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
@ -142,7 +142,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
})
// TODO: Bug here - testName is not correct
ginkgo.It("should be able to add nodes", func() {
ginkgo.It("should be able to add nodes", func(ctx context.Context) {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"

View File

@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
@ -87,7 +88,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
}
})
ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() {
ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func(ctx context.Context) {
ginkgo.By("restarting all of the nodes")
err := common.RestartNodes(f.ClientSet, originalNodes)
framework.ExpectNoError(err)

View File

@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
c = f.ClientSet
})
ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func(ctx context.Context) {
ginkgo.By("deleting a node on the cloud provider")
nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c)

View File

@ -17,6 +17,8 @@ limitations under the License.
package network
import (
"context"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
@ -79,7 +81,7 @@ var _ = SIGDescribe("Networking", func() {
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
checkPodToPodConnectivity(config, "http", e2enetwork.EndpointHTTPPort)
})
@ -90,7 +92,7 @@ var _ = SIGDescribe("Networking", func() {
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
checkPodToPodConnectivity(config, "udp", e2enetwork.EndpointUDPPort)
})
@ -102,7 +104,7 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/
framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
@ -119,7 +121,7 @@ var _ = SIGDescribe("Networking", func() {
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
This test is marked LinuxOnly it breaks when using Overlay networking with Windows.
*/
framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
for _, endpointPod := range config.EndpointPods {
err := config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
@ -130,13 +132,13 @@ var _ = SIGDescribe("Networking", func() {
})
// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func() {
ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort)
})
// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func() {
ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482")
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
for _, endpointPod := range config.EndpointPods {

View File

@ -42,7 +42,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap, from environment field
Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment.
*/
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func(ctx context.Context) {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
@ -90,7 +90,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap, from environment variables
Description: Create a Pod with a environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
@ -135,12 +135,12 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap, with empty-key
Description: Attempt to create a ConfigMap with an empty key. The creation MUST fail.
*/
framework.ConformanceIt("should fail to create ConfigMap with empty key", func() {
framework.ConformanceIt("should fail to create ConfigMap with empty key", func(ctx context.Context) {
configMap, err := newConfigMapWithEmptyKey(f)
framework.ExpectError(err, "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
})
ginkgo.It("should update ConfigMap successfully", func() {
ginkgo.It("should update ConfigMap successfully", func(ctx context.Context) {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
@ -166,7 +166,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Attempt to create a ConfigMap. Patch the created ConfigMap. Fetching the ConfigMap MUST reflect changes.
By fetching all the ConfigMaps via a Label selector it MUST find the ConfigMap by it's static label and updated value. The ConfigMap must be deleted by Collection.
*/
framework.ConformanceIt("should run through a ConfigMap lifecycle", func() {
framework.ConformanceIt("should run through a ConfigMap lifecycle", func(ctx context.Context) {
testNamespaceName := f.Namespace.Name
testConfigMapName := "test-configmap" + string(uuid.NewUUID())
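For reference, the ConfigMap specs above consume values through the container environment both per key and wholesale; a minimal, illustrative sketch using the core/v1 types (pod name, key, and image tag are placeholders, not from this commit):

```go
package e2edemo

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// configMapEnvPod wires ConfigMap data into the environment two ways: a
// single key via valueFrom, and the whole ConfigMap via envFrom.
func configMapEnvPod(ns, configMapName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "configmap-env-demo", Namespace: ns},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "env-test",
				Image:   "registry.k8s.io/e2e-test-images/busybox:1.29-2", // tag is illustrative
				Command: []string{"sh", "-c", "env"},
				Env: []v1.EnvVar{{
					Name: "CONFIG_DATA_1",
					ValueFrom: &v1.EnvVarSource{
						ConfigMapKeyRef: &v1.ConfigMapKeySelector{
							LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
							Key:                  "data-1",
						},
					},
				}},
				EnvFrom: []v1.EnvFromSource{{
					ConfigMapRef: &v1.ConfigMapEnvSource{
						LocalObjectReference: v1.LocalObjectReference{Name: configMapName},
					},
				}},
			}},
		},
	}
}
```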

View File

@ -69,7 +69,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod readiness probe, with initial delay
Description: Create a Pod that is configured with a initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) {
containerName := "test-webserver"
p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
@ -105,7 +105,7 @@ var _ = SIGDescribe("Probing container", func() {
Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created,
then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) {
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
gomega.Consistently(func() (bool, error) {
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
@ -132,7 +132,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using local file, restart
Description: Create a Pod with liveness probe that uses ExecAction handler to cat /temp/health file. The Container deletes the file /temp/health after 10 second, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: execHandler([]string{"cat", "/tmp/health"}),
@ -149,7 +149,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using local file, no restart
Description: Pod is created with liveness probe that uses 'exec' command to cat /temp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: execHandler([]string{"cat", "/tmp/health"}),
@ -166,7 +166,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using http endpoint, restart
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: httpGetHandler("/healthz", 8080),
InitialDelaySeconds: 15,
@ -181,7 +181,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using tcp socket, no restart
Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but the socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() {
framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: tcpSocketHandler(8080),
InitialDelaySeconds: 15,
@ -196,7 +196,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment every time health check fails, measure up to 5 restart.
*/
framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func() {
framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: httpGetHandler("/healthz", 8080),
InitialDelaySeconds: 5,
@ -212,7 +212,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using http endpoint, failure
Description: A Pod is created with liveness probe on http endpoint '/'. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: httpGetHandler("/", 80),
InitialDelaySeconds: 15,
@ -228,7 +228,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, container exec timeout, restart
Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod.
*/
ginkgo.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func() {
ginkgo.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}),
@ -245,7 +245,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod readiness probe, container exec timeout, not ready
Description: A Pod is created with readiness probe with a Exec action on the Pod. If the readiness probe call does not return within the timeout specified, readiness probe MUST not be Ready.
*/
ginkgo.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func() {
ginkgo.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 600"}
readinessProbe := &v1.Probe{
ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}),
@ -262,7 +262,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, container exec timeout, restart
Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod. When ExecProbeTimeout feature gate is disabled and cluster is using dockershim, the timeout is ignored BUT a failing liveness probe MUST restart the Pod.
*/
ginkgo.It("should be restarted with a failing exec liveness probe that took longer than the timeout", func() {
ginkgo.It("should be restarted with a failing exec liveness probe that took longer than the timeout", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10 & exit 1"}),
@ -279,7 +279,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod http liveness probe, redirected to a local address
Description: A Pod is created with liveness probe on http endpoint /redirect?loc=healthz. The http handler on the /redirect will redirect to the /healthz endpoint, which will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
ginkgo.It("should be restarted with a local redirect http liveness probe", func() {
ginkgo.It("should be restarted with a local redirect http liveness probe", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: httpGetHandler("/redirect?loc="+url.QueryEscape("/healthz"), 8080),
InitialDelaySeconds: 15,
@ -294,7 +294,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod http liveness probe, redirected to a non-local address
Description: A Pod is created with liveness probe on http endpoint /redirect with a redirect to http://0.0.0.0/. The http handler on the /redirect should not follow the redirect, but instead treat it as a success and generate an event.
*/
ginkgo.It("should *not* be restarted with a non-local redirect http liveness probe", func() {
ginkgo.It("should *not* be restarted with a non-local redirect http liveness probe", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: httpGetHandler("/redirect?loc="+url.QueryEscape("http://0.0.0.0/"), 8080),
InitialDelaySeconds: 15,
@ -318,7 +318,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod startup probe restart
Description: A Pod is created with a failing startup probe. The Pod MUST be killed and restarted incrementing restart count to 1, even if liveness would succeed.
*/
ginkgo.It("should be restarted startup probe fails", func() {
ginkgo.It("should be restarted startup probe fails", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: v1.ProbeHandler{
@ -347,7 +347,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe delayed (long) by startup probe
Description: A Pod is created with failing liveness and startup probes. Liveness probe MUST NOT fail until startup probe expires.
*/
ginkgo.It("should *not* be restarted by liveness probe because startup probe delays it", func() {
ginkgo.It("should *not* be restarted by liveness probe because startup probe delays it", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: v1.ProbeHandler{
@ -376,7 +376,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe fails after startup success
Description: A Pod is created with failing liveness probe and delayed startup probe that uses 'exec' command to cat /temp/health file. The Container is started by creating /tmp/startup after 10 seconds, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
ginkgo.It("should be restarted by liveness probe after startup probe enables it", func() {
ginkgo.It("should be restarted by liveness probe after startup probe enables it", func(ctx context.Context) {
cmd := []string{"/bin/sh", "-c", "sleep 10; echo ok >/tmp/startup; sleep 600"}
livenessProbe := &v1.Probe{
ProbeHandler: v1.ProbeHandler{
@ -405,7 +405,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod readiness probe, delayed by startup probe
Description: A Pod is created with startup and readiness probes. The Container is started by creating /tmp/startup after 45 seconds, delaying the ready state by this amount of time. This is similar to the "Pod readiness probe, with initial delay" test.
*/
ginkgo.It("should be ready immediately after startupProbe succeeds", func() {
ginkgo.It("should be ready immediately after startupProbe succeeds", func(ctx context.Context) {
// Probe workers sleep at Kubelet start for a random time which is at most PeriodSeconds
// this test requires both readiness and startup workers running before updating statuses
// to avoid flakes, ensure sleep before startup (32s) > readinessProbe.PeriodSeconds
@ -460,7 +460,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Set terminationGracePeriodSeconds for livenessProbe
Description: A pod with a long terminationGracePeriod is created with a shorter livenessProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used.
*/
ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod]", func() {
ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "liveness-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000")
longGracePeriod := int64(500)
pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod
@ -488,7 +488,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Set terminationGracePeriodSeconds for startupProbe
Description: A pod with a long terminationGracePeriod is created with a shorter startupProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used.
*/
ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod]", func() {
ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, "startup-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000")
longGracePeriod := int64(500)
pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod
@ -521,7 +521,7 @@ var _ = SIGDescribe("Probing container", func() {
Testname: Pod liveness probe, using grpc call, success
Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero.
*/
ginkgo.It("should *not* be restarted with a GRPC liveness probe [NodeConformance]", func() {
ginkgo.It("should *not* be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: v1.ProbeHandler{
GRPC: &v1.GRPCAction{
@ -544,7 +544,7 @@ var _ = SIGDescribe("Probing container", func() {
Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint should fail because of wrong probe port.
When liveness probe does fail then the restart count should +1.
*/
ginkgo.It("should be restarted with a GRPC liveness probe [NodeConformance]", func() {
ginkgo.It("should be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) {
livenessProbe := &v1.Probe{
ProbeHandler: v1.ProbeHandler{
GRPC: &v1.GRPCAction{
@ -559,7 +559,7 @@ var _ = SIGDescribe("Probing container", func() {
RunLivenessTest(f, pod, 1, defaultObservationTimeout)
})
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func() {
ginkgo.It("should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe", func(ctx context.Context) {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
@ -623,7 +623,7 @@ done
framework.ExpectNoError(err)
})
ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func() {
ginkgo.It("should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating", func(ctx context.Context) {
podName := "probe-test-" + string(uuid.NewUUID())
podClient := e2epod.NewPodClient(f)
terminationGracePeriod := int64(30)
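For reference, the ProbeTerminationGracePeriod specs above attach a probe-level terminationGracePeriodSeconds that is shorter than the pod-level one; a minimal, illustrative sketch of such a probe (handler path and timings are placeholders, not from this commit):

```go
package e2edemo

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// livenessWithShortGracePeriod returns a liveness probe whose probe-level
// grace period overrides a longer pod-level TerminationGracePeriodSeconds.
func livenessWithShortGracePeriod() *v1.Probe {
	shortGracePeriod := int64(5)
	return &v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			HTTPGet: &v1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt(8080),
			},
		},
		InitialDelaySeconds:           10,
		FailureThreshold:              1,
		TerminationGracePeriodSeconds: &shortGracePeriod, // used instead of the pod-level value
	}
}
```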

View File

@ -17,6 +17,8 @@ limitations under the License.
package node
import (
"context"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
@ -36,7 +38,7 @@ var _ = SIGDescribe("Containers", func() {
Testname: Containers, without command and arguments
Description: Default command and arguments from the container image entrypoint MUST be used when Pod does not specify the container command
*/
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name)
pod.Spec.Containers[0].Args = nil
pod = e2epod.NewPodClient(f).Create(pod)
@ -56,7 +58,7 @@ var _ = SIGDescribe("Containers", func() {
Testname: Containers, with arguments
Description: Default command and from the container image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified.
*/
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func() {
framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
e2epodoutput.TestContainerOutput(f, "override arguments", pod, 0, []string{
"[/agnhost entrypoint-tester override arguments]",
@ -70,7 +72,7 @@ var _ = SIGDescribe("Containers", func() {
Testname: Containers, with command
Description: Default command from the container image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image.
*/
framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func() {
framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
@ -84,7 +86,7 @@ var _ = SIGDescribe("Containers", func() {
Testname: Containers, with command and arguments
Description: Default command and arguments from the container image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func(ctx context.Context) {
pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
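For reference, the Containers specs above exercise the Command/Args override matrix; a minimal, illustrative sketch of the fully overridden case (pod name, image tag, and arguments are placeholders, not from this commit):

```go
package e2edemo

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// entrypointOverridePod overrides both the image ENTRYPOINT (via Command)
// and CMD (via Args). Leaving Command nil keeps the image entrypoint;
// leaving Args nil keeps the image CMD.
func entrypointOverridePod(ns string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "entrypoint-demo", Namespace: ns},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "entrypoint-demo",
				Image:   "registry.k8s.io/e2e-test-images/agnhost:2.40",           // tag is illustrative
				Command: []string{"/agnhost"},                                     // overrides ENTRYPOINT
				Args:    []string{"entrypoint-tester", "override", "arguments"},   // overrides CMD
			}},
		},
	}
}
```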

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
@ -41,7 +42,7 @@ var _ = SIGDescribe("Downward API", func() {
Testname: DownwardAPI, environment for name, namespace and ip
Description: Downward API MUST expose Pod and Container fields as environment variables. Specify Pod Name, namespace and IP as environment variable in the Pod Spec are visible at runtime in the container.
*/
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -87,7 +88,7 @@ var _ = SIGDescribe("Downward API", func() {
Testname: DownwardAPI, environment for host ip
Description: Downward API MUST expose Pod and Container fields as environment variables. Specify host IP as environment variable in the Pod Spec are visible at runtime in the container.
*/
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -108,7 +109,7 @@ var _ = SIGDescribe("Downward API", func() {
testDownwardAPI(f, podName, env, expectations)
})
ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func() {
ginkgo.It("should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -163,7 +164,7 @@ var _ = SIGDescribe("Downward API", func() {
Testname: DownwardAPI, environment for CPU and memory limits and requests
Description: Downward API MUST expose CPU request and Memory request set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -214,7 +215,7 @@ var _ = SIGDescribe("Downward API", func() {
Testname: DownwardAPI, environment for default CPU and memory limits and requests
Description: Downward API MUST expose CPU request and Memory limits set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -264,7 +265,7 @@ var _ = SIGDescribe("Downward API", func() {
Testname: DownwardAPI, environment for Pod UID
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -291,7 +292,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.Context("Downward API tests for hugepages", func() {
ginkgo.It("should provide container's limits.hugepages-<pagesize> and requests.hugepages-<pagesize> as env vars", func() {
ginkgo.It("should provide container's limits.hugepages-<pagesize> and requests.hugepages-<pagesize> as env vars", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -346,7 +347,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPI
testDownwardAPIUsingPod(f, pod, env, expectations)
})
ginkgo.It("should provide default limits.hugepages-<pagesize> from node allocatable", func() {
ginkgo.It("should provide default limits.hugepages-<pagesize> from node allocatable", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
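For reference, the Downward API specs above expose pod metadata and container resources as environment variables via fieldRef and resourceFieldRef; a minimal, illustrative sketch (variable names and divisor are placeholders, not from this commit):

```go
package e2edemo

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// downwardAPIEnv builds the two kinds of downward API env vars: a fieldRef
// for pod metadata and a resourceFieldRef for container limits.
func downwardAPIEnv(containerName string) []v1.EnvVar {
	return []v1.EnvVar{
		{
			Name: "POD_NAME",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "metadata.name",
				},
			},
		},
		{
			Name: "CPU_LIMIT",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{
					ContainerName: containerName,
					Resource:      "limits.cpu",
					Divisor:       resource.MustParse("1m"),
				},
			},
		},
	}
}
```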

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
@ -42,7 +43,7 @@ var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() {
// Release: 1.25
// Testname: Ephemeral Container Creation
// Description: Adding an ephemeral container to pod.spec MUST result in the container running.
framework.ConformanceIt("will start an ephemeral container in an existing pod", func() {
framework.ConformanceIt("will start an ephemeral container in an existing pod", func(ctx context.Context) {
ginkgo.By("creating a target pod")
pod := podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-containers-target-pod"},

View File

@ -17,6 +17,8 @@ limitations under the License.
package node
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@ -41,7 +43,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: Environment variables, expansion
Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
Name: "FOO",
@ -70,7 +72,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: Environment variables, command expansion
Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
Name: "TEST_VAR",
@ -89,7 +91,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: Environment variables, command argument expansion
Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
Name: "TEST_VAR",
@ -109,7 +111,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: VolumeSubpathEnvExpansion, subpath expansion
Description: Make sure a container's subpath can be set using an expansion of environment variables.
*/
framework.ConformanceIt("should allow substituting values in a volume subpath", func() {
framework.ConformanceIt("should allow substituting values in a volume subpath", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
Name: "POD_NAME",
@ -149,7 +151,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: VolumeSubpathEnvExpansion, subpath with backticks
Description: Make sure a container's subpath can not be set using an expansion of environment variables when backticks are supplied.
*/
framework.ConformanceIt("should fail substituting values in a volume subpath with backticks [Slow]", func() {
framework.ConformanceIt("should fail substituting values in a volume subpath with backticks [Slow]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
@ -183,7 +185,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: VolumeSubpathEnvExpansion, subpath with absolute path
Description: Make sure a container's subpath can not be set using an expansion of environment variables when absolute path is supplied.
*/
framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path [Slow]", func() {
framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path [Slow]", func(ctx context.Context) {
absolutePath := "/tmp"
if framework.NodeOSDistroIs("windows") {
// Windows does not typically have a C:\tmp folder.
@ -222,7 +224,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
Testname: VolumeSubpathEnvExpansion, subpath ready from failed state
Description: Verify that a failing subpath expansion can be modified during the lifecycle of a container.
*/
framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow]", func() {
framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
@ -294,7 +296,7 @@ var _ = SIGDescribe("Variable Expansion", func() {
3. successful expansion of the subpathexpr isn't required for volume cleanup
*/
framework.ConformanceIt("should succeed in writing subpaths in container [Slow]", func() {
framework.ConformanceIt("should succeed in writing subpaths in container [Slow]", func(ctx context.Context) {
envVars := []v1.EnvVar{
{
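For reference, the Variable Expansion specs above rely on $(VAR) substitution in container commands and on subPathExpr for volume subpaths; a minimal, illustrative sketch of both mechanisms (container, volume, and variable names are placeholders, not from this commit):

```go
package e2edemo

import (
	v1 "k8s.io/api/core/v1"
)

// expansionContainer uses $(VAR) expansion in the command line and
// subPathExpr in the volume mount.
func expansionContainer() v1.Container {
	return v1.Container{
		Name:    "expansion-demo",
		Image:   "registry.k8s.io/e2e-test-images/busybox:1.29-2", // tag is illustrative
		Command: []string{"sh", "-c", "echo $(TEST_VAR) && sleep 3600"},
		Env: []v1.EnvVar{
			{
				Name: "POD_NAME",
				ValueFrom: &v1.EnvVarSource{
					FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
				},
			},
			{Name: "TEST_VAR", Value: "test-value"},
		},
		VolumeMounts: []v1.VolumeMount{{
			Name:        "workdir",
			MountPath:   "/volume_mount",
			SubPathExpr: "$(POD_NAME)", // expanded per pod when the volume is mounted
		}},
	}
}
```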

View File

@ -174,7 +174,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
and the system is not going to restart any of these containers
when Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -220,7 +220,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
var events []watch.Event
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
defer cancel()
event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w,
recordEvents(events, conditions.PodCompleted),
@ -252,7 +252,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
and at least one container is still running or is in the process of being restarted
when Pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -301,7 +301,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
},
}
var events []watch.Event
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
defer cancel()
event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodRunning))
framework.ExpectNoError(err)
@ -331,7 +331,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
and Pod has restarted for few occurrences
and pod has restart policy as RestartAlways.
*/
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -382,7 +382,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}
var events []watch.Event
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
defer cancel()
event, err := watchtools.Until(
ctx,
@ -455,7 +455,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
Description: Ensure that app container is not started
when at least one InitContainer fails to start and Pod has restart policy as RestartNever.
*/
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -507,7 +507,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
}
var events []watch.Event
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
defer cancel()
event, err := watchtools.Until(
ctx, startedPod.ResourceVersion, w,
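Beyond the signature change, the hunks above show the second half of the migration: the watch timeout is now derived from the Ginkgo-provided ctx rather than from context.Background(). A minimal, illustrative sketch of that pattern (function name, condition, and timeout are placeholders, not from this commit):

```go
package e2edemo

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodEvent derives a bounded context from the Ginkgo-provided ctx and
// waits on a watch with it, as the init-container test above now does.
func waitForPodEvent(ctx context.Context, lw cache.Watcher, resourceVersion string) (*watch.Event, error) {
	// Deriving from ctx (instead of context.Background()) means the watch also
	// stops when the surrounding spec times out or is aborted.
	ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, 5*time.Minute)
	defer cancel()

	return watchtools.Until(ctx, resourceVersion, lw, func(e watch.Event) (bool, error) {
		return e.Type == watch.Modified, nil
	})
}
```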

View File

@ -49,7 +49,7 @@ var _ = SIGDescribe("Kubelet", func() {
Testname: Kubelet, log output, default
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
*/
framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) {
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@ -107,7 +107,7 @@ var _ = SIGDescribe("Kubelet", func() {
Testname: Kubelet, failed pod, terminated reason
Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason.
*/
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) {
gomega.Eventually(func() error {
podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
@ -132,7 +132,7 @@ var _ = SIGDescribe("Kubelet", func() {
Testname: Kubelet, failed pod, delete
Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
*/
framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) {
err := podClient.Delete(context.TODO(), podName, metav1.DeleteOptions{})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
@ -145,7 +145,7 @@ var _ = SIGDescribe("Kubelet", func() {
Testname: Kubelet, hostAliases
Description: Create a Pod with hostAliases and a container with command to output /etc/hosts entries. Pod's logs MUST have matching entries of specified hostAliases to the output of /etc/hosts entries.
*/
framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func() {
framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func(ctx context.Context) {
pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil, "etc-hosts")
// Don't restart the Pod since it is expected to exit
pod.Spec.RestartPolicy = v1.RestartPolicyNever
@ -181,7 +181,7 @@ var _ = SIGDescribe("Kubelet", func() {
Description: Create a Pod with security context set with ReadOnlyRootFileSystem set to true. The Pod then tries to write to /file on the root filesystem; the write operation MUST fail as expected.
This test is marked LinuxOnly since Windows does not support creating containers with read-only access.
*/
framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
isReadOnly := true
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"strings"
"time"
@ -60,7 +61,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() {
3. For the Pod with hostNetwork=true, the /etc/hosts file MUST not be managed by the Kubelet.
This test is marked LinuxOnly since Windows cannot mount individual files in Containers.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
ginkgo.By("Setting up the test")
config.setup()

View File

@ -69,7 +69,7 @@ var _ = SIGDescribe("Lease", func() {
return just the remaining lease. Delete the lease; delete MUST be successful. Get the lease; get
MUST return not found error.
*/
framework.ConformanceIt("lease API should be available", func() {
framework.ConformanceIt("lease API should be available", func(ctx context.Context) {
leaseClient := f.ClientSet.CoordinationV1().Leases(f.Namespace.Name)
name := "lease"

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"fmt"
"strings"
"time"
@ -131,7 +132,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, post start exec hook
Description: When a post start handler is specified in the container lifecycle using an 'Exec' action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PostStart: &v1.LifecycleHandler{
Exec: &v1.ExecAction{
@ -148,7 +149,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, prestop exec hook
Description: When a pre-stop handler is specified in the container lifecycle using an 'Exec' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PreStop: &v1.LifecycleHandler{
Exec: &v1.ExecAction{
@ -164,7 +165,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, post start http hook
Description: When a post start handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed.
*/
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PostStart: &v1.LifecycleHandler{
HTTPGet: &v1.HTTPGetAction{
@ -186,7 +187,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, poststart https hook
Description: When a post-start handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a post-start that invokes the server pod to validate that the post-start is executed.
*/
ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func() {
ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PostStart: &v1.LifecycleHandler{
HTTPGet: &v1.HTTPGetAction{
@ -209,7 +210,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, prestop http hook
Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
*/
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PreStop: &v1.LifecycleHandler{
HTTPGet: &v1.HTTPGetAction{
@ -231,7 +232,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
Testname: Pod Lifecycle, prestop https hook
Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
*/
ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func() {
ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) {
lifecycle := &v1.Lifecycle{
PreStop: &v1.LifecycleHandler{
HTTPGet: &v1.HTTPGetAction{

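For reference, the handlers exercised by the lifecycle specs above are declared on the container spec. A hedged sketch of the two handler shapes used (Exec and HTTPGet), built from k8s.io/api/core/v1 types; the target address, port, and commands are illustrative, not copied from this diff:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// lifecycleExample declares a post-start Exec hook and a pre-stop HTTPGet hook,
// mirroring the handler shapes the specs above construct.
func lifecycleExample(targetIP string) *v1.Lifecycle {
	return &v1.Lifecycle{
		PostStart: &v1.LifecycleHandler{
			Exec: &v1.ExecAction{
				// Runs inside the container right after it starts.
				Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
			},
		},
		PreStop: &v1.LifecycleHandler{
			HTTPGet: &v1.HTTPGetAction{
				// Issued by the kubelet just before the container is stopped.
				Path: "/echo?msg=prestop",
				Port: intstr.FromInt(8080),
				Host: targetIP,
			},
		},
	}
}
```
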
View File

@ -49,7 +49,7 @@ var _ = SIGDescribe("NodeLease", func() {
})
ginkgo.Context("NodeLease", func() {
ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func(ctx context.Context) {
leaseClient := f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease)
var (
err error
@ -87,7 +87,7 @@ var _ = SIGDescribe("NodeLease", func() {
time.Duration(*lease.Spec.LeaseDurationSeconds/4)*time.Second)
})
ginkgo.It("should have OwnerReferences set", func() {
ginkgo.It("should have OwnerReferences set", func(ctx context.Context) {
leaseClient := f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease)
var (
err error
@ -111,7 +111,7 @@ var _ = SIGDescribe("NodeLease", func() {
}
})
ginkgo.It("the kubelet should report node status infrequently", func() {
ginkgo.It("the kubelet should report node status infrequently", func(ctx context.Context) {
ginkgo.By("wait until node is ready")
e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)

View File

@ -36,7 +36,7 @@ var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() {
f := framework.NewDefaultFramework("pod-os-rejection")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
ginkgo.Context("Kubelet", func() {
ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func() {
ginkgo.It("should reject pod when the node OS doesn't match pod's OS", func(ctx context.Context) {
linuxNode, err := findLinuxNode(f)
framework.ExpectNoError(err)
pod := &v1.Pod{

View File

@ -201,7 +201,7 @@ var _ = SIGDescribe("Pods", func() {
Testname: Pods, assigned hostip
Description: Create a Pod. Pod status MUST return successfully and contain a valid IP address.
*/
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) {
name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -223,7 +223,7 @@ var _ = SIGDescribe("Pods", func() {
Testname: Pods, lifecycle
Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. The Pod is then deleted and the pod deletion timestamp is observed. The watch MUST return the pod deleted event. Query with the original selector for the Pod MUST return empty list.
*/
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -266,7 +266,7 @@ var _ = SIGDescribe("Pods", func() {
_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
defer w.Stop()
ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
defer cancelCtx()
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
framework.Failf("Timeout while waiting to Pod informer to sync")
@ -341,7 +341,7 @@ var _ = SIGDescribe("Pods", func() {
Testname: Pods, update
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
*/
framework.ConformanceIt("should be updated [NodeConformance]", func() {
framework.ConformanceIt("should be updated [NodeConformance]", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -395,7 +395,7 @@ var _ = SIGDescribe("Pods", func() {
Testname: Pods, ActiveDeadlineSeconds
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate if the specified time elapses.
*/
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func(ctx context.Context) {
ginkgo.By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -441,7 +441,7 @@ var _ = SIGDescribe("Pods", func() {
Testname: Pods, service environment variables
Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then the fooservice environment variables MUST be available from this new Pod. The newly created Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
*/
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func(ctx context.Context) {
// Make a pod that will be a service.
// This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID())
@ -533,7 +533,7 @@ var _ = SIGDescribe("Pods", func() {
Description: A Pod is created. Websocket is created to retrieve exec command output from this pod.
Message retrieved from Websocket MUST match with expected exec command output.
*/
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func(ctx context.Context) {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "unable to get base config")
@ -615,7 +615,7 @@ var _ = SIGDescribe("Pods", func() {
Description: A Pod is created. Websocket is created to retrieve log of a container from this pod.
Message retrieved from Websocket MUST match with container's output.
*/
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func(ctx context.Context) {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "unable to get base config")
@ -673,7 +673,7 @@ var _ = SIGDescribe("Pods", func() {
})
// Slow (~7 mins)
ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func(ctx context.Context) {
podName := "pod-back-off-image"
containerName := "back-off"
pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
@ -714,7 +714,7 @@ var _ = SIGDescribe("Pods", func() {
})
// Slow by design (~27 mins) issue #19027
ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func(ctx context.Context) {
podName := "back-off-cap"
containerName := "back-off-cap"
pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
@ -768,7 +768,7 @@ var _ = SIGDescribe("Pods", func() {
}
})
ginkgo.It("should support pod readiness gates [NodeConformance]", func() {
ginkgo.It("should support pod readiness gates [NodeConformance]", func(ctx context.Context) {
podName := "pod-ready"
readinessGate1 := "k8s.io/test-condition1"
readinessGate2 := "k8s.io/test-condition2"
@ -842,7 +842,7 @@ var _ = SIGDescribe("Pods", func() {
Description: A set of pods is created with a label selector which MUST be found when listed.
The set of pods is deleted and MUST NOT show up when listed by its label selector.
*/
framework.ConformanceIt("should delete a collection of pods", func() {
framework.ConformanceIt("should delete a collection of pods", func(ctx context.Context) {
podTestNames := []string{"test-pod-1", "test-pod-2", "test-pod-3"}
one := int64(1)
@ -893,7 +893,7 @@ var _ = SIGDescribe("Pods", func() {
patching the label and the pod data. When checking and replacing the PodStatus it MUST
succeed. It MUST succeed when deleting the Pod.
*/
framework.ConformanceIt("should run through the lifecycle of Pods and PodStatus", func() {
framework.ConformanceIt("should run through the lifecycle of Pods and PodStatus", func(ctx context.Context) {
podResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
testNamespaceName := f.Namespace.Name
testPodName := "pod-test"
@ -932,7 +932,7 @@ var _ = SIGDescribe("Pods", func() {
framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
ginkgo.By("watching for Pod to be ready")
ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
defer cancel()
_, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
if pod, ok := event.Object.(*v1.Pod); ok {
@ -1080,7 +1080,7 @@ var _ = SIGDescribe("Pods", func() {
MUST succeed. Given the patching of the pod status,
the fields MUST equal the new values.
*/
framework.ConformanceIt("should patch a pod status", func() {
framework.ConformanceIt("should patch a pod status", func(ctx context.Context) {
ns := f.Namespace.Name
podClient := f.ClientSet.CoreV1().Pods(ns)
podName := "pod-" + utilrand.String(5)

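One of the specs above ("should support pod readiness gates") relies on custom readiness gates declared in the pod spec. A sketch of that field with made-up condition types (k8s.io/api/core/v1 types; names are illustrative, not from this diff):

```go
package example

import v1 "k8s.io/api/core/v1"

// withReadinessGates adds two custom readiness gates; the pod only reports
// Ready once both condition types are set to True in pod.Status.Conditions.
func withReadinessGates(pod *v1.Pod) *v1.Pod {
	pod.Spec.ReadinessGates = []v1.PodReadinessGate{
		{ConditionType: "example.com/condition-1"},
		{ConditionType: "example.com/condition-2"},
	}
	return pod
}
```
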
View File

@ -50,7 +50,7 @@ var _ = SIGDescribe("PodTemplates", func() {
Description: Attempt to create a PodTemplate. Patch the created PodTemplate. Fetching the PodTemplate MUST reflect changes.
By fetching all the PodTemplates via a Label selector it MUST find the PodTemplate by its static label and updated value. The PodTemplate must be deleted.
*/
framework.ConformanceIt("should run the lifecycle of PodTemplates", func() {
framework.ConformanceIt("should run the lifecycle of PodTemplates", func(ctx context.Context) {
testNamespaceName := f.Namespace.Name
podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID())
@ -119,7 +119,7 @@ var _ = SIGDescribe("PodTemplates", func() {
Description: A set of Pod Templates is created with a label selector which MUST be found when listed.
The set of Pod Templates is deleted and MUST NOT show up when listed by its label selector.
*/
framework.ConformanceIt("should delete a collection of pod templates", func() {
framework.ConformanceIt("should delete a collection of pod templates", func(ctx context.Context) {
podTemplateNames := []string{"test-podtemplate-1", "test-podtemplate-2", "test-podtemplate-3"}
ginkgo.By("Create set of pod templates")
@ -173,7 +173,7 @@ var _ = SIGDescribe("PodTemplates", func() {
Attempt to replace the PodTemplate to include a new annotation
which MUST succeed. The annotation MUST be found in the new PodTemplate.
*/
framework.ConformanceIt("should replace a pod template", func() {
framework.ConformanceIt("should replace a pod template", func(ctx context.Context) {
ptClient := f.ClientSet.CoreV1().PodTemplates(f.Namespace.Name)
ptName := "podtemplate-" + utilrand.String(5)

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"fmt"
"github.com/onsi/ginkgo/v2"
@ -50,7 +51,7 @@ var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() {
notPrivilegedContainer: "not-privileged-container",
}
ginkgo.It("should enable privileged commands [LinuxOnly]", func() {
ginkgo.It("should enable privileged commands [LinuxOnly]", func(ctx context.Context) {
// Windows does not support privileged containers.
ginkgo.By("Creating a pod with a privileged container")
config.createPods()

View File

@ -49,7 +49,7 @@ var _ = SIGDescribe("Container Runtime", func() {
Testname: Container Runtime, Restart Policy, Pod Phases
Description: If the restart policy is set to 'Always', Pod MUST be restarted when terminated. If restart policy is 'OnFailure', Pod MUST be restarted only if it is terminated with non-zero exit code. If the restart policy is 'Never', Pod MUST never be restarted. All these three test cases MUST verify the restart counts accordingly.
*/
framework.ConformanceIt("should run with the expected status [NodeConformance]", func() {
framework.ConformanceIt("should run with the expected status [NodeConformance]", func(ctx context.Context) {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
@ -171,7 +171,7 @@ while true; do sleep 1; done
gomega.Expect(c.Delete()).To(gomega.Succeed())
}
ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func() {
ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) {
container := v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
@ -192,7 +192,7 @@ while true; do sleep 1; done
Testname: Container Runtime, TerminationMessagePath, non-root user and non-default path
Description: Create a pod with a container to run it as a non-root user with a custom TerminationMessagePath set. Pod redirects the output to the provided path successfully. When the container is terminated, the termination message MUST match the expected output logged in the provided custom path.
*/
framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]", func() {
framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]", func(ctx context.Context) {
container := v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
@ -213,7 +213,7 @@ while true; do sleep 1; done
Testname: Container Runtime, TerminationMessage, from container's log output of failing container
Description: Create a pod with a container. Container's output is recorded in log and container exits with an error. When container is terminated, termination message MUST match the expected output recorded from container's log.
*/
framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) {
container := v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
@ -229,7 +229,7 @@ while true; do sleep 1; done
Testname: Container Runtime, TerminationMessage, from log output of succeeding container
Description: Create a pod with a container. Container's output is recorded in log and container exits successfully without an error. When container is terminated, terminationMessage MUST have no content as the container succeeded.
*/
framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) {
container := v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
@ -245,7 +245,7 @@ while true; do sleep 1; done
Testname: Container Runtime, TerminationMessage, from file of succeeding container
Description: Create a pod with a container. Container's output is recorded in a file and the container exits successfully without an error. When container is terminated, terminationMessage MUST match with the content from file.
*/
framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) {
container := v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
@ -368,23 +368,23 @@ while true; do sleep 1; done
}
}
ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func() {
ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage)
imagePullTest(image, false, v1.PodPending, true, false)
})
ginkgo.It("should be able to pull image [NodeConformance]", func() {
ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) {
// NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows.
image := imageutils.GetE2EImage(imageutils.Agnhost)
imagePullTest(image, false, v1.PodRunning, false, false)
})
ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func() {
ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine)
imagePullTest(image, false, v1.PodPending, true, false)
})
ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func() {
ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) {
image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine)
isWindows := false
if framework.NodeOSDistroIs("windows") {

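The termination-message specs above vary two fields on the container spec. A hedged sketch of those knobs using k8s.io/api/core/v1; the image, command, and path are placeholders:

```go
package example

import v1 "k8s.io/api/core/v1"

// terminationMessageContainer writes its message to a custom path and falls
// back to the log tail when that file is empty, which is the combination of
// behaviours the specs above assert on.
func terminationMessageContainer() v1.Container {
	return v1.Container{
		Name:    "termination-message-container",
		Image:   "busybox",
		Command: []string{"/bin/sh", "-c", "echo -n DONE > /dev/termination-custom-log"},
		// Defaults to /dev/termination-log when unset.
		TerminationMessagePath: "/dev/termination-custom-log",
		// FallbackToLogsOnError uses the last chunk of container log output
		// when the termination message file is empty and the container failed.
		TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
	}
}
```
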
View File

@ -52,13 +52,13 @@ var _ = SIGDescribe("RuntimeClass", func() {
Testname: Pod with the non-existing RuntimeClass is rejected.
Description: The Pod requesting the non-existing RuntimeClass must be rejected.
*/
framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func() {
framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := f.Namespace.Name + "-nonexistent"
expectPodRejection(f, e2enode.NewRuntimeClassPod(rcName))
})
// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed.
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func() {
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler, nil)
defer deleteRuntimeClass(f, rcName)
@ -82,7 +82,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
// This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes.
// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working.
ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() {
ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) {
// Requires special setup of test-handler which is only done in GCE kube-up environment
// see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076
e2eskipper.SkipUnlessProviderIs("gce")
@ -101,7 +101,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
depends on container runtime and preconfigured handler. Runtime-specific functionality
is not being tested here.
*/
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func() {
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, nil)
defer deleteRuntimeClass(f, rcName)
pod := e2epod.NewPodClient(f).Create(e2enode.NewRuntimeClassPod(rcName))
@ -126,7 +126,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
depends on container runtime and preconfigured handler. Runtime-specific functionality
is not being tested here.
*/
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func() {
framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{
PodFixed: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"),
@ -153,7 +153,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
Testname: Pod with the deleted RuntimeClass is rejected.
Description: Pod requesting the deleted RuntimeClass must be rejected.
*/
framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func() {
framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) {
rcName := createRuntimeClass(f, "delete-me", "runc", nil)
rcClient := f.ClientSet.NodeV1().RuntimeClasses()
@ -186,7 +186,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery document.
The runtimeclasses resource must support create, get, list, watch, update, patch, delete, and deletecollection.
*/
framework.ConformanceIt(" should support RuntimeClasses API operations", func() {
framework.ConformanceIt(" should support RuntimeClasses API operations", func(ctx context.Context) {
// Setup
rcVersion := "v1"
rcClient := f.ClientSet.NodeV1().RuntimeClasses()

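The RuntimeClass specs above create node.k8s.io/v1 objects and pods that reference them. A hedged sketch of both halves; the handler name and overhead values are illustrative, not taken from this diff:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// runtimeClassWithOverhead declares a RuntimeClass whose pods are charged a
// fixed CPU/memory overhead on top of their own requests.
func runtimeClassWithOverhead() *nodev1.RuntimeClass {
	return &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-handler"},
		Handler:    "runc",
		Overhead: &nodev1.Overhead{
			PodFixed: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("10m"),
				v1.ResourceMemory: resource.MustParse("1Mi"),
			},
		},
	}
}

// podUsingRuntimeClass opts a pod into the RuntimeClass above; scheduling is
// rejected if the named RuntimeClass does not exist.
func podUsingRuntimeClass(pod *v1.Pod, rcName string) *v1.Pod {
	pod.Spec.RuntimeClassName = &rcName
	return pod
}
```
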
View File

@ -43,7 +43,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets, pod environment field
Description: Create a secret. Create a Pod with Container that declares an environment variable which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret.
*/
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func(ctx context.Context) {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
@ -92,7 +92,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets, pod environment from source
Description: Create a secret. Create a Pod with Container that declares an environment variable using 'EnvFrom' which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
@ -137,7 +137,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets, with empty-key
Description: Attempt to create a Secret with an empty key. The creation MUST fail.
*/
framework.ConformanceIt("should fail to create secret due to empty secret key", func() {
framework.ConformanceIt("should fail to create secret due to empty secret key", func(ctx context.Context) {
secret, err := createEmptyKeySecretForTest(f)
framework.ExpectError(err, "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name)
})
@ -151,7 +151,7 @@ var _ = SIGDescribe("Secrets", func() {
The Secret is deleted by its static label.
Finally, Secrets are listed; the list MUST NOT include the originally created Secret.
*/
framework.ConformanceIt("should patch a secret", func() {
framework.ConformanceIt("should patch a secret", func(ctx context.Context) {
ginkgo.By("creating a secret")
secretTestName := "test-secret-" + string(uuid.NewUUID())

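The two env-related Secret specs above correspond to the two ways a container can consume a secret through its environment. A sketch with hypothetical names (k8s.io/api/core/v1 types; secret name, key, and prefix are placeholders):

```go
package example

import v1 "k8s.io/api/core/v1"

// secretEnv shows a single-key reference (env) and a whole-secret reference (envFrom).
func secretEnv(secretName string) ([]v1.EnvVar, []v1.EnvFromSource) {
	env := []v1.EnvVar{{
		Name: "SECRET_DATA",
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: secretName},
				Key:                  "data-1",
			},
		},
	}}
	envFrom := []v1.EnvFromSource{{
		// Every key in the secret becomes an environment variable, optionally prefixed.
		Prefix:    "p-",
		SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: secretName}},
	}}
	return env, envFrom
}
```
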
View File

@ -72,7 +72,7 @@ var _ = SIGDescribe("Security Context", func() {
}
}
ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) {
// with hostUsers=false the pod must use a new user namespace
podClient := e2epod.PodClientNS(f, f.Namespace.Name)
@ -110,7 +110,7 @@ var _ = SIGDescribe("Security Context", func() {
}
})
ginkgo.It("must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
ginkgo.It("must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) {
// with hostUsers=true the pod must use the host user namespace
pod := makePod(true)
// When running in the host's user namespace, the /proc/self/uid_map file content looks like:
@ -121,7 +121,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
ginkgo.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
ginkgo.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) {
// Create all volume types supported: configmap, secret, downwardAPI, projected.
// Create configmap.
@ -245,7 +245,7 @@ var _ = SIGDescribe("Security Context", func() {
})
})
ginkgo.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func() {
ginkgo.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesStatelessPodsSupport]", func(ctx context.Context) {
// Create configmap.
name := "userns-volumes-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
@ -344,7 +344,7 @@ var _ = SIGDescribe("Security Context", func() {
Description: Container is created with runAsUser option by passing uid 65534 to run as an unprivileged user. Pod MUST be in Succeeded phase.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/
framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(65534)
})
@ -355,7 +355,7 @@ var _ = SIGDescribe("Security Context", func() {
This e2e can not be promoted to Conformance because a Conformant platform may not allow running containers with 'uid 0' or running privileged operations.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
*/
ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(0)
})
})
@ -385,7 +385,7 @@ var _ = SIGDescribe("Security Context", func() {
}
}
ginkgo.It("should run with an explicit non-root user ID [LinuxOnly]", func() {
ginkgo.It("should run with an explicit non-root user ID [LinuxOnly]", func(ctx context.Context) {
// creates a pod with RunAsUser, which is not supported on Windows.
e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-nonroot-uid"
@ -395,7 +395,7 @@ var _ = SIGDescribe("Security Context", func() {
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1000"))
})
ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func() {
ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) {
// creates a pod with RunAsUser, which is not supported on Windows.
e2eskipper.SkipIfNodeOSDistroIs("windows")
name := "explicit-root-uid"
@ -407,7 +407,7 @@ var _ = SIGDescribe("Security Context", func() {
gomega.Expect(ev).NotTo(gomega.BeNil())
framework.ExpectEqual(ev.Reason, events.FailedToCreateContainer)
})
ginkgo.It("should run with an image specified user ID", func() {
ginkgo.It("should run with an image specified user ID", func(ctx context.Context) {
name := "implicit-nonroot-uid"
pod := makeNonRootPod(name, nonRootImage, nil)
podClient.Create(pod)
@ -415,7 +415,7 @@ var _ = SIGDescribe("Security Context", func() {
podClient.WaitForSuccess(name, framework.PodStartTimeout)
framework.ExpectNoError(podClient.MatchContainerOutput(name, name, "1234"))
})
ginkgo.It("should not run without a specified user ID", func() {
ginkgo.It("should not run without a specified user ID", func(ctx context.Context) {
name := "implicit-root-uid"
pod := makeNonRootPod(name, rootImage, nil)
pod = podClient.Create(pod)
@ -473,7 +473,7 @@ var _ = SIGDescribe("Security Context", func() {
At this moment we are not considering this test for Conformance due to use of SecurityContext.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access.
*/
ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(true)
})
@ -483,7 +483,7 @@ var _ = SIGDescribe("Security Context", func() {
Description: Container is configured to run with readOnlyRootFilesystem set to false.
Write operation MUST be allowed and Pod MUST be in Succeeded state.
*/
framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) {
createAndWaitUserPod(false)
})
})
@ -525,7 +525,7 @@ var _ = SIGDescribe("Security Context", func() {
Description: Create a container to run in unprivileged mode by setting pod's SecurityContext Privileged option as false. Pod MUST be in Succeeded phase.
[LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command.
*/
framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := createAndWaitUserPod(false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
@ -538,7 +538,7 @@ var _ = SIGDescribe("Security Context", func() {
}
})
ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func() {
ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) {
podName := createAndWaitUserPod(true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
@ -591,7 +591,7 @@ var _ = SIGDescribe("Security Context", func() {
This e2e Can not be promoted to Conformance as it is Container Runtime dependent and not all conformant platforms will require this behavior.
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation.
*/
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
@ -606,7 +606,7 @@ var _ = SIGDescribe("Security Context", func() {
When the container is run, container's output MUST match with expected output verifying container ran with given uid i.e. uid=1000.
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation.
*/
framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil {
@ -623,7 +623,7 @@ var _ = SIGDescribe("Security Context", func() {
This e2e Can not be promoted to Conformance as it is Container Runtime dependent and runtime may not allow to run.
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID.
*/
ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil {

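The security-context specs above toggle a handful of per-container fields. A sketch combining the ones exercised most often; the uid and the flag values are placeholders, not taken from this diff:

```go
package example

import v1 "k8s.io/api/core/v1"

// restrictedSecurityContext combines the knobs the specs above assert on:
// a non-root uid, no privilege escalation, a read-only root filesystem, and
// an explicitly unprivileged container.
func restrictedSecurityContext() *v1.SecurityContext {
	uid := int64(1000)
	noEscalation := false
	readOnlyRoot := true
	notPrivileged := false
	return &v1.SecurityContext{
		RunAsUser:                &uid,
		AllowPrivilegeEscalation: &noEscalation,
		ReadOnlyRootFilesystem:   &readOnlyRoot,
		Privileged:               &notPrivileged,
	}
}
```
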
View File

@ -74,7 +74,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
Description: Pod is created with kernel.shm_rmid_forced sysctl. Kernel.shm_rmid_forced must be set to 1
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls
*/
framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", func() {
framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", func(ctx context.Context) {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -120,7 +120,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
Description: Pod is created with one valid and two invalid sysctls. Pod should not apply invalid sysctls.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls
*/
framework.ConformanceIt("should reject invalid sysctls [MinimumKubeletVersion:1.21]", func() {
framework.ConformanceIt("should reject invalid sysctls [MinimumKubeletVersion:1.21]", func(ctx context.Context) {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -156,7 +156,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
})
// Pod is created with kernel.msgmax, an unsafe sysctl.
ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]", func() {
ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]", func(ctx context.Context) {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
@ -182,7 +182,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
Description: Pod is created with kernel/shm_rmid_forced sysctl. Slashes are supported as sysctl separators; the '/' separator is accepted in place of a '.'
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls
*/
ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", func() {
ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", func(ctx context.Context) {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{

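The sysctl specs above set their values through the pod-level security context. A minimal sketch (k8s.io/api/core/v1 types; the sysctl and value mirror the safe sysctl used in the specs):

```go
package example

import v1 "k8s.io/api/core/v1"

// withSysctls requests a safe sysctl for the pod; unsafe sysctls are rejected
// unless explicitly allowed on the node, which is what the specs above verify.
func withSysctls(pod *v1.Pod) *v1.Pod {
	pod.Spec.SecurityContext = &v1.PodSecurityContext{
		Sysctls: []v1.Sysctl{
			// Since 1.23 the slash form ("kernel/shm_rmid_forced") is accepted as well.
			{Name: "kernel.shm_rmid_forced", Value: "1"},
		},
	}
	return pod
}
```
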
View File

@ -44,7 +44,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, without mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, false, 0, nil)
})
@ -54,12 +54,12 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400'
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
})
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@ -71,11 +71,11 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, without mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithoutMappings(f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithoutMappings(f, true, 1001, nil)
@ -86,7 +86,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, with mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, false, 0, nil)
})
@ -96,7 +96,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400'
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doConfigMapE2EWithMappings(f, false, 0, &mode)
})
@ -106,11 +106,11 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, with mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doConfigMapE2EWithMappings(f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doConfigMapE2EWithMappings(f, true, 1001, nil)
@ -121,7 +121,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, update
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@ -172,7 +172,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, text data, binary data
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@ -237,7 +237,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, create, update and delete
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in an error reading that item(file).
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
@ -420,7 +420,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Testname: ConfigMap Volume, multiple volume maps
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
@ -501,7 +501,7 @@ var _ = SIGDescribe("ConfigMap", func() {
Try to update the ConfigMap's metadata (labels), the update must succeed.
Try to delete the ConfigMap, the deletion must succeed.
*/
framework.ConformanceIt("should be immutable if `immutable` field is set", func() {
framework.ConformanceIt("should be immutable if `immutable` field is set", func(ctx context.Context) {
name := "immutable"
configMap := newConfigMap(f, name)
@ -554,7 +554,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// The pod is in pending during volume creation until the configMap objects are available
// or until mounting the configMap volume times out. There is no configMap object defined for the pod, so it should return a timeout exception unless it is marked optional.
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
@ -563,7 +563,7 @@ var _ = SIGDescribe("ConfigMap", func() {
// A ConfigMap object is defined for the pod. If a key is specified which is not present in the ConfigMap,
// the volume setup will error during pod creation unless it is marked optional.
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)

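The ConfigMap volume specs above vary the mount between whole-map and per-item mappings and between default and explicit file modes. A sketch of a mapped, mode-restricted volume; the names, key, path, and mode are illustrative:

```go
package example

import v1 "k8s.io/api/core/v1"

// configMapVolume projects a single key of a ConfigMap to a custom path with
// 0400 permissions. Omitting Items projects every key under its own name, and
// omitting the modes falls back to the 0644 default the specs above rely on.
func configMapVolume(cmName string) v1.Volume {
	itemMode := int32(0400)
	return v1.Volume{
		Name: "configmap-volume",
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{Name: cmName},
				Items: []v1.KeyToPath{
					{Key: "data-1", Path: "path/to/data-1", Mode: &itemMode},
				},
			},
		},
	}
}
```
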
View File

@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
@ -36,7 +37,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.Context("Downward API tests for local ephemeral storage", func() {
ginkgo.It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
ginkgo.It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@ -64,7 +65,7 @@ var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorag
testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
})
ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func() {
ginkgo.It("should provide default limits.ephemeral-storage from node allocatable", func(ctx context.Context) {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{

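The ephemeral-storage Downward API specs above expose container resource values through resourceFieldRef environment variables. A sketch with illustrative variable names (k8s.io/api/core/v1 types):

```go
package example

import v1 "k8s.io/api/core/v1"

// ephemeralStorageEnv exposes the container's ephemeral-storage limit and
// request as environment variables via the Downward API.
func ephemeralStorageEnv() []v1.EnvVar {
	return []v1.EnvVar{
		{
			Name: "EPHEMERAL_STORAGE_LIMIT",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{Resource: "limits.ephemeral-storage"},
			},
		},
		{
			Name: "EPHEMERAL_STORAGE_REQUEST",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{Resource: "requests.ephemeral-storage"},
			},
		},
	}
}
```
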
View File

@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"fmt"
"time"
@ -50,7 +51,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, pod name
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
@ -65,7 +66,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Description: A Pod is configured with DownwardAPIVolumeSource with the volumesource mode set to -r-------- and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
@ -81,7 +82,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
@ -91,7 +92,7 @@ var _ = SIGDescribe("Downward API volume", func() {
})
})
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
podName := "metadata-volume-" + string(uuid.NewUUID())
@ -106,7 +107,7 @@ var _ = SIGDescribe("Downward API volume", func() {
})
})
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
podName := "metadata-volume-" + string(uuid.NewUUID())
@ -127,7 +128,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, update label
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
@ -159,7 +160,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, update annotations
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
@ -190,7 +191,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, CPU limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
@ -204,7 +205,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, memory limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
@ -218,7 +219,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, CPU request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
@ -232,7 +233,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, memory request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
@ -246,7 +247,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, CPU limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
@ -258,7 +259,7 @@ var _ = SIGDescribe("Downward API volume", func() {
Testname: DownwardAPI volume, memory limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
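
The DownwardAPIVolumeSource behind the podname and DefaultMode cases looks roughly like the sketch below; the volume name, file path, and any mode passed in are illustrative rather than the exact values the helpers use.

```go
package storage

import (
	v1 "k8s.io/api/core/v1"
)

// downwardAPIPodnameVolume projects metadata.name into <mountPath>/podname.
// A restrictive defaultMode such as 0400 can be passed to exercise the
// DefaultMode cases; nil keeps the API default (-rw-r--r--).
func downwardAPIPodnameVolume(defaultMode *int32) v1.Volume {
	return v1.Volume{
		Name: "podinfo",
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				DefaultMode: defaultMode,
				Items: []v1.DownwardAPIVolumeFile{{
					Path: "podname",
					FieldRef: &v1.ObjectFieldSelector{
						APIVersion: "v1",
						FieldPath:  "metadata.name",
					},
				}},
			},
		},
	}
}
```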

View File

@ -53,27 +53,27 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
e2eskipper.SkipIfNodeOSDistroIs("windows")
})
ginkgo.It("new files should be created with FSGroup ownership when container is root", func() {
ginkgo.It("new files should be created with FSGroup ownership when container is root", func(ctx context.Context) {
doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory)
})
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func() {
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func(ctx context.Context) {
doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory)
})
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func(ctx context.Context) {
doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory)
})
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func(ctx context.Context) {
doTest0644FSGroup(f, 0, v1.StorageMediumMemory)
})
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func() {
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault)
})
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func() {
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func(ctx context.Context) {
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory)
})
})
@ -84,7 +84,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumMemory)
})
@ -94,7 +94,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumMemory)
})
@ -104,7 +104,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumMemory)
})
@ -114,7 +114,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumMemory)
})
@ -124,7 +124,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumMemory)
})
@ -134,7 +134,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumMemory)
})
@ -144,7 +144,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumMemory)
})
@ -154,7 +154,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTestVolumeMode(f, 0, v1.StorageMediumDefault)
})
@ -164,7 +164,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, 0, v1.StorageMediumDefault)
})
@ -174,7 +174,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, 0, v1.StorageMediumDefault)
})
@ -184,7 +184,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, 0, v1.StorageMediumDefault)
})
@ -194,7 +194,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0644(f, nonRootUID, v1.StorageMediumDefault)
})
@ -204,7 +204,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0666(f, nonRootUID, v1.StorageMediumDefault)
})
@ -214,7 +214,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
doTest0777(f, nonRootUID, v1.StorageMediumDefault)
})
@ -224,7 +224,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Description: A Pod created with an 'emptyDir' Volume should share the volume between the containers in the pod. The two busybox image containers should share the volumes mounted to the pod.
The main container should wait until the sub container drops a file, and the main container should then access the shared data.
*/
framework.ConformanceIt("pod should support shared volumes between containers", func() {
framework.ConformanceIt("pod should support shared volumes between containers", func(ctx context.Context) {
var (
volumeName = "shared-data"
busyBoxMainVolumeMountPath = "/usr/share/volumeshare"
@ -296,7 +296,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
Testname: EmptyDir, Memory backed volume is sized to specified limit
Description: A Pod created with an 'emptyDir' Volume backed by memory should be sized to user provided value.
*/
ginkgo.It("pod should support memory backed volumes of specified size", func() {
ginkgo.It("pod should support memory backed volumes of specified size", func(ctx context.Context) {
var (
volumeName = "shared-data"
busyBoxMainVolumeMountPath = "/usr/share/volumeshare"
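
A sketch of the memory-backed emptyDir the sizing test above mounts; the 128Mi limit is an illustrative value, not the one the test actually uses.

```go
package storage

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// memoryBackedEmptyDir returns an emptyDir volume backed by tmpfs
// (medium "Memory") and capped at the given size limit.
func memoryBackedEmptyDir() v1.Volume {
	sizeLimit := resource.MustParse("128Mi")
	return v1.Volume{
		Name: "shared-data",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{
				Medium:    v1.StorageMediumMemory,
				SizeLimit: &sizeLimit,
			},
		},
	}
}
```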

View File

@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"fmt"
"os"
"path"
@ -48,7 +49,7 @@ var _ = SIGDescribe("HostPath", func() {
Create a Pod with host volume mounted. The volume mounted MUST be a directory with permissions mode -rwxrwxrwx and it MUST have the sticky bit (mode flag t) set.
This test is marked LinuxOnly since Windows does not support setting the sticky bit (mode flag t).
*/
ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
@ -65,7 +66,7 @@ var _ = SIGDescribe("HostPath", func() {
})
// This test requires mounting a folder into a container with write privileges.
ginkgo.It("should support r/w [NodeConformance]", func() {
ginkgo.It("should support r/w [NodeConformance]", func(ctx context.Context) {
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
@ -93,7 +94,7 @@ var _ = SIGDescribe("HostPath", func() {
})
})
ginkgo.It("should support subPath [NodeConformance]", func() {
ginkgo.It("should support subPath [NodeConformance]", func(ctx context.Context) {
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
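
The r/w and subPath cases pair a hostPath volume with VolumeMount.SubPath; a minimal sketch, assuming a /tmp host path and an illustrative container mount path:

```go
package storage

import (
	v1 "k8s.io/api/core/v1"
)

// hostPathSubPathMount returns a hostPath volume rooted at /tmp and a mount
// that only exposes the "sub-path" directory underneath it to the container.
func hostPathSubPathMount(volumeName string) (v1.Volume, v1.VolumeMount) {
	vol := v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
		},
	}
	mount := v1.VolumeMount{
		Name:      volumeName,
		MountPath: "/test-volume", // illustrative path
		SubPath:   "sub-path",
	}
	return vol, mount
}
```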

View File

@ -41,7 +41,7 @@ var _ = SIGDescribe("Projected combined", func() {
Testname: Projected Volume, multiple projections
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func(ctx context.Context) {
var err error
podName := "projected-volume-" + string(uuid.NewUUID())
secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID())

View File

@ -44,7 +44,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, volume mode default
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil)
})
@ -54,12 +54,12 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
})
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@ -71,11 +71,11 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, non-root user
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil)
@ -86,7 +86,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, mapped
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, false, 0, nil)
})
@ -96,7 +96,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doProjectedConfigMapE2EWithMappings(f, false, 0, &mode)
})
@ -106,11 +106,11 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, mapped, non-root user
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) {
doProjectedConfigMapE2EWithMappings(f, true, 0, nil)
})
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
doProjectedConfigMapE2EWithMappings(f, true, 1001, nil)
@ -121,7 +121,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, update
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to a new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the configMap to value-2.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@ -171,7 +171,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, create, update and delete
Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
@ -372,7 +372,7 @@ var _ = SIGDescribe("Projected configMap", func() {
Testname: Projected Volume, ConfigMap, multiple volume paths
Description: A Pod is created with a projected volume source 'ConfigMap' to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) {
var (
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "projected-configmap-volume"
@ -460,7 +460,7 @@ var _ = SIGDescribe("Projected configMap", func() {
//The pod is in pending during volume creation until the configMap objects are available
//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional.
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-configmap-volumes"
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
@ -469,7 +469,7 @@ var _ = SIGDescribe("Projected configMap", func() {
//ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional, during the pod creation.
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/configmap-volumes"
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)

View File

@ -17,6 +17,7 @@ limitations under the License.
package storage
import (
"context"
"fmt"
"time"
@ -50,7 +51,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, pod name
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
@ -65,7 +66,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
@ -81,7 +82,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
@ -91,7 +92,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
})
})
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options.
e2eskipper.SkipIfNodeOSDistroIs("windows")
podName := "metadata-volume-" + string(uuid.NewUUID())
@ -106,7 +107,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
})
})
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
e2eskipper.SkipIfNodeOSDistroIs("windows")
podName := "metadata-volume-" + string(uuid.NewUUID())
@ -127,7 +128,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, update labels
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
@ -159,7 +160,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, update annotation
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
@ -190,7 +191,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, CPU limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
@ -204,7 +205,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, memory limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
@ -218,7 +219,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, CPU request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
@ -232,7 +233,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, memory request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
@ -246,7 +247,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
@ -258,7 +259,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
Testname: Projected Volume, DownwardAPI, memory limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")

View File

@ -43,7 +43,7 @@ var _ = SIGDescribe("Projected secret", func() {
Testname: Projected Volume, Secrets, volume mode default
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
@ -53,7 +53,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0x400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
@ -64,7 +64,7 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
@ -75,7 +75,7 @@ var _ = SIGDescribe("Projected secret", func() {
Testname: Projected Volume, Secrets, mapped
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doProjectedSecretE2EWithMapping(f, nil)
})
@ -85,12 +85,12 @@ var _ = SIGDescribe("Projected secret", func() {
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doProjectedSecretE2EWithMapping(f, &mode)
})
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) {
var (
namespace2 *v1.Namespace
err error
@ -116,7 +116,7 @@ var _ = SIGDescribe("Projected secret", func() {
Testname: Projected Volume, Secrets, mapped, multiple paths
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -r-------- on the mapped volumes.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
@ -212,7 +212,7 @@ var _ = SIGDescribe("Projected secret", func() {
Testname: Projected Volume, Secrets, create, update delete
Description: Create a Pod with three containers with secrets, namely a create, update and delete container. Create Container when started MUST not have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
@ -411,7 +411,7 @@ var _ = SIGDescribe("Projected secret", func() {
//The secret is in pending during volume creation until the secret objects are available
//or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional.
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/projected-secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
@ -421,7 +421,7 @@ var _ = SIGDescribe("Projected secret", func() {
//Secret object defined for the pod, If a key is specified which is not present in the secret,
// the volume setup will error unless it is marked optional, during the pod creation.
//Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)

View File

@ -44,7 +44,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets Volume, default
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
@ -54,7 +54,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x400. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r-------- by default.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
@ -65,7 +65,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r----- by default.
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
@ -76,7 +76,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets Volume, mapping
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) {
doSecretE2EWithMapping(f, nil)
})
@ -86,7 +86,7 @@ var _ = SIGDescribe("Secrets", func() {
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and file mode set to 0x400. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -r--r--r--.
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() {
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
@ -96,7 +96,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets Volume, volume mode default, secret with same name in different namespace
Description: Create a secret with same name in two namespaces. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secrets from the mounted volume from the container runtime and only secrets which are associated with namespace where pod is created. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) {
var (
namespace2 *v1.Namespace
err error
@ -122,7 +122,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets Volume, mapping multiple volume paths
Description: Create a secret. Create a Pod with two secret volume sources configured into the container into two different custom paths. Pod MUST be able to read the secret from both of the mounted volumes at the two specified custom paths.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
@ -202,7 +202,7 @@ var _ = SIGDescribe("Secrets", func() {
Testname: Secrets Volume, create, update and delete
Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) {
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
@ -383,7 +383,7 @@ var _ = SIGDescribe("Secrets", func() {
Try to update the secret's metadata (labels); the update must succeed.
Try to delete the secret; the deletion must succeed.
*/
framework.ConformanceIt("should be immutable if `immutable` field is set", func() {
framework.ConformanceIt("should be immutable if `immutable` field is set", func(ctx context.Context) {
name := "immutable"
secret := secretForTest(f.Namespace.Name, name)
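The `immutable` field named in the test title is the `Immutable` boolean on the Secret object. A rough sketch of such a secret with illustrative names (this is not the `secretForTest` helper):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	immutable := true
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "immutable", Namespace: "default"},
		StringData: map[string]string{"data-1": "value-1"},
		// Once Immutable is set, the API server rejects updates to the data
		// and to the field itself; metadata updates and deletion still work.
		Immutable: &immutable,
	}
	fmt.Println(secret.Name, *secret.Immutable)
}
```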
@ -436,7 +436,7 @@ var _ = SIGDescribe("Secrets", func() {
// The pod stays pending during volume creation until the secret object becomes available
// or until mounting the secret volume times out. No secret object is defined for this pod, so pod creation should fail with a timeout unless the secret is marked optional.
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
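The "marked optional" escape hatch mentioned in the comment is the `Optional` field on the secret volume source. A hedged sketch with invented names (the real test uses the `createNonOptionalSecretPod` helper and leaves this unset):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	optional := true
	vol := v1.Volume{
		Name: "secret-volumes",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: "does-not-exist", // no such secret in the namespace
				// With Optional set, kubelet mounts an empty volume instead of
				// keeping the pod pending until the mount times out.
				Optional: &optional,
			},
		},
	}
	fmt.Println(vol.Name, *vol.VolumeSource.Secret.Optional)
}
```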
@ -446,7 +446,7 @@ var _ = SIGDescribe("Secrets", func() {
// A secret object is defined for the pod, but it references a key that is not present in the secret.
// During pod creation, the volume setup will fail unless that key is marked optional.
// Slow (~5 mins)
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)

View File

@ -43,6 +43,8 @@ limitations under the License.
package storage
import (
"context"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -74,7 +76,7 @@ var _ = SIGDescribe("Volumes", func() {
// NFS
////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
@ -98,7 +100,7 @@ var _ = SIGDescribe("Volumes", func() {
})
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func(ctx context.Context) {
config, _, serverHost := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)

View File

@ -52,7 +52,6 @@ func networkResources() app.Resources {
var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", func() {
f := framework.NewDefaultFramework("dra")
ctx := context.Background()
// The driver containers have to run with sufficient privileges to
// modify /var/lib/kubelet/plugins.
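The dropped `ctx := context.Background()` is the core of this PR: rather than sharing one never-cancelled context per `Describe`, every callback now receives its own context from Ginkgo. A simplified before/after sketch with an invented helper (`pollDriver`), not the actual DRA test code:

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// pollDriver stands in for any helper that should stop when the spec ends.
func pollDriver(ctx context.Context) error { return ctx.Err() }

// Old style: one context for the whole Describe, never cancelled per spec.
var _ = ginkgo.Describe("dra (old style)", func() {
	ctx := context.Background()
	ginkgo.It("registers plugin", func() {
		_ = pollDriver(ctx)
	})
})

// New style: Ginkgo v2 injects a per-spec context into the callback; it is
// cancelled when the spec times out or the suite is interrupted.
var _ = ginkgo.Describe("dra (new style)", func() {
	ginkgo.It("registers plugin", func(ctx context.Context) {
		_ = pollDriver(ctx)
	})
})
```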
@ -62,12 +61,12 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
nodes := NewNodes(f, 1, 1)
driver := NewDriver(f, nodes, networkResources) // All tests get their own driver instance.
b := newBuilder(f, driver)
ginkgo.It("registers plugin", func() {
ginkgo.It("registers plugin", func(ctx context.Context) {
ginkgo.By("the driver is running")
})
// This test does not pass at the moment because kubelet doesn't retry.
ginkgo.It("must retry NodePrepareResource", func() {
ginkgo.It("must retry NodePrepareResource", func(ctx context.Context) {
// We have exactly one host.
m := MethodInstance{driver.Nodenames()[0], NodePrepareResourceMethod}
@ -96,7 +95,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
framework.Fail("NodePrepareResource should have been called again")
}
})
ginkgo.It("must not run a pod if a claim is not reserved for it", func() {
ginkgo.It("must not run a pod if a claim is not reserved for it", func(ctx context.Context) {
parameters := b.parameters()
claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate)
pod := b.podExternal()
@ -119,7 +118,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
return nil
}, 20*time.Second, 200*time.Millisecond).Should(gomega.BeNil())
})
ginkgo.It("must unprepare resources for force-deleted pod", func() {
ginkgo.It("must unprepare resources for force-deleted pod", func(ctx context.Context) {
parameters := b.parameters()
claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate)
pod := b.podExternal()
@ -151,7 +150,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.parametersCounter = 1
b.classParametersName = b.parametersName()
ginkgo.It("supports claim and class parameters", func() {
ginkgo.It("supports claim and class parameters", func(ctx context.Context) {
classParameters := b.parameters("x", "y")
claimParameters := b.parameters()
pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
@ -170,7 +169,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
// claimTests tries out several different combinations of pods with
// claims, both inline and external.
claimTests := func(allocationMode resourcev1alpha1.AllocationMode) {
ginkgo.It("supports simple pod referencing inline resource claim", func() {
ginkgo.It("supports simple pod referencing inline resource claim", func(ctx context.Context) {
parameters := b.parameters()
pod, template := b.podInline(allocationMode)
b.create(ctx, parameters, pod, template)
@ -178,7 +177,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.testPod(f.ClientSet, pod)
})
ginkgo.It("supports inline claim referenced by multiple containers", func() {
ginkgo.It("supports inline claim referenced by multiple containers", func(ctx context.Context) {
parameters := b.parameters()
pod, template := b.podInlineMultiple(allocationMode)
b.create(ctx, parameters, pod, template)
@ -186,7 +185,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.testPod(f.ClientSet, pod)
})
ginkgo.It("supports simple pod referencing external resource claim", func() {
ginkgo.It("supports simple pod referencing external resource claim", func(ctx context.Context) {
parameters := b.parameters()
pod := b.podExternal()
b.create(ctx, parameters, b.externalClaim(allocationMode), pod)
@ -194,7 +193,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b.testPod(f.ClientSet, pod)
})
ginkgo.It("supports external claim referenced by multiple pods", func() {
ginkgo.It("supports external claim referenced by multiple pods", func(ctx context.Context) {
parameters := b.parameters()
pod1 := b.podExternal()
pod2 := b.podExternal()
@ -207,7 +206,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}
})
ginkgo.It("supports external claim referenced by multiple containers of multiple pods", func() {
ginkgo.It("supports external claim referenced by multiple containers of multiple pods", func(ctx context.Context) {
parameters := b.parameters()
pod1 := b.podExternalMultiple()
pod2 := b.podExternalMultiple()
@ -220,7 +219,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}
})
ginkgo.It("supports init containers", func() {
ginkgo.It("supports init containers", func(ctx context.Context) {
parameters := b.parameters()
pod, template := b.podInline(allocationMode)
pod.Spec.InitContainers = []v1.Container{pod.Spec.Containers[0]}
@ -248,7 +247,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
driver := NewDriver(f, nodes, networkResources)
b := newBuilder(f, driver)
ginkgo.It("schedules onto different nodes", func() {
ginkgo.It("schedules onto different nodes", func(ctx context.Context) {
parameters := b.parameters()
label := "app.kubernetes.io/instance"
instance := f.UniqueName + "-test-app"
@ -295,7 +294,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
b := newBuilder(f, driver)
tests := func(allocationMode resourcev1alpha1.AllocationMode) {
ginkgo.It("uses all resources", func() {
ginkgo.It("uses all resources", func(ctx context.Context) {
var objs = []klog.KMetadata{
b.parameters(),
}
@ -360,7 +359,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
})
b := newBuilder(f, driver)
ginkgo.It("works", func() {
ginkgo.It("works", func(ctx context.Context) {
// A pod with two claims can run on a node, but
// only if allocation of both succeeds. This
// test simulates the scenario where one claim
@ -474,7 +473,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
driver2.NameSuffix = "-other"
b2 := newBuilder(f, driver2)
ginkgo.It("work", func() {
ginkgo.It("work", func(ctx context.Context) {
parameters1 := b1.parameters()
parameters2 := b2.parameters()
claim1 := b1.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)

View File

@ -57,7 +57,7 @@ ginkgo.AfterEach(func() {
# Do something with f.ClientSet.
}
ginkgo.It("test something", func() {
ginkgo.It("test something", func(ctx context.Context) {
# The actual test.
})
```
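A hedged illustration of how the injected context is then threaded into API calls inside such a test; the suite name and the pod list call are examples, not part of the README:

```go
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example", func() {
	f := framework.NewDefaultFramework("example")

	ginkgo.It("lists pods with the per-spec context", func(ctx context.Context) {
		// Passing ctx means the request is aborted if the spec times out.
		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)
	})
})
```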

Some files were not shown because too many files have changed in this diff.