diff --git a/test/conformance/BUILD b/test/conformance/BUILD
index 4ba5ff36368..be4fd088544 100644
--- a/test/conformance/BUILD
+++ b/test/conformance/BUILD
@@ -2,9 +2,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
-    srcs = ["walk.go"],
+    srcs = [
+        "doc.go",
+        "walk.go",
+    ],
     importpath = "k8s.io/kubernetes/test/conformance",
     visibility = ["//visibility:private"],
+    deps = [
+        "//vendor/github.com/onsi/ginkgo/types:go_default_library",
+        "//vendor/gopkg.in/yaml.v2:go_default_library",
+    ],
 )
 
 go_binary(
@@ -32,22 +39,35 @@ filegroup(
 )
 
 genrule(
-    name = "list_conformance_tests",
+    name = "list_conformance_specs",
     srcs = [
         "//test/e2e:all-srcs",
         "//test/e2e_node:all-srcs",
+        "//test/e2e:e2e.test_binary",
+        "//vendor/github.com/onsi/ginkgo/ginkgo",
    ],
-    outs = ["conformance.txt"],
-    cmd = "./$(location :conformance) $(locations //test/e2e:all-srcs) > $@",
+    outs = ["specsummaries.json"],
+    cmd = "$(location //vendor/github.com/onsi/ginkgo/ginkgo) --dryRun=true --focus=[Conformance] $(location //test/e2e:e2e.test_binary) -- --spec-dump $$(pwd)/$@ > /dev/null",
+    message = "Getting all conformance spec summaries.",
+)
+
+genrule(
+    name = "list_conformance_tests",
+    srcs = [
+        ":list_conformance_specs",
+        ":conformance",
+        "//test/e2e:all-srcs",
+    ],
+    outs = ["conformance.yaml"],
+    cmd = "$(location :conformance) $(location :list_conformance_specs) > $@",
     message = "Listing all conformance tests.",
-    tools = [":conformance"],
 )
 
 sh_test(
     name = "conformance_test",
     srcs = ["conformance_test.sh"],
     data = [
-        "testdata/conformance.txt",
+        "testdata/conformance.yaml",
         ":list_conformance_tests",
     ],
 )
@@ -58,3 +78,16 @@ go_test(
     data = glob(["testdata/**"]),
     embed = [":go_default_library"],
 )
+
+genrule(
+    name = "gen_conformance_docs",
+    srcs = [
+        ":list_conformance_specs",
+        ":conformance",
+        "//test/e2e:all-srcs",
+        ":package-srcs",
+    ],
+    outs = ["conformance.md"],
+    cmd = "$(location :conformance) --docs $(location :list_conformance_specs) > $@",
+    message = "Generating the conformance documentation.",
+)
diff --git a/test/conformance/README.md b/test/conformance/README.md
index 1af372ef8a1..82204a6b318 100644
--- a/test/conformance/README.md
+++ b/test/conformance/README.md
@@ -9,7 +9,7 @@ To update the list, run
 
 ```console
 bazel build //test/conformance:list_conformance_tests
-cp bazel-genfiles/test/conformance/conformance.txt test/conformance/testdata
+cp bazel-genfiles/test/conformance/conformance.yaml test/conformance/testdata
 ```
 
 Add the changed file to your PR, then send for review.
diff --git a/test/conformance/cf_header.md b/test/conformance/cf_header.md
index bcdeba01756..6bc9d3db40c 100644
--- a/test/conformance/cf_header.md
+++ b/test/conformance/cf_header.md
@@ -15,13 +15,11 @@ clean up the tests. Example:
 ```
 /*
-  Testname: Kubelet-OutputToLogs
-  Release: v1.9
-  Description: By default the stdout and stderr from the process
-  being executed in a pod MUST be sent to the pod's logs.
+  Release: v1.13
+  Testname: Kubelet, log output, default
+  Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
 */
-// Note this test needs to be fixed to also test for stderr
-It("it should print the output to logs [Conformance]", func() {
+framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
 ```
 
 would generate the following documentation for the test.
Note that the "TestName" from the Documentation above will @@ -29,12 +27,12 @@ be used to document the test which make it more human readable. The "Description documentation for that test. ### **Output:** -## [Kubelet-OutputToLogs](https://github.com/kubernetes/kubernetes/blob/release-1.9/test/e2e_node/kubelet_test.go#L42) +## [Kubelet, log output, default](https://github.com/kubernetes/kubernetes/tree/master/test/e2e/common/kubelet.go#L48) -### Release v1.9 -By default the stdout and stderr from the process -being executed in a pod MUST be sent to the pod's logs. -Note this test needs to be fixed to also test for stderr +- Added to conformance in release v1.13 +- Defined in code as: [k8s.io] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance] + +By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. Notational Conventions when documenting the tests with the key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). diff --git a/test/conformance/conformance_test.sh b/test/conformance/conformance_test.sh index dc448849c63..5d8b2d6ad71 100755 --- a/test/conformance/conformance_test.sh +++ b/test/conformance/conformance_test.sh @@ -21,7 +21,7 @@ set -o errexit -if diff -u test/conformance/testdata/conformance.txt test/conformance/conformance.txt; then +if diff -u test/conformance/testdata/conformance.yaml test/conformance/conformance.yaml; then echo PASS exit 0 fi diff --git a/test/conformance/doc.go b/test/conformance/doc.go new file mode 100644 index 00000000000..f8b0d6fbee2 --- /dev/null +++ b/test/conformance/doc.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This stand-alone package is utilized for dynamically generating/maintaining a list of +conformance tests. It utilizes a two step approach: + - The test binary is built + - The test binary is run in dry mode with a custom ginkgo reporter dumping out + types.SpecSummary objects which contain full test names and file/code information. + - The SpecSummary information is parsed to get file/line info on Conformance tests and + then we use a simplified AST parser to grab the comments above the test. + +Due to the complicated nature of how tests can be declared/wrapped in various contexts, +this approach is much simpler to maintain than a pure-AST parser and allows us to easily +capture the full test names/locations of the tests using the pre-existing ginkgo logic. 
+*/ +package main diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt deleted file mode 100644 index 6519e1426da..00000000000 --- a/test/conformance/testdata/conformance.txt +++ /dev/null @@ -1,277 +0,0 @@ -test/e2e/apimachinery/aggregator.go: "Should be able to support the 1.17 Sample API Server using the current Aggregator" -test/e2e/apimachinery/crd_conversion_webhook.go: "should be able to convert from CR v1 to CR v2" -test/e2e/apimachinery/crd_conversion_webhook.go: "should be able to convert a non homogeneous list of CRs" -test/e2e/apimachinery/crd_publish_openapi.go: "works for CRD with validation schema" -test/e2e/apimachinery/crd_publish_openapi.go: "works for CRD without validation schema" -test/e2e/apimachinery/crd_publish_openapi.go: "works for CRD preserving unknown fields at the schema root" -test/e2e/apimachinery/crd_publish_openapi.go: "works for CRD preserving unknown fields in an embedded object" -test/e2e/apimachinery/crd_publish_openapi.go: "works for multiple CRDs of different groups" -test/e2e/apimachinery/crd_publish_openapi.go: "works for multiple CRDs of same group but different versions" -test/e2e/apimachinery/crd_publish_openapi.go: "works for multiple CRDs of same group and version but different kinds" -test/e2e/apimachinery/crd_publish_openapi.go: "updates the published spec when one version gets renamed" -test/e2e/apimachinery/crd_publish_openapi.go: "removes definition from spec when one version gets changed to not be served" -test/e2e/apimachinery/crd_watch.go: "watch on custom resource definition objects" -test/e2e/apimachinery/custom_resource_definition.go: "creating/deleting custom resource definition objects works" -test/e2e/apimachinery/custom_resource_definition.go: "listing custom resource definition objects works" -test/e2e/apimachinery/custom_resource_definition.go: "getting/updating/patching custom resource definition status sub-resource works" -test/e2e/apimachinery/custom_resource_definition.go: "should include custom resource definition resources in discovery documents" -test/e2e/apimachinery/custom_resource_definition.go: "custom resource defaulting for requests and from storage works" -test/e2e/apimachinery/garbage_collector.go: "should delete pods created by rc when not orphaning" -test/e2e/apimachinery/garbage_collector.go: "should orphan pods created by rc if delete options say so" -test/e2e/apimachinery/garbage_collector.go: "should delete RS created by deployment when not orphaning" -test/e2e/apimachinery/garbage_collector.go: "should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan" -test/e2e/apimachinery/garbage_collector.go: "should keep the rc around until all its pods are deleted if the deleteOptions says so" -test/e2e/apimachinery/garbage_collector.go: "should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted" -test/e2e/apimachinery/garbage_collector.go: "should not be blocked by dependency circle" -test/e2e/apimachinery/namespace.go: "should ensure that all pods are removed when a namespace is deleted" -test/e2e/apimachinery/namespace.go: "should ensure that all services are removed when a namespace is deleted" -test/e2e/apimachinery/namespace.go: "should patch a Namespace" -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and ensure its status is promptly calculated." -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a service." 
-test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a secret." -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a pod." -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a configMap." -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a replication controller." -test/e2e/apimachinery/resource_quota.go: "should create a ResourceQuota and capture the life of a replica set." -test/e2e/apimachinery/resource_quota.go: "should verify ResourceQuota with terminating scopes." -test/e2e/apimachinery/resource_quota.go: "should verify ResourceQuota with best effort scope." -test/e2e/apimachinery/resource_quota.go: "should be able to update and delete ResourceQuota." -test/e2e/apimachinery/table_conversion.go: "should return a 406 for a backend which does not implement metadata" -test/e2e/apimachinery/watch.go: "should observe add, update, and delete watch notifications on configmaps" -test/e2e/apimachinery/watch.go: "should be able to start watching from a specific resource version" -test/e2e/apimachinery/watch.go: "should be able to restart watching from the last resource version observed by the previous watch" -test/e2e/apimachinery/watch.go: "should observe an object deletion if it stops meeting the requirements of the selector" -test/e2e/apimachinery/watch.go: "should receive events on concurrent watches in same order" -test/e2e/apimachinery/webhook.go: "should include webhook resources in discovery documents" -test/e2e/apimachinery/webhook.go: "should be able to deny pod and configmap creation" -test/e2e/apimachinery/webhook.go: "should be able to deny attaching pod" -test/e2e/apimachinery/webhook.go: "should be able to deny custom resource creation, update and deletion" -test/e2e/apimachinery/webhook.go: "should unconditionally reject operations on fail closed webhook" -test/e2e/apimachinery/webhook.go: "should mutate configmap" -test/e2e/apimachinery/webhook.go: "should mutate pod and apply defaults after mutation" -test/e2e/apimachinery/webhook.go: "should not be able to mutate or prevent deletion of webhook configuration objects" -test/e2e/apimachinery/webhook.go: "should mutate custom resource" -test/e2e/apimachinery/webhook.go: "should deny crd creation" -test/e2e/apimachinery/webhook.go: "should mutate custom resource with different stored version" -test/e2e/apimachinery/webhook.go: "should mutate custom resource with pruning" -test/e2e/apimachinery/webhook.go: "should honor timeout" -test/e2e/apimachinery/webhook.go: "patching/updating a validating webhook should work" -test/e2e/apimachinery/webhook.go: "patching/updating a mutating webhook should work" -test/e2e/apimachinery/webhook.go: "listing validating webhooks should work" -test/e2e/apimachinery/webhook.go: "listing mutating webhooks should work" -test/e2e/apps/daemon_set.go: "should run and stop simple daemon" -test/e2e/apps/daemon_set.go: "should run and stop complex daemon" -test/e2e/apps/daemon_set.go: "should retry creating failed daemon pods" -test/e2e/apps/daemon_set.go: "should update pod when spec was updated and update strategy is RollingUpdate" -test/e2e/apps/daemon_set.go: "should rollback without unnecessary restarts" -test/e2e/apps/deployment.go: "RollingUpdateDeployment should delete old pods and create new ones" -test/e2e/apps/deployment.go: "RecreateDeployment should delete old pods and create new ones" -test/e2e/apps/deployment.go: 
"deployment should delete old replica sets" -test/e2e/apps/deployment.go: "deployment should support rollover" -test/e2e/apps/deployment.go: "deployment should support proportional scaling" -test/e2e/apps/job.go: "should run a job to completion when tasks sometimes fail and are locally restarted" -test/e2e/apps/job.go: "should delete a job" -test/e2e/apps/job.go: "should adopt matching orphans and release non-matching pods" -test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image" -test/e2e/apps/rc.go: "should surface a failure condition on a common issue like exceeded quota" -test/e2e/apps/rc.go: "should adopt matching pods on creation" -test/e2e/apps/rc.go: "should release no longer matching pods" -test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image" -test/e2e/apps/replica_set.go: "should adopt matching pods on creation and release no longer matching pods" -test/e2e/apps/statefulset.go: "should perform rolling updates and roll backs of template modifications" -test/e2e/apps/statefulset.go: "should perform canary updates and phased rolling updates of template modifications" -test/e2e/apps/statefulset.go: "Scaling should happen in predictable order and halt if any stateful pod is unhealthy" -test/e2e/apps/statefulset.go: "Burst scaling should run to completion even with unhealthy pods" -test/e2e/apps/statefulset.go: "Should recreate evicted statefulset" -test/e2e/apps/statefulset.go: "should have a working scale subresource" -test/e2e/auth/service_accounts.go: "should mount an API token into pods" -test/e2e/auth/service_accounts.go: "should allow opting out of API token automount" -test/e2e/common/configmap.go: "should be consumable via environment variable" -test/e2e/common/configmap.go: "should be consumable via the environment" -test/e2e/common/configmap.go: "should fail to create ConfigMap with empty key" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with defaultMode set" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume as non-root" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings and Item mode set" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root" -test/e2e/common/configmap_volume.go: "updates should be reflected in volume" -test/e2e/common/configmap_volume.go: "binary data should be reflected in volume" -test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume" -test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod" -test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart" -test/e2e/common/container_probe.go: "with readiness probe that fails should never be ready and never restart" -test/e2e/common/container_probe.go: "should be restarted with a exec \\\"cat /tmp/health\\\" liveness probe" -test/e2e/common/container_probe.go: "should *not* be restarted with a exec \\\"cat /tmp/health\\\" liveness probe" -test/e2e/common/container_probe.go: "should be restarted with a /healthz http liveness probe" -test/e2e/common/container_probe.go: "should *not* be restarted with a tcp:8080 liveness probe" -test/e2e/common/container_probe.go: "should have 
monotonically increasing restart count" -test/e2e/common/container_probe.go: "should *not* be restarted with a /healthz http liveness probe" -test/e2e/common/docker_containers.go: "should use the image defaults if command and args are blank" -test/e2e/common/docker_containers.go: "should be able to override the image's default arguments (docker cmd)" -test/e2e/common/docker_containers.go: "should be able to override the image's default command (docker entrypoint)" -test/e2e/common/docker_containers.go: "should be able to override the image's default command and arguments" -test/e2e/common/downward_api.go: "should provide pod name, namespace and IP address as env vars" -test/e2e/common/downward_api.go: "should provide host IP as an env var" -test/e2e/common/downward_api.go: "should provide container's limits.cpu/memory and requests.cpu/memory as env vars" -test/e2e/common/downward_api.go: "should provide default limits.cpu/memory from node allocatable" -test/e2e/common/downward_api.go: "should provide pod UID as env vars" -test/e2e/common/downwardapi_volume.go: "should provide podname only" -test/e2e/common/downwardapi_volume.go: "should set DefaultMode on files" -test/e2e/common/downwardapi_volume.go: "should set mode on item file" -test/e2e/common/downwardapi_volume.go: "should update labels on modification" -test/e2e/common/downwardapi_volume.go: "should update annotations on modification" -test/e2e/common/downwardapi_volume.go: "should provide container's cpu limit" -test/e2e/common/downwardapi_volume.go: "should provide container's memory limit" -test/e2e/common/downwardapi_volume.go: "should provide container's cpu request" -test/e2e/common/downwardapi_volume.go: "should provide container's memory request" -test/e2e/common/downwardapi_volume.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set" -test/e2e/common/downwardapi_volume.go: "should provide node allocatable (memory) as default memory limit if the limit is not set" -test/e2e/common/empty_dir.go: "volume on tmpfs should have the correct mode" -test/e2e/common/empty_dir.go: "should support (root,0644,tmpfs)" -test/e2e/common/empty_dir.go: "should support (root,0666,tmpfs)" -test/e2e/common/empty_dir.go: "should support (root,0777,tmpfs)" -test/e2e/common/empty_dir.go: "should support (non-root,0644,tmpfs)" -test/e2e/common/empty_dir.go: "should support (non-root,0666,tmpfs)" -test/e2e/common/empty_dir.go: "should support (non-root,0777,tmpfs)" -test/e2e/common/empty_dir.go: "volume on default medium should have the correct mode" -test/e2e/common/empty_dir.go: "should support (root,0644,default)" -test/e2e/common/empty_dir.go: "should support (root,0666,default)" -test/e2e/common/empty_dir.go: "should support (root,0777,default)" -test/e2e/common/empty_dir.go: "should support (non-root,0644,default)" -test/e2e/common/empty_dir.go: "should support (non-root,0666,default)" -test/e2e/common/empty_dir.go: "should support (non-root,0777,default)" -test/e2e/common/empty_dir.go: "pod should support shared volumes between containers" -test/e2e/common/expansion.go: "should allow composing env vars into new env vars" -test/e2e/common/expansion.go: "should allow substituting values in a container's command" -test/e2e/common/expansion.go: "should allow substituting values in a container's args" -test/e2e/common/host_path.go: "should give a volume the correct mode" -test/e2e/common/init_container.go: "should invoke init containers on a RestartNever pod" -test/e2e/common/init_container.go: "should invoke init 
containers on a RestartAlways pod" -test/e2e/common/init_container.go: "should not start app containers if init containers fail on a RestartAlways pod" -test/e2e/common/init_container.go: "should not start app containers and fail the pod if init containers fail on a RestartNever pod" -test/e2e/common/kubelet.go: "should print the output to logs" -test/e2e/common/kubelet.go: "should have an terminated reason" -test/e2e/common/kubelet.go: "should be possible to delete" -test/e2e/common/kubelet.go: "should write entries to /etc/hosts" -test/e2e/common/kubelet.go: "should not write to root filesystem" -test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file" -test/e2e/common/lease.go: "lease API should be available" -test/e2e/common/lifecycle_hook.go: "should execute poststart exec hook properly" -test/e2e/common/lifecycle_hook.go: "should execute prestop exec hook properly" -test/e2e/common/lifecycle_hook.go: "should execute poststart http hook properly" -test/e2e/common/lifecycle_hook.go: "should execute prestop http hook properly" -test/e2e/common/networking.go: "should function for intra-pod communication: http" -test/e2e/common/networking.go: "should function for intra-pod communication: udp" -test/e2e/common/networking.go: "should function for node-pod communication: http" -test/e2e/common/networking.go: "should function for node-pod communication: udp" -test/e2e/common/pods.go: "should get a host IP" -test/e2e/common/pods.go: "should be submitted and removed" -test/e2e/common/pods.go: "should be updated" -test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated" -test/e2e/common/pods.go: "should contain environment variables for services" -test/e2e/common/pods.go: "should support remote command execution over websockets" -test/e2e/common/pods.go: "should support retrieving logs from the container over websockets" -test/e2e/common/projected_combined.go: "should project all components that make up the projection API" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with defaultMode set" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume as non-root" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings and Item mode set" -test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings as non-root" -test/e2e/common/projected_configmap.go: "updates should be reflected in volume" -test/e2e/common/projected_configmap.go: "optional updates should be reflected in volume" -test/e2e/common/projected_configmap.go: "should be consumable in multiple volumes in the same pod" -test/e2e/common/projected_downwardapi.go: "should provide podname only" -test/e2e/common/projected_downwardapi.go: "should set DefaultMode on files" -test/e2e/common/projected_downwardapi.go: "should set mode on item file" -test/e2e/common/projected_downwardapi.go: "should update labels on modification" -test/e2e/common/projected_downwardapi.go: "should update annotations on modification" -test/e2e/common/projected_downwardapi.go: "should provide container's cpu limit" -test/e2e/common/projected_downwardapi.go: "should provide container's memory limit" -test/e2e/common/projected_downwardapi.go: "should provide container's cpu request" 
-test/e2e/common/projected_downwardapi.go: "should provide container's memory request" -test/e2e/common/projected_downwardapi.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set" -test/e2e/common/projected_downwardapi.go: "should provide node allocatable (memory) as default memory limit if the limit is not set" -test/e2e/common/projected_secret.go: "should be consumable from pods in volume" -test/e2e/common/projected_secret.go: "should be consumable from pods in volume with defaultMode set" -test/e2e/common/projected_secret.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set" -test/e2e/common/projected_secret.go: "should be consumable from pods in volume with mappings" -test/e2e/common/projected_secret.go: "should be consumable from pods in volume with mappings and Item Mode set" -test/e2e/common/projected_secret.go: "should be consumable in multiple volumes in a pod" -test/e2e/common/projected_secret.go: "optional updates should be reflected in volume" -test/e2e/common/runtime.go: "should run with the expected status" -test/e2e/common/runtime.go: "should report termination message if TerminationMessagePath is set as non-root user and at a non-default path" -test/e2e/common/runtime.go: "should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set" -test/e2e/common/runtime.go: "should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set" -test/e2e/common/runtime.go: "should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set" -test/e2e/common/secrets.go: "should be consumable from pods in env vars" -test/e2e/common/secrets.go: "should be consumable via the environment" -test/e2e/common/secrets.go: "should fail to create secret due to empty secret key" -test/e2e/common/secrets.go: "should patch a secret" -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume" -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with defaultMode set" -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set" -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings" -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set" -test/e2e/common/secrets_volume.go: "should be able to mount in a volume regardless of a different secret existing with same name in different namespace" -test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod" -test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume" -test/e2e/common/security_context.go: "should run the container with uid 65534" -test/e2e/common/security_context.go: "should run the container with writable rootfs when readOnlyRootFilesystem=false" -test/e2e/common/security_context.go: "should run the container as unprivileged when false" -test/e2e/common/security_context.go: "should not allow privilege escalation when false" -test/e2e/kubectl/kubectl.go: "should create and stop a replication controller" -test/e2e/kubectl/kubectl.go: "should scale a replication controller" -test/e2e/kubectl/kubectl.go: "should create and stop a working application" -test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions" -test/e2e/kubectl/kubectl.go: "should check if Kubernetes 
master services is included in cluster-info" -test/e2e/kubectl/kubectl.go: "should check if kubectl describe prints relevant information for rc and pods" -test/e2e/kubectl/kubectl.go: "should create services for rc" -test/e2e/kubectl/kubectl.go: "should update the label on a resource" -test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs" -test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc" -test/e2e/kubectl/kubectl.go: "should check is all data is printed" -test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never" -test/e2e/kubectl/kubectl.go: "should update a single-container pod's image" -test/e2e/kubectl/kubectl.go: "should support proxy with --port 0" -test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path" -test/e2e/network/dns.go: "should provide DNS for the cluster" -test/e2e/network/dns.go: "should provide /etc/hosts entries for the cluster" -test/e2e/network/dns.go: "should provide DNS for services" -test/e2e/network/dns.go: "should resolve DNS of partial qualified names for services" -test/e2e/network/dns.go: "should provide DNS for pods for Hostname" -test/e2e/network/dns.go: "should provide DNS for pods for Subdomain" -test/e2e/network/dns.go: "should provide DNS for ExternalName services" -test/e2e/network/dns.go: "should support configurable pod DNS nameservers" -test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource" -test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource" -test/e2e/network/proxy.go: "should proxy through a service and a pod" -test/e2e/network/service.go: "should provide secure master service" -test/e2e/network/service.go: "should serve a basic endpoint from pods" -test/e2e/network/service.go: "should serve multiport endpoints from pods" -test/e2e/network/service.go: "should be able to create a functioning NodePort service" -test/e2e/network/service.go: "should be able to change the type from ExternalName to ClusterIP" -test/e2e/network/service.go: "should be able to change the type from ExternalName to NodePort" -test/e2e/network/service.go: "should be able to change the type from ClusterIP to ExternalName" -test/e2e/network/service.go: "should be able to change the type from NodePort to ExternalName" -test/e2e/network/service.go: "should find a service from listing all namespaces" -test/e2e/network/service_latency.go: "should not be very high" -test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running" -test/e2e/node/pods.go: "should be set on Pods with matching resource requests and limits for memory and cpu" -test/e2e/node/pre_stop.go: "should call prestop when killing a pod" -test/e2e/scheduling/limit_range.go: "should create a LimitRange with defaults and ensure pod has those defaults applied." 
-test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run"
-test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching"
-test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
-test/e2e/scheduling/predicates.go: "validates that there is no conflict between pods with same hostPort but different hostIP and protocol"
-test/e2e/scheduling/predicates.go: "validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP"
-test/e2e/scheduling/taints.go: "removing taint cancels eviction"
-test/e2e/scheduling/taints.go: "evicts pods with minTolerationSeconds"
-test/e2e/storage/empty_dir_wrapper.go: "should not conflict"
-test/e2e/storage/empty_dir_wrapper.go: "should not cause race condition when used for configmaps"
-test/e2e/storage/subpath.go: "should support subpaths with secret pod"
-test/e2e/storage/subpath.go: "should support subpaths with configmap pod"
-test/e2e/storage/subpath.go: "should support subpaths with configmap pod with mountPath of existing file"
-test/e2e/storage/subpath.go: "should support subpaths with downward pod"
-test/e2e/storage/subpath.go: "should support subpaths with projected pod"
diff --git a/test/conformance/testdata/conformance.yaml b/test/conformance/testdata/conformance.yaml
new file mode 100755
index 00000000000..d69a8d3c366
--- /dev/null
+++ b/test/conformance/testdata/conformance.yaml
@@ -0,0 +1,2512 @@
+- testname: Pod Lifecycle, post start exec hook
+  codename: '[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook
+    should execute poststart exec hook properly [NodeConformance] [Conformance]'
+  description: When a post start handler is specified in the container lifecycle using
+    an 'Exec' action, then the handler MUST be invoked after the start of the container.
+    A server pod is created that will serve http requests, create a second pod with
+    a container lifecycle specifying a post start that invokes the server pod using
+    ExecAction to validate that the post start is executed.
+  release: v1.9
+  file: test/e2e/common/lifecycle_hook.go
+- testname: Pod Lifecycle, post start http hook
+  codename: '[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook
+    should execute poststart http hook properly [NodeConformance] [Conformance]'
+  description: When a post start handler is specified in the container lifecycle using
+    an HttpGet action, then the handler MUST be invoked after the start of the container.
+    A server pod is created that will serve http requests, create a second pod with
+    a container lifecycle specifying a post start that invokes the server pod to validate
+    that the post start is executed.
+  release: v1.9
+  file: test/e2e/common/lifecycle_hook.go
+- testname: Pod Lifecycle, prestop exec hook
+  codename: '[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook
+    should execute prestop exec hook properly [NodeConformance] [Conformance]'
+  description: When a pre-stop handler is specified in the container lifecycle using
+    an 'Exec' action, then the handler MUST be invoked before the container is terminated.
+    A server pod is created that will serve http requests, create a second pod with
+    a container lifecycle specifying a pre-stop that invokes the server pod using
+    ExecAction to validate that the pre-stop is executed.
+  release: v1.9
+  file: test/e2e/common/lifecycle_hook.go
+- testname: Pod Lifecycle, prestop http hook
+  codename: '[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook
+    should execute prestop http hook properly [NodeConformance] [Conformance]'
+  description: When a pre-stop handler is specified in the container lifecycle using
+    a 'HttpGet' action, then the handler MUST be invoked before the container is terminated.
+    A server pod is created that will serve http requests, create a second pod with
+    a container lifecycle specifying a pre-stop that invokes the server pod to validate
+    that the pre-stop is executed.
+  release: v1.9
+  file: test/e2e/common/lifecycle_hook.go
+- testname: ""
+  codename: '[k8s.io] Container Runtime blackbox test on terminated container should
+    report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy
+    FallbackToLogsOnError is set [NodeConformance] [Conformance]'
+  description: 'Name: Container Runtime, TerminationMessage, from log output of succeeding
+    container Create a pod with a container. Container''s output is recorded in log
+    and container exits successfully without an error. When container is terminated,
+    terminationMessage MUST have no content as the container succeeded. [LinuxOnly]: Cannot
+    mount files in Windows Containers.'
+  release: v1.15
+  file: test/e2e/common/runtime.go
+- testname: ""
+  codename: '[k8s.io] Container Runtime blackbox test on terminated container should
+    report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy
+    FallbackToLogsOnError is set [NodeConformance] [Conformance]'
+  description: 'Name: Container Runtime, TerminationMessage, from file of succeeding
+    container Create a pod with a container. Container''s output is recorded in a
+    file and the container exits successfully without an error. When container is
+    terminated, terminationMessage MUST match with the content from file. [LinuxOnly]:
+    Cannot mount files in Windows Containers.'
+  release: v1.15
+  file: test/e2e/common/runtime.go
+- testname: ""
+  codename: '[k8s.io] Container Runtime blackbox test on terminated container should
+    report termination message [LinuxOnly] from log output if TerminationMessagePolicy
+    FallbackToLogsOnError is set [NodeConformance] [Conformance]'
+  description: 'Name: Container Runtime, TerminationMessage, from container''s log
+    output of failing container Create a pod with a container. Container''s output
+    is recorded in log and container exits with an error. When container is terminated,
+    termination message MUST match the expected output recorded from container''s
+    log. [LinuxOnly]: Cannot mount files in Windows Containers.'
+  release: v1.15
+  file: test/e2e/common/runtime.go
+- testname: ""
+  codename: '[k8s.io] Container Runtime blackbox test on terminated container should
+    report termination message [LinuxOnly] if TerminationMessagePath is set as non-root
+    user and at a non-default path [NodeConformance] [Conformance]'
+  description: 'Name: Container Runtime, TerminationMessagePath, non-root user and
+    non-default path Create a pod with a container to run it as a non-root user with
+    a custom TerminationMessagePath set. Pod redirects the output to the provided
+    path successfully. When the container is terminated, the termination message MUST
+    match the expected output logged in the provided custom path. [LinuxOnly]: Tagged
+    LinuxOnly due to use of ''uid'' and unable to mount files in Windows Containers.'
+  release: v1.15
+  file: test/e2e/common/runtime.go
+- testname: Container Runtime, Restart Policy, Pod Phases
+  codename: '[k8s.io] Container Runtime blackbox test when starting a container that
+    exits should run with the expected status [NodeConformance] [Conformance]'
+  description: If the restart policy is set to 'Always', Pod MUST be restarted when
+    terminated. If restart policy is 'OnFailure', Pod MUST be started only if it is
+    terminated with non-zero exit code. If the restart policy is 'Never', Pod MUST
+    never be restarted. All three test cases MUST verify the restart counts
+    accordingly.
+  release: v1.13
+  file: test/e2e/common/runtime.go
+- testname: Docker containers, with arguments
+  codename: '[k8s.io] Docker Containers should be able to override the image''s default
+    arguments (docker cmd) [NodeConformance] [Conformance]'
+  description: Default command from the docker image entrypoint MUST be used
+    when Pod does not specify the container command, but the arguments from Pod spec
+    MUST override when specified.
+  release: v1.9
+  file: test/e2e/common/docker_containers.go
+- testname: Docker containers, with command
+  codename: '[k8s.io] Docker Containers should be able to override the image''s default
+    command (docker entrypoint) [NodeConformance] [Conformance]'
+  description: 'Note: when you override the entrypoint, the image''s arguments (docker
+    cmd) are ignored. Default command from the docker image entrypoint MUST NOT be
+    used when Pod specifies the container command. Command from Pod spec MUST override
+    the command in the image.'
+  release: v1.9
+  file: test/e2e/common/docker_containers.go
+- testname: Docker containers, with command and arguments
+  codename: '[k8s.io] Docker Containers should be able to override the image''s default
+    command and arguments [NodeConformance] [Conformance]'
+  description: Default command and arguments from the docker image entrypoint MUST
+    NOT be used when Pod specifies the container command and arguments. Command and
+    arguments from Pod spec MUST override the command and arguments in the image.
+  release: v1.9
+  file: test/e2e/common/docker_containers.go
+- testname: Docker containers, without command and arguments
+  codename: '[k8s.io] Docker Containers should use the image defaults if command and
+    args are blank [NodeConformance] [Conformance]'
+  description: Default command and arguments from the docker image entrypoint MUST
+    be used when Pod does not specify the container command.
+  release: v1.9
+  file: test/e2e/common/docker_containers.go
+- testname: init-container-starts-app-restartalways-pod
+  codename: '[k8s.io] InitContainer [NodeConformance] should invoke init containers
+    on a RestartAlways pod [Conformance]'
+  description: Ensure that all InitContainers are started and all containers in pod
+    started and at least one container is still running or is in the process of being
+    restarted when Pod has restart policy as RestartAlways.
+  release: v1.12
+  file: test/e2e/common/init_container.go
+- testname: init-container-starts-app-restartnever-pod
+  codename: '[k8s.io] InitContainer [NodeConformance] should invoke init containers
+    on a RestartNever pod [Conformance]'
+  description: Ensure that all InitContainers are started and all containers in pod
+    are voluntarily terminated with exit status 0, and the system is not going to
+    restart any of these containers when Pod has restart policy as RestartNever.
+  release: v1.12
+  file: test/e2e/common/init_container.go
+- testname: init-container-fails-stops-app-restartnever-pod
+  codename: '[k8s.io] InitContainer [NodeConformance] should not start app containers
+    and fail the pod if init containers fail on a RestartNever pod [Conformance]'
+  description: Ensure that app container is not started when at least one InitContainer
+    fails to start and Pod has restart policy as RestartNever.
+  release: v1.12
+  file: test/e2e/common/init_container.go
+- testname: init-container-fails-stops-app-restartalways-pod
+  codename: '[k8s.io] InitContainer [NodeConformance] should not start app containers
+    if init containers fail on a RestartAlways pod [Conformance]'
+  description: Ensure that app container is not started when all InitContainers failed
+    to start and Pod has restarted for a few occurrences and pod has restart policy
+    as RestartAlways.
+  release: v1.12
+  file: test/e2e/common/init_container.go
+- testname: Kubelet, hostAliases
+  codename: '[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases should
+    write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]'
+  description: Create a Pod with hostAliases and a container with command to output
+    /etc/hosts entries. Pod's logs MUST have matching entries of specified hostAliases
+    to the output of /etc/hosts entries. Kubernetes mounts the /etc/hosts file into
+    its containers, however, mounting individual files is not supported on Windows
+    Containers. For this reason, this test is marked LinuxOnly.
+  release: v1.13
+  file: test/e2e/common/kubelet.go
+- testname: Kubelet, log output, default
+  codename: '[k8s.io] Kubelet when scheduling a busybox command in a pod should print
+    the output to logs [NodeConformance] [Conformance]'
+  description: By default the stdout and stderr from the process being executed in
+    a pod MUST be sent to the pod's logs.
+  release: v1.13
+  file: test/e2e/common/kubelet.go
+- testname: Kubelet, failed pod, delete
+  codename: '[k8s.io] Kubelet when scheduling a busybox command that always fails
+    in a pod should be possible to delete [NodeConformance] [Conformance]'
+  description: Create a Pod with terminated state. This terminated pod MUST be able
+    to be deleted.
+  release: v1.13
+  file: test/e2e/common/kubelet.go
+- testname: Kubelet, failed pod, terminated reason
+  codename: '[k8s.io] Kubelet when scheduling a busybox command that always fails
+    in a pod should have an terminated reason [NodeConformance] [Conformance]'
+  description: Create a Pod with terminated state. Pod MUST have only one container.
+    Container MUST be in terminated state and MUST have a terminated reason.
+  release: v1.13
+  file: test/e2e/common/kubelet.go
+- testname: Kubelet, pod with read only root file system
+  codename: '[k8s.io] Kubelet when scheduling a read only busybox container should
+    not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]'
+  description: Create a Pod with security context set with ReadOnlyRootFileSystem
+    set to true. The Pod then tries to write to the /file on the root; the write operation
+    to the root filesystem MUST fail as expected. This test is marked LinuxOnly since
+    Windows does not support creating containers with read-only access.
+  release: v1.13
+  file: test/e2e/common/kubelet.go
+- testname: Kubelet, managed etc hosts
+  codename: '[k8s.io] KubeletManagedEtcHosts should test kubelet managed /etc/hosts
+    file [LinuxOnly] [NodeConformance] [Conformance]'
+  description: Create a Pod with containers with hostNetwork set to false, one of
+    the containers mounts the /etc/hosts file from the host. Create a second Pod with
+    hostNetwork set to true. 1. The Pod with hostNetwork=false MUST have /etc/hosts
+    of containers managed by the Kubelet. 2. The Pod with hostNetwork=false but the
+    container mounts /etc/hosts file from the host. The /etc/hosts file MUST not be
+    managed by the Kubelet. 3. The Pod with hostNetwork=true, /etc/hosts file MUST
+    not be managed by the Kubelet. This test is marked LinuxOnly since Windows cannot
+    mount individual files in Containers.
+  release: v1.9
+  file: test/e2e/common/kubelet_etc_hosts.go
+- testname: lease API should be available
+  codename: '[k8s.io] Lease lease API should be available [Conformance]'
+  description: "Create Lease object, and get it; create and get MUST be successful
+    and Spec of the read Lease MUST match Spec of original Lease. Update the Lease
+    and get it; update and get MUST be successful and Spec of the read Lease MUST
+    match Spec of updated Lease. Patch the Lease and get it; patch and get MUST be
+    successful and Spec of the read Lease MUST match Spec of patched Lease. Create
+    a second Lease with labels and list Leases; create and list MUST be successful
+    and list MUST return both leases. Delete the labels lease via delete collection;
+    the delete MUST be successful and MUST delete only the labels lease. List leases;
+    list MUST be successful and MUST return just the remaining lease. Delete the lease;
+    delete MUST be successful. Get the lease; get MUST return not found error."
+  release: v1.17
+  file: test/e2e/common/lease.go
+- testname: Pods, ActiveDeadlineSeconds
+  codename: '[k8s.io] Pods should allow activeDeadlineSeconds to be updated [NodeConformance]
+    [Conformance]'
+  description: Create a Pod with a unique label. Query for the Pod with the label
+    as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds
+    set on the Pod spec. Pod MUST terminate if the specified time elapses.
+  release: v1.9
+  file: test/e2e/common/pods.go
+- testname: Pods, lifecycle
+  codename: '[k8s.io] Pods should be submitted and removed [NodeConformance] [Conformance]'
+  description: A Pod is created with a unique label. Pod MUST be accessible when queried
+    using the label selector upon creation. Add a watch, check if the Pod is running.
+    The Pod is then deleted, and the pod deletion timestamp is observed. The watch MUST return
+    the pod deleted event. Query with the original selector for the Pod MUST return
+    an empty list.
+  release: v1.9
+  file: test/e2e/common/pods.go
+- testname: Pods, update
+  codename: '[k8s.io] Pods should be updated [NodeConformance] [Conformance]'
+  description: Create a Pod with a unique label. Query for the Pod with the label
+    as selector MUST be successful. Update the pod to change the value of the Label.
+    Query for the Pod with the new value for the label MUST be successful.
+  release: v1.9
+  file: test/e2e/common/pods.go
+- testname: Pods, service environment variables
+  codename: '[k8s.io] Pods should contain environment variables for services [NodeConformance]
+    [Conformance]'
+  description: Create a server Pod listening on port 9376. A Service called fooservice
+    is created for the server Pod listening on port 8765 targeting port 8080. If a
+    new Pod is created in the cluster then the Pod MUST have the fooservice environment
+    variables available from this new Pod. The newly created Pod MUST have environment
+    variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT,
+    FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP
+    and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
+  release: v1.9
+  file: test/e2e/common/pods.go
+- testname: Pods, assigned hostip
+  codename: '[k8s.io] Pods should get a host IP [NodeConformance] [Conformance]'
+  description: Create a Pod. Pod status MUST return successfully and contain a valid
+    IP address.
+  release: v1.9
+  file: test/e2e/common/pods.go
+- testname: Pods, remote command execution over websocket
+  codename: '[k8s.io] Pods should support remote command execution over websockets
+    [NodeConformance] [Conformance]'
+  description: A Pod is created. Websocket is created to retrieve exec command output
+    from this pod. Message retrieved from Websocket MUST match with expected exec
+    command output.
+  release: v1.13
+  file: test/e2e/common/pods.go
+- testname: Pods, logs from websockets
+  codename: '[k8s.io] Pods should support retrieving logs from the container over
+    websockets [NodeConformance] [Conformance]'
+  description: A Pod is created. Websocket is created to retrieve log of a container
+    from this pod. Message retrieved from Websocket MUST match with container's output.
+  release: v1.13
+  file: test/e2e/common/pods.go
+- testname: Pod liveness probe, using http endpoint, failure
+  codename: '[k8s.io] Probing container should *not* be restarted with a /healthz
+    http liveness probe [NodeConformance] [Conformance]'
+  description: A Pod is created with liveness probe on http endpoint '/'. Liveness
+    probe on this endpoint will not fail. When liveness probe does not fail then the
+    restart count MUST remain zero.
+  release: v1.9
+  file: test/e2e/common/container_probe.go
+- testname: Pod liveness probe, using local file, no restart
+  codename: '[k8s.io] Probing container should *not* be restarted with a exec "cat
+    /tmp/health" liveness probe [NodeConformance] [Conformance]'
+  description: Pod is created with liveness probe that uses 'exec' command to cat
+    /tmp/health file. Liveness probe MUST not fail to check health and the restart
+    count should remain 0.
+  release: v1.9
+  file: test/e2e/common/container_probe.go
+- testname: Pod liveness probe, using tcp socket, no restart
+  codename: '[k8s.io] Probing container should *not* be restarted with a tcp:8080
+    liveness probe [NodeConformance] [Conformance]'
+  description: A Pod is created with liveness probe on tcp socket 8080. The http handler
+    on port 8080 will return http errors after 10 seconds, but the socket will remain
+    open. Liveness probe MUST not fail to check health and the restart count should
+    remain 0.
+  release: v1.18
+  file: test/e2e/common/container_probe.go
+- testname: Pod liveness probe, using http endpoint, restart
+  codename: '[k8s.io] Probing container should be restarted with a /healthz http liveness
+    probe [NodeConformance] [Conformance]'
+  description: A Pod is created with liveness probe on http endpoint /healthz. The
+    http handler on the /healthz will return an http error after 10 seconds since the
+    Pod is started. This MUST result in liveness check failure. The Pod MUST now be
The Pod MUST now be + killed and restarted incrementing restart count to 1. + release: v1.9 + file: test/e2e/common/container_probe.go +- testname: Pod liveness probe, using local file, restart + codename: '[k8s.io] Probing container should be restarted with a exec "cat /tmp/health" + liveness probe [NodeConformance] [Conformance]' + description: Create a Pod with liveness probe that uses ExecAction handler to cat + /temp/health file. The Container deletes the file /temp/health after 10 second, + triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing + restart count to 1. + release: v1.9 + file: test/e2e/common/container_probe.go +- testname: Pod liveness probe, using http endpoint, multiple restarts (slow) + codename: '[k8s.io] Probing container should have monotonically increasing restart + count [NodeConformance] [Conformance]' + description: A Pod is created with liveness probe on http endpoint /healthz. The + http handler on the /healthz will return a http error after 10 seconds since the + Pod is started. This MUST result in liveness check failure. The Pod MUST now be + killed and restarted incrementing restart count to 1. The liveness probe must + fail again after restart once the http handler for /healthz enpoind on the Pod + returns an http error after 10 seconds from the start. Restart counts MUST increment + everytime health check fails, measure upto 5 restart. + release: v1.9 + file: test/e2e/common/container_probe.go +- testname: Pod readiness probe, with initial delay + codename: '[k8s.io] Probing container with readiness probe should not be ready before + initial delay and never restart [NodeConformance] [Conformance]' + description: Create a Pod that is configured with a initial delay set on the readiness + probe. Check the Pod Start time to compare to the initial delay. The Pod MUST + be ready only after the specified initial delay. + release: v1.9 + file: test/e2e/common/container_probe.go +- testname: Pod readiness probe, failure + codename: '[k8s.io] Probing container with readiness probe that fails should never + be ready and never restart [NodeConformance] [Conformance]' + description: Create a Pod with a readiness probe that fails consistently. When this + Pod is created, then the Pod MUST never be ready, never be running and restart + count MUST be zero. + release: v1.9 + file: test/e2e/common/container_probe.go +- testname: Security Context, runAsUser=65534 + codename: '[k8s.io] Security Context When creating a container with runAsUser should + run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]' + description: 'Container is created with runAsUser option by passing uid 65534 to + run as unpriviledged user. Pod MUST be in Succeeded phase. [LinuxOnly]: This test + is marked as LinuxOnly since Windows does not support running as UID / GID.' + release: v1.15 + file: test/e2e/common/security_context.go +- testname: Security Context, privileged=false. + codename: '[k8s.io] Security Context When creating a pod with privileged should + run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]' + description: 'Create a container to run in unprivileged mode by setting pod''s SecurityContext + Privileged option as false. Pod MUST be in Succeeded phase. [LinuxOnly]: This + test is marked as LinuxOnly since it runs a Linux-specific command.' + release: v1.15 + file: test/e2e/common/security_context.go +- testname: Security Context, readOnlyRootFilesystem=false. 
+  codename: '[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem
+    should run the container with writable rootfs when readOnlyRootFilesystem=false
+    [NodeConformance] [Conformance]'
+  description: Container is configured to run with readOnlyRootFilesystem set to false.
+    Write operation MUST be allowed and Pod MUST be in Succeeded state.
+  release: v1.15
+  file: test/e2e/common/security_context.go
+- testname: Security Context, allowPrivilegeEscalation=false.
+  codename: '[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation
+    should not allow privilege escalation when false [LinuxOnly] [NodeConformance]
+    [Conformance]'
+  description: 'Configuring the allowPrivilegeEscalation to false does not allow
+    the privilege escalation operation. A container is configured with allowPrivilegeEscalation=false
+    and a given uid (1000) which is not 0. When the container is run, container''s
+    output MUST match with expected output verifying container ran with given uid
+    i.e. uid=1000. [LinuxOnly]: This test is marked LinuxOnly since Windows does not
+    support running as UID / GID, or privilege escalation.'
+  release: v1.15
+  file: test/e2e/common/security_context.go
+- testname: Environment variables, expansion
+  codename: '[k8s.io] Variable Expansion should allow composing env vars into new
+    env vars [NodeConformance] [Conformance]'
+  description: Create a Pod with environment variables. Environment variables defined
+    using previously defined environment variables MUST expand to proper values.
+  release: v1.9
+  file: test/e2e/common/expansion.go
+- testname: Environment variables, command argument expansion
+  codename: '[k8s.io] Variable Expansion should allow substituting values in a container''s
+    args [NodeConformance] [Conformance]'
+  description: Create a Pod with environment variables and container command arguments
+    using them. Container command arguments using the defined environment variables
+    MUST expand to proper values.
+  release: v1.9
+  file: test/e2e/common/expansion.go
+- testname: Environment variables, command expansion
+  codename: '[k8s.io] Variable Expansion should allow substituting values in a container''s
+    command [NodeConformance] [Conformance]'
+  description: Create a Pod with environment variables and container command using
+    them. Container command using the defined environment variables MUST expand to
+    proper values.
+  release: v1.9
+  file: test/e2e/common/expansion.go
+- testname: Pod events, verify event from Scheduler and Kubelet
+  codename: '[k8s.io] [sig-node] Events should be sent by kubelets and the scheduler
+    about pods scheduling and running [Conformance]'
+  description: Create a Pod, make sure that the Pod can be queried. Create an event
+    selector for the kind=Pod and the source is the Scheduler. The list of events
+    MUST contain at least one entry. Create an event selector for kind=Pod and the source is the
+    Kubelet. The list of events MUST contain at least one entry. Both Scheduler and Kubelet MUST
+    send events when scheduling and running a Pod.
+  release: v1.9
+  file: test/e2e/node/events.go
+- testname: Pods, QOS
+  codename: '[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class should
+    be set on Pods with matching resource requests and limits for memory and cpu [Conformance]'
+  description: Create a Pod with CPU and Memory request and limits. Pod status MUST
+    have QOSClass set to PodQOSGuaranteed.
+  release: v1.9
+  file: test/e2e/node/pods.go
+- testname: Pods, prestop hook
+  codename: '[k8s.io] [sig-node] PreStop should call prestop when killing a pod [Conformance]'
+  description: Create a server pod with a REST endpoint '/write' that changes the
+    state.Received field. Create a Pod with a pre-stop handler that posts to the /write
+    endpoint on the server Pod. Verify that the Pod with the pre-stop hook is running.
+    Delete the Pod with the pre-stop hook. Before the Pod is deleted, the pre-stop
+    handler MUST be called when configured. Verify that the Pod is deleted and that
+    the call to the pre-stop hook is verified by checking the status received on the
+    server Pod.
+  release: v1.9
+  file: test/e2e/node/pre_stop.go
+- testname: Admission webhook, list mutating webhooks
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing
+    mutating webhooks should work [Conformance]'
+  description: Create 10 mutating webhook configurations, all with a label. Attempt
+    to list the webhook configurations matching the label; all the created webhook
+    configurations MUST be present. Attempt to create an object; the object MUST be
+    mutated. Attempt to remove the webhook configurations matching the label with
+    deletecollection; all webhook configurations MUST be deleted. Attempt to create
+    an object; the object MUST NOT be mutated.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, list validating webhooks
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing
+    validating webhooks should work [Conformance]'
+  description: Create 10 validating webhook configurations, all with a label. Attempt
+    to list the webhook configurations matching the label; all the created webhook
+    configurations MUST be present. Attempt to create an object; the create MUST be
+    denied. Attempt to remove the webhook configurations matching the label with deletecollection;
+    all webhook configurations MUST be deleted. Attempt to create an object; the create
+    MUST NOT be denied.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, update mutating webhook
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating
+    a mutating webhook should work [Conformance]'
+  description: Register a mutating admission webhook configuration. Update the webhook
+    to not apply to the create operation and attempt to create an object; the webhook
+    MUST NOT mutate the object. Patch the webhook to apply to the create operation
+    again and attempt to create an object; the webhook MUST mutate the object.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, update validating webhook
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating
+    a validating webhook should work [Conformance]'
+  description: Register a validating admission webhook configuration. Update the webhook
+    to not apply to the create operation and attempt to create an object; the webhook
+    MUST NOT deny the create. Patch the webhook to apply to the create operation again
+    and attempt to create an object; the webhook MUST deny the create.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, deny attach
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    be able to deny attaching pod [Conformance]'
+  description: Register an admission webhook configuration that denies connecting
+    to a pod's attach sub-resource. Attempts to attach MUST be denied.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, deny custom resource create and delete
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    be able to deny custom resource creation, update and deletion [Conformance]'
+  description: Register an admission webhook configuration that denies creation, update
+    and deletion of custom resources. Attempts to create, update and delete custom
+    resources MUST be denied.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, deny create
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    be able to deny pod and configmap creation [Conformance]'
+  description: Register an admission webhook configuration that admits pods and configmaps.
+    Attempts to create non-compliant pods and configmaps, or to update/patch compliant
+    pods and configmaps to be non-compliant, MUST be denied. An attempt to create a
+    pod that causes a webhook to hang MUST result in a webhook timeout error, and
+    the pod creation MUST be denied. An attempt to create a non-compliant configmap
+    in a whitelisted namespace based on the webhook namespace selector MUST be allowed.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, deny custom resource definition
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    deny crd creation [Conformance]'
+  description: Register a webhook that denies custom resource definition create. Attempt
+    to create a custom resource definition; the create request MUST be denied.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, honor timeout
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    honor timeout [Conformance]'
+  description: Using a webhook that waits 5 seconds before admitting objects, configure
+    the webhook with combinations of timeout and failure policy values. Attempt to
+    create a config map with each combination. Requests MUST time out if the configured
+    webhook timeout is less than 5 seconds and the failure policy is fail. Requests
+    MUST NOT time out if the failure policy is ignore. Requests MUST NOT time out
+    if the configured webhook timeout is 10 seconds (much longer than the webhook
+    wait duration).
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, discovery document
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    include webhook resources in discovery documents [Conformance]'
+  description: The admissionregistration.k8s.io API group MUST exist in the /apis
+    discovery document. The admissionregistration.k8s.io/v1 API group/version MUST
+    exist in the /apis discovery document. The mutatingwebhookconfigurations and
+    validatingwebhookconfigurations resources MUST exist in the /apis/admissionregistration.k8s.io/v1
+    discovery document.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, ordered mutation
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    mutate configmap [Conformance]'
+  description: Register a mutating webhook configuration with two webhooks that admit
+    configmaps, one that adds a data key if the configmap already has a specific key,
+    and another that adds a key if the key added by the first webhook is present.
+    Attempt to create a config map; both keys MUST be added to the config map.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, mutate custom resource
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    mutate custom resource [Conformance]'
+  description: Register a webhook that mutates a custom resource. Attempt to create
+    a custom resource object; the custom resource MUST be mutated.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, mutate custom resource with different stored version
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    mutate custom resource with different stored version [Conformance]'
+  description: Register a webhook that mutates custom resources on create and update.
+    Register a custom resource definition using v1 as the stored version. Create a
+    custom resource. Patch the custom resource definition to use v2 as the stored
+    version. Attempt to patch the custom resource with a new field and value; the
+    patch MUST be applied successfully.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, mutate custom resource with pruning
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    mutate custom resource with pruning [Conformance]'
+  description: Register mutating webhooks that add fields to custom objects. Register
+    a custom resource definition with a schema that includes only one of the data
+    keys added by the webhooks. Attempt to create a custom resource; the fields included
+    in the schema MUST be present and fields not included in the schema MUST NOT be
+    present.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, mutation with defaulting
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    mutate pod and apply defaults after mutation [Conformance]'
+  description: Register a mutating webhook that adds an InitContainer to pods. Attempt
+    to create a pod; the InitContainer MUST be added and the TerminationMessagePolicy
+    MUST be defaulted.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, admission control not allowed on webhook configuration
+    objects
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    not be able to mutate or prevent deletion of webhook configuration objects [Conformance]'
+  description: Register webhooks that mutate and deny deletion of webhook configuration
+    objects. Attempt to create and delete a webhook configuration object; both operations
+    MUST be allowed and the webhook configuration object MUST NOT be mutated by the
+    webhooks.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: Admission webhook, fail closed
+  codename: '[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should
+    unconditionally reject operations on fail closed webhook [Conformance]'
+  description: Register a webhook with a fail closed policy and without a CA bundle
+    so that it cannot be called. Attempt operations that require the admission webhook;
+    all MUST be denied.
+  release: v1.16
+  file: test/e2e/apimachinery/webhook.go
+- testname: aggregator-supports-the-sample-apiserver
+  codename: '[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample
+    API Server using the current Aggregator [Conformance]'
+  description: Ensure that the sample-apiserver code from 1.17, compiled against 1.17,
+    will work on the current Aggregator/API-Server.
+  release: ""
+  file: test/e2e/apimachinery/aggregator.go
+- testname: Custom Resource Definition Conversion Webhook, convert mixed version list
+  codename: '[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin]
+    should be able to convert a non homogeneous list of CRs [Conformance]'
+  description: Register a conversion webhook and a custom resource definition. Create
+    a custom resource stored at v1. Change the custom resource definition storage
+    to v2. Create a custom resource stored at v2. Attempt to list the custom resources
+    at v2; the list result MUST contain both custom resources at v2.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_conversion_webhook.go
+- testname: Custom Resource Definition Conversion Webhook, conversion custom resource
+  codename: '[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin]
+    should be able to convert from CR v1 to CR v2 [Conformance]'
+  description: Register a conversion webhook and a custom resource definition. Create
+    a v1 custom resource. Attempts to read it at v2 MUST succeed.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_conversion_webhook.go
+- testname: Custom Resource Definition, watch
+  codename: '[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin]
+    CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]'
+  description: Create a Custom Resource Definition. Attempt to watch it; the watch
+    MUST observe create, modify and delete events.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_watch.go
+- testname: Custom Resource Definition, create
+  codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+    Simple CustomResourceDefinition creating/deleting custom resource definition objects
+    works [Conformance]'
+  description: Create an API extension client and define a random custom resource
+    definition. Create the custom resource definition and then delete it. The creation
+    and deletion MUST be successful.
+  release: v1.9
+  file: test/e2e/apimachinery/custom_resource_definition.go
+- testname: Custom Resource Definition, status sub-resource
+  codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+    Simple CustomResourceDefinition getting/updating/patching custom resource definition
+    status sub-resource works [Conformance]'
+  description: Create a custom resource definition. Attempt to read, update and patch
+    its status sub-resource; all mutating sub-resource operations MUST be visible
+    to subsequent reads.
+  release: v1.16
+  file: test/e2e/apimachinery/custom_resource_definition.go
+- testname: Custom Resource Definition, list
+  codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+    Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]'
+  description: Create an API extension client, define 10 labeled custom resource definitions
+    and list them using a label selector; the list result MUST contain only the labeled
+    custom resource definitions. Delete the labeled custom resource definitions via
+    delete collection; the delete MUST be successful and MUST delete only the labeled
+    custom resource definitions.
+  release: v1.16
+  file: test/e2e/apimachinery/custom_resource_definition.go
+- testname: Custom Resource Definition, defaulting
+  codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+    custom resource defaulting for requests and from storage works [Conformance]'
+  description: Create a custom resource definition without a default. Create a CR.
+    Add a default and read the CR until the default is applied. Create another CR.
+    Remove the default, add a default for another field, and read the CR until the
+    new field is defaulted but the old default stays.
+  release: v1.17
+  file: test/e2e/apimachinery/custom_resource_definition.go
+- testname: Custom Resource Definition, discovery
+  codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+    should include custom resource definition resources in discovery documents [Conformance]'
+  description: Fetch the /apis, /apis/apiextensions.k8s.io, and /apis/apiextensions.k8s.io/v1
+    discovery documents, and ensure they indicate that CustomResourceDefinition apiextensions.k8s.io/v1
+    resources are available.
+  release: v1.16
+  file: test/e2e/apimachinery/custom_resource_definition.go
+- testname: Custom Resource OpenAPI Publish, stop serving version
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    removes definition from spec when one version gets changed to not be served [Conformance]'
+  description: Register a custom resource definition with multiple versions. OpenAPI
+    definitions MUST be published for custom resource definitions. Update the custom
+    resource definition to not serve one of the versions. OpenAPI definitions MUST
+    be updated to not contain the version that is no longer served.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, version rename
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    updates the published spec when one version gets renamed [Conformance]'
+  description: Register a custom resource definition with multiple versions; OpenAPI
+    definitions MUST be published for custom resource definitions. Rename one of the
+    versions of the custom resource definition via a patch; OpenAPI definitions MUST
+    update to reflect the rename.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, with x-preserve-unknown-fields at root
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for CRD preserving unknown fields at the schema root [Conformance]'
+  description: Register a custom resource definition with x-preserve-unknown-fields
+    in the schema root.
+    Attempt to create and apply a change to a custom resource via kubectl; client-side
+    validation MUST accept unknown properties. Attempt kubectl explain; the output
+    MUST show the custom resource KIND.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, with x-preserve-unknown-fields in embedded
+    object
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for CRD preserving unknown fields in an embedded object [Conformance]'
+  description: Register a custom resource definition with x-preserve-unknown-fields
+    in an embedded object. Attempt to create and apply a change to a custom resource
+    via kubectl; client-side validation MUST accept unknown properties. Attempt kubectl
+    explain; the output MUST show that x-preserve-unknown-properties is used on the
+    nested field.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, with validation schema
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for CRD with validation schema [Conformance]'
+  description: Register a custom resource definition with a validating schema consisting
+    of objects, arrays and primitives. Attempt to create and apply a change to a custom
+    resource using valid properties, via kubectl; client-side validation MUST pass.
+    Attempt both operations with unknown properties and without required properties;
+    client-side validation MUST reject the operations. Attempt kubectl explain; the
+    output MUST explain the custom resource properties. Attempt kubectl explain on
+    custom resource properties; the output MUST explain the nested custom resource
+    properties.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, with x-preserve-unknown-fields in object
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for CRD without validation schema [Conformance]'
+  description: Register a custom resource definition with x-preserve-unknown-fields
+    in the top level object. Attempt to create and apply a change to a custom resource
+    via kubectl; client-side validation MUST accept unknown properties. Attempt kubectl
+    explain; the output MUST contain a valid DESCRIPTION stanza.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, varying groups
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for multiple CRDs of different groups [Conformance]'
+  description: Register multiple custom resource definitions spanning different groups
+    and versions; OpenAPI definitions MUST be published for custom resource definitions.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, varying kinds
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for multiple CRDs of same group and version but different kinds [Conformance]'
+  description: Register multiple custom resource definitions in the same group and
+    version but spanning different kinds; OpenAPI definitions MUST be published for
+    custom resource definitions.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Custom Resource OpenAPI Publish, varying versions
+  codename: '[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+    works for multiple CRDs of same group but different versions [Conformance]'
+  description: Register a custom resource definition with multiple versions; OpenAPI
+    definitions MUST be published for custom resource definitions.
+  release: v1.16
+  file: test/e2e/apimachinery/crd_publish_openapi.go
+- testname: Garbage Collector, delete deployment, propagation policy background
+  codename: '[sig-api-machinery] Garbage collector should delete RS created by deployment
+    when not orphaning [Conformance]'
+  description: Create a deployment with a replicaset. Once the replicaset is created,
+    delete the deployment with deleteOptions.PropagationPolicy set to Background.
+    Deleting the deployment MUST delete the replicaset created by the deployment,
+    and the Pods that belong to the deployment MUST also be deleted.
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, delete replication controller, propagation policy background
+  codename: '[sig-api-machinery] Garbage collector should delete pods created by rc
+    when not orphaning [Conformance]'
+  description: Create a replication controller with 2 Pods. Once the RC is created
+    and the first Pod is created, delete the RC with deleteOptions.PropagationPolicy
+    set to Background. Deleting the Replication Controller MUST cause pods created
+    by that RC to be deleted.
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, delete replication controller, after owned pods
+  codename: '[sig-api-machinery] Garbage collector should keep the rc around until
+    all its pods are deleted if the deleteOptions says so [Conformance]'
+  description: Create a replication controller with maximum allocatable Pods between
+    10 and 100 replicas. Once the RC is created and all the Pods are created, delete
+    the RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication
+    Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, dependency cycle
+  codename: '[sig-api-machinery] Garbage collector should not be blocked by dependency
+    circle [Conformance]'
+  description: 'TODO: should be an integration test Create three pods, patch them
+    with Owner references such that pod1 has pod3, pod2 has pod1 and pod3 has pod2
+    as owner references respectively. Deleting pod1 MUST delete all pods. The dependency
+    cycle MUST NOT block the garbage collection.'
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, multiple owners
+  codename: '[sig-api-machinery] Garbage collector should not delete dependents that
+    have both valid owner and owner that''s waiting for dependents to be deleted [Conformance]'
+  description: 'TODO: this should be an integration test Create a replication controller
+    RC1, with maximum allocatable Pods between 10 and 100 replicas. Create a second
+    replication controller RC2 and set RC2 as owner for half of those replicas. Once
+    RC1 is created and all the Pods are created, delete RC1 with deleteOptions.PropagationPolicy
+    set to Foreground. Half of the Pods that have RC2 as owner MUST NOT be deleted
+    but MUST have a deletion timestamp.
+    Deleting the Replication Controller MUST NOT delete Pods that are owned by multiple
+    replication controllers.'
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, delete deployment, propagation policy orphan
+  codename: '[sig-api-machinery] Garbage collector should orphan RS created by deployment
+    when deleteOptions.PropagationPolicy is Orphan [Conformance]'
+  description: Create a deployment with a replicaset. Once the replicaset is created,
+    delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting
+    the deployment MUST cause the replicaset created by the deployment to be orphaned,
+    and the Pods created by the deployment MUST be orphaned as well.
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: Garbage Collector, delete replication controller, propagation policy orphan
+  codename: '[sig-api-machinery] Garbage collector should orphan pods created by rc
+    if delete options say so [Conformance]'
+  description: Create a replication controller with maximum allocatable Pods between
+    10 and 100 replicas. Once the RC is created and all the Pods are created, delete
+    the RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication
+    Controller MUST cause pods created by that RC to be orphaned.
+  release: v1.9
+  file: test/e2e/apimachinery/garbage_collector.go
+- testname: namespace-deletion-removes-pods
+  codename: '[sig-api-machinery] Namespaces [Serial] should ensure that all pods are
+    removed when a namespace is deleted [Conformance]'
+  description: Ensure that if a namespace is deleted then all pods are removed from
+    that namespace.
+  release: ""
+  file: test/e2e/apimachinery/namespace.go
+- testname: namespace-deletion-removes-services
+  codename: '[sig-api-machinery] Namespaces [Serial] should ensure that all services
+    are removed when a namespace is deleted [Conformance]'
+  description: Ensure that if a namespace is deleted then all services are removed
+    from that namespace.
+  release: ""
+  file: test/e2e/apimachinery/namespace.go
+- testname: Namespace patching
+  codename: '[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]'
+  description: A Namespace is created. The Namespace is patched. The Namespace MUST
+    now include the new Label.
+  release: v1.18
+  file: test/e2e/apimachinery/namespace.go
+- testname: ResourceQuota, update and delete
+  codename: '[sig-api-machinery] ResourceQuota should be able to update and delete
+    ResourceQuota. [Conformance]'
+  description: Create a ResourceQuota for CPU and Memory quota limits. Creation MUST
+    be successful. When the ResourceQuota is updated to modify the CPU and Memory
+    quota limits, the update MUST succeed with updated values for the CPU and Memory
+    limits. When the ResourceQuota is deleted, it MUST NOT be available in the namespace.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, configmap
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a configMap. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a ConfigMap. Its creation MUST be successful and the resource
+    usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus
+    of the ResourceQuota. Delete the ConfigMap.
+    Deletion MUST succeed and the resource usage count against the ConfigMap object
+    MUST be released from ResourceQuotaStatus of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, pod
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a pod. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a Pod with resource request count for CPU, Memory, EphemeralStorage
+    and ExtendedResourceName. Pod creation MUST be successful and the respective resource
+    usage count MUST be captured in ResourceQuotaStatus of the ResourceQuota. Create
+    another Pod with resource request exceeding the remaining quota. Pod creation
+    MUST fail as the request exceeds the ResourceQuota limits. Update the successfully
+    created pod's resource requests. The update MUST fail as a Pod can not dynamically
+    update its resource requirements. Delete the successfully created Pod. Pod deletion
+    MUST be successful and it MUST release the allocated resource counts from ResourceQuotaStatus
+    of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, replicaSet
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a replica set. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a ReplicaSet. Its creation MUST be successful and the resource
+    usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus
+    of the ResourceQuota. Delete the ReplicaSet. Deletion MUST succeed and the resource
+    usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus
+    of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, replicationController
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a replication controller. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a ReplicationController. Its creation MUST be successful and
+    the resource usage count against the ReplicationController object MUST be captured
+    in ResourceQuotaStatus of the ResourceQuota. Delete the ReplicationController.
+    Deletion MUST succeed and the resource usage count against the ReplicationController
+    object MUST be released from ResourceQuotaStatus of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, secret
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a secret. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a Secret. Its creation MUST be successful and the resource usage
+    count against the Secret object and the resourceQuota object MUST be captured
+    in ResourceQuotaStatus of the ResourceQuota. Delete the Secret.
+    Deletion MUST succeed and the resource usage count against the Secret object
+    MUST be released from ResourceQuotaStatus of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, service
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture
+    the life of a service. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace. Create a Service. Its creation MUST be successful and the resource
+    usage count against the Service object and the resourceQuota object MUST be captured
+    in ResourceQuotaStatus of the ResourceQuota. Delete the Service. Deletion MUST
+    succeed and the resource usage count against the Service object MUST be released
+    from ResourceQuotaStatus of the ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, object count quota, resourcequotas
+  codename: '[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure
+    its status is promptly calculated. [Conformance]'
+  description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota count within the
+    namespace.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, quota scope, BestEffort and NotBestEffort scope
+  codename: '[sig-api-machinery] ResourceQuota should verify ResourceQuota with best
+    effort scope. [Conformance]'
+  description: Create two ResourceQuotas, one with 'BestEffort' scope and another
+    with 'NotBestEffort' scope. Creation MUST be successful and their ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota counts within the
+    namespace. Create a 'BestEffort' Pod by not explicitly specifying resource limits
+    and requests. Pod creation MUST be successful and the usage count MUST be captured
+    in ResourceQuotaStatus of the 'BestEffort' scoped ResourceQuota but MUST NOT be
+    in the 'NotBestEffort' scoped ResourceQuota. Delete the Pod. Pod deletion MUST
+    succeed and the Pod resource usage count MUST be released from ResourceQuotaStatus
+    of the 'BestEffort' scoped ResourceQuota. Create a 'NotBestEffort' Pod by explicitly
+    specifying resource limits and requests. Pod creation MUST be successful and the
+    usage count MUST be captured in ResourceQuotaStatus of the 'NotBestEffort' scoped
+    ResourceQuota but MUST NOT be in the 'BestEffort' scoped ResourceQuota. Delete
+    the Pod. Pod deletion MUST succeed and the Pod resource usage count MUST be released
+    from ResourceQuotaStatus of the 'NotBestEffort' scoped ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: ResourceQuota, quota scope, Terminating and NotTerminating scope
+  codename: '[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating
+    scopes. [Conformance]'
+  description: Create two ResourceQuotas, one with 'Terminating' scope and another
+    with 'NotTerminating' scope. Request and limit counts for CPU and Memory resources
+    are set for the ResourceQuota. Creation MUST be successful and their ResourceQuotaStatus
+    MUST match the expected used and total allowed resource quota counts within the
+    namespace. Create a Pod with specified CPU and Memory ResourceRequirements that
+    fall within quota limits.
+    Pod creation MUST be successful and the usage count MUST be captured in ResourceQuotaStatus
+    of the 'NotTerminating' scoped ResourceQuota but MUST NOT be in the 'Terminating'
+    scoped ResourceQuota. Delete the Pod. Pod deletion MUST succeed and the Pod resource
+    usage count MUST be released from ResourceQuotaStatus of the 'NotTerminating'
+    scoped ResourceQuota. Create a pod with specified activeDeadlineSeconds and resourceRequirements
+    for CPU and Memory that fall within quota limits. Pod creation MUST be successful
+    and the usage count MUST be captured in ResourceQuotaStatus of the 'Terminating'
+    scoped ResourceQuota but MUST NOT be in the 'NotTerminating' scoped ResourceQuota.
+    Delete the Pod. Pod deletion MUST succeed and the Pod resource usage count MUST
+    be released from ResourceQuotaStatus of the 'Terminating' scoped ResourceQuota.
+  release: v1.16
+  file: test/e2e/apimachinery/resource_quota.go
+- testname: Secrets, pod environment field
+  codename: '[sig-api-machinery] Secrets should be consumable from pods in env vars
+    [NodeConformance] [Conformance]'
+  description: Create a secret. Create a Pod with a Container that declares an environment
+    variable which references the created secret to extract a key value from it. Pod
+    MUST have the environment variable that contains the proper value for the key
+    to the secret.
+  release: v1.9
+  file: test/e2e/common/secrets.go
+- testname: Secrets, pod environment from source
+  codename: '[sig-api-machinery] Secrets should be consumable via the environment
+    [NodeConformance] [Conformance]'
+  description: Create a secret. Create a Pod with a Container that declares an environment
+    variable using 'EnvFrom' which references the created secret to extract a key
+    value from it. Pod MUST have the environment variable that contains the proper
+    value for the key to the secret.
+  release: v1.9
+  file: test/e2e/common/secrets.go
+- testname: Secrets, with empty-key
+  codename: '[sig-api-machinery] Secrets should fail to create secret due to empty
+    secret key [Conformance]'
+  description: Attempt to create a Secret with an empty key. The creation MUST fail.
+  release: v1.15
+  file: test/e2e/common/secrets.go
+- testname: Secret patching
+  codename: '[sig-api-machinery] Secrets should patch a secret [Conformance]'
+  description: A Secret is created. Listing all Secrets MUST return an empty list.
+    Given the patching and fetching of the Secret, the fields MUST equal the new values.
+    The Secret is deleted by its static Label. Secrets are listed finally; the list
+    MUST NOT include the originally created Secret.
+  release: v1.18
+  file: test/e2e/common/secrets.go
+- testname: API metadata HTTP return
+  codename: '[sig-api-machinery] Servers with support for Table transformation should
+    return a 406 for a backend which does not implement metadata [Conformance]'
+  description: Issue an HTTP request to the API. The HTTP request MUST return an HTTP
+    status code of 406.
+  release: v1.16
+  file: test/e2e/apimachinery/table_conversion.go
+- testname: watch-configmaps-closed-and-restarted
+  codename: '[sig-api-machinery] Watchers should be able to restart watching from
+    the last resource version observed by the previous watch [Conformance]'
+  description: Ensure that a watch can be reopened from the last resource version
+    observed by the previous watch, and that it will continue delivering notifications
+    from that point in time.
+ release: "" + file: test/e2e/apimachinery/watch.go +- testname: watch-configmaps-from-resource-version + codename: '[sig-api-machinery] Watchers should be able to start watching from a + specific resource version [Conformance]' + description: Ensure that a watch can be opened from a particular resource version + in the past and only notifications happening after that resource version are observed. + release: "" + file: test/e2e/apimachinery/watch.go +- testname: watch-configmaps-with-multiple-watchers + codename: '[sig-api-machinery] Watchers should observe add, update, and delete watch + notifications on configmaps [Conformance]' + description: Ensure that multiple watchers are able to receive all add, update, + and delete notifications on configmaps that match a label selector and do not + receive notifications for configmaps which do not match that label selector. + release: "" + file: test/e2e/apimachinery/watch.go +- testname: watch-configmaps-label-changed + codename: '[sig-api-machinery] Watchers should observe an object deletion if it + stops meeting the requirements of the selector [Conformance]' + description: Ensure that a watched object stops meeting the requirements of a watch's + selector, the watch will observe a delete, and will not observe notifications + for that object until it meets the selector's requirements again. + release: "" + file: test/e2e/apimachinery/watch.go +- testname: watch-consistency + codename: '[sig-api-machinery] Watchers should receive events on concurrent watches + in same order [Conformance]' + description: Ensure that concurrent watches are consistent with each other by initiating + an additional watch for events received from the first watch, initiated at the + resource version of the event, and checking that all resource versions of all + events match. Events are produced from writes on a background goroutine. + release: v1.15 + file: test/e2e/apimachinery/watch.go +- testname: DaemonSet-FailedPodCreation + codename: '[sig-apps] Daemon set [Serial] should retry creating failed daemon pods + [Conformance]' + description: A conformant Kubernetes distribution MUST create new DaemonSet Pods + when they fail. + release: "" + file: test/e2e/apps/daemon_set.go +- testname: DaemonSet-Rollback + codename: '[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts + [Conformance]' + description: A conformant Kubernetes distribution MUST support automated, minimally + disruptive rollback of updates to a DaemonSet. + release: "" + file: test/e2e/apps/daemon_set.go +- testname: DaemonSet-NodeSelection + codename: '[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]' + description: A conformant Kubernetes distribution MUST support DaemonSet Pod node + selection via label selectors. + release: "" + file: test/e2e/apps/daemon_set.go +- testname: DaemonSet-Creation + codename: '[sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]' + description: A conformant Kubernetes distribution MUST support the creation of DaemonSets. + When a DaemonSet Pod is deleted, the DaemonSet controller MUST create a replacement + Pod. + release: "" + file: test/e2e/apps/daemon_set.go +- testname: DaemonSet-RollingUpdate + codename: '[sig-apps] Daemon set [Serial] should update pod when spec was updated + and update strategy is RollingUpdate [Conformance]' + description: A conformant Kubernetes distribution MUST support DaemonSet RollingUpdates. 
+ release: "" + file: test/e2e/apps/daemon_set.go +- testname: Deployment Recreate + codename: '[sig-apps] Deployment RecreateDeployment should delete old pods and create + new ones [Conformance]' + description: A conformant Kubernetes distribution MUST support the Deployment with + Recreate strategy. + release: "" + file: test/e2e/apps/deployment.go +- testname: Deployment RollingUpdate + codename: '[sig-apps] Deployment RollingUpdateDeployment should delete old pods + and create new ones [Conformance]' + description: A conformant Kubernetes distribution MUST support the Deployment with + RollingUpdate strategy. + release: "" + file: test/e2e/apps/deployment.go +- testname: Deployment RevisionHistoryLimit + codename: '[sig-apps] Deployment deployment should delete old replica sets [Conformance]' + description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets + based on the Deployment's `.spec.revisionHistoryLimit`. + release: "" + file: test/e2e/apps/deployment.go +- testname: Deployment Proportional Scaling + codename: '[sig-apps] Deployment deployment should support proportional scaling + [Conformance]' + description: A conformant Kubernetes distribution MUST support Deployment proportional + scaling, i.e. proportionally scale a Deployment's ReplicaSets when a Deployment + is scaled. + release: "" + file: test/e2e/apps/deployment.go +- testname: Deployment Rollover + codename: '[sig-apps] Deployment deployment should support rollover [Conformance]' + description: A conformant Kubernetes distribution MUST support Deployment rollover, + i.e. allow arbitrary number of changes to desired state during rolling update + before the rollout finishes. + release: "" + file: test/e2e/apps/deployment.go +- testname: Jobs, orphan pods, re-adoption + codename: '[sig-apps] Job should adopt matching orphans and release non-matching + pods [Conformance]' + description: Create a parallel job. The number of Pods MUST equal the level of parallelism. + Orphan a Pod by modifying its owner reference. The Job MUST re-adopt the orphan + pod. Modify the labels of one of the Job's Pods. The Job MUST release the Pod. + release: v1.16 + file: test/e2e/apps/job.go +- testname: Jobs, active pods, graceful termination + codename: '[sig-apps] Job should delete a job [Conformance]' + description: Create a job. Ensure the active pods reflect paralellism in the namespace + and delete the job. Job MUST be deleted successfully. + release: v1.15 + file: test/e2e/apps/job.go +- testname: Jobs, completion after task failure + codename: '[sig-apps] Job should run a job to completion when tasks sometimes fail + and are locally restarted [Conformance]' + description: Explicitly cause the tasks to fail once initially. After restarting, + the Job MUST execute to completion. + release: v1.16 + file: test/e2e/apps/job.go +- testname: Replica Set, adopt matching pods and release non matching pods + codename: '[sig-apps] ReplicaSet should adopt matching pods on creation and release + no longer matching pods [Conformance]' + description: A Pod is created, then a Replica Set (RS) whose label selector will + match the Pod. The RS MUST either adopt the Pod or delete and replace it with + a new Pod. 
+    When the labels on one of the Pods owned by the RS change to no longer match
+    the RS's label selector, the RS MUST release the Pod and update the Pod's owner
+    references.
+  release: v1.13
+  file: test/e2e/apps/replica_set.go
+- testname: Replica Set, run basic image
+  codename: '[sig-apps] ReplicaSet should serve a basic image on each replica with
+    a public image [Conformance]'
+  description: Create a ReplicaSet with a Pod and a single Container. Make sure that
+    the Pod is running. Pod SHOULD send a valid response when queried.
+  release: v1.9
+  file: test/e2e/apps/replica_set.go
+- testname: Replication Controller, adopt matching pods
+  codename: '[sig-apps] ReplicationController should adopt matching pods on creation
+    [Conformance]'
+  description: An ownerless Pod is created, then a Replication Controller (RC) is
+    created whose label selector will match the Pod. The RC MUST either adopt the
+    Pod or delete and replace it with a new Pod.
+  release: v1.13
+  file: test/e2e/apps/rc.go
+- testname: Replication Controller, release pods
+  codename: '[sig-apps] ReplicationController should release no longer matching pods
+    [Conformance]'
+  description: A Replication Controller (RC) is created, and its Pods are created.
+    When the labels on one of the Pods change to no longer match the RC's label selector,
+    the RC MUST release the Pod and update the Pod's owner references.
+  release: v1.13
+  file: test/e2e/apps/rc.go
+- testname: Replication Controller, run basic image
+  codename: '[sig-apps] ReplicationController should serve a basic image on each replica
+    with a public image [Conformance]'
+  description: Replication Controller MUST create a Pod with a basic image and MUST
+    run the service with the provided image. The image MUST be tested by dialing into
+    the service listening through TCP, UDP and HTTP.
+  release: v1.9
+  file: test/e2e/apps/rc.go
+- testname: Replication Controller, check for issues like exceeding allocated quota
+  codename: '[sig-apps] ReplicationController should surface a failure condition on
+    a common issue like exceeded quota [Conformance]'
+  description: Attempt to create a Replication Controller with pods exceeding the
+    namespace quota. The creation MUST fail.
+  release: v1.15
+  file: test/e2e/apps/rc.go
+- testname: StatefulSet, Burst Scaling
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]'
+  description: StatefulSet MUST support the Parallel PodManagementPolicy for burst
+    scaling. This test does not depend on a preexisting default StorageClass or a
+    dynamic provisioner.
+  release: v1.9
+  file: test/e2e/apps/statefulset.go
+- testname: StatefulSet, Scaling
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    Scaling should happen in predictable order and halt if any stateful pod is unhealthy
+    [Slow] [Conformance]'
+  description: StatefulSet MUST create Pods in ascending order by ordinal index when
+    scaling up, and delete Pods in descending order when scaling down. Scaling up
+    or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This
+    test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+  release: v1.9
+  file: test/e2e/apps/statefulset.go
+- testname: StatefulSet, Recreate Failed Pod
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    Should recreate evicted statefulset [Conformance]'
+  description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed
+    state, such as when they are rejected or evicted by a Node. This test does not
+    depend on a preexisting default StorageClass or a dynamic provisioner.
+  release: v1.9
+  file: test/e2e/apps/statefulset.go
+- testname: StatefulSet resource Replica scaling
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    should have a working scale subresource [Conformance]'
+  description: Create a StatefulSet resource. The newly created StatefulSet resource
+    MUST have a scale of one. Bring the scale of the StatefulSet resource up to two.
+    The StatefulSet scale MUST be at two replicas.
+  release: v1.16
+  file: test/e2e/apps/statefulset.go
+- testname: StatefulSet, Rolling Update with Partition
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    should perform canary updates and phased rolling updates of template modifications
+    [Conformance]'
+  description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter
+    for canaries and phased rollouts. If a Pod is deleted while a rolling update is
+    in progress, StatefulSet MUST restore the Pod without violating the Partition.
+    This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+  release: v1.9
+  file: test/e2e/apps/statefulset.go
+- testname: StatefulSet, Rolling Update
+  codename: '[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+    should perform rolling updates and roll backs of template modifications [Conformance]'
+  description: StatefulSet MUST support the RollingUpdate strategy to automatically
+    replace Pods one at a time when the Pod template changes. The StatefulSet's status
+    MUST indicate the CurrentRevision and UpdateRevision. If the template is changed
+    to match a prior revision, StatefulSet MUST detect this as a rollback instead
+    of creating a new revision. This test does not depend on a preexisting default
+    StorageClass or a dynamic provisioner.
+  release: v1.9
+  file: test/e2e/apps/statefulset.go
+- testname: Service account tokens auto mount optionally
+  codename: '[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]'
+  description: Ensure that Service Account keys are mounted into the Pod only when
+    AutomountServiceAccountToken is not set to false. We test the following scenarios
+    here. 1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil a) Service
+    Account with default value, b) Service Account is configured with AutomountServiceAccountToken
+    set to true, c) Service Account is configured with AutomountServiceAccountToken
+    set to false. 2. Create Pod, Pod Spec has AutomountServiceAccountToken set to
+    true a) Service Account with default value, b) Service Account is configured with
+    AutomountServiceAccountToken set to true, c) Service Account is configured with
+    AutomountServiceAccountToken set to false. 3.
+    Create Pod, Pod Spec has AutomountServiceAccountToken set to false a) Service
+    Account with default value, b) Service Account is configured with AutomountServiceAccountToken
+    set to true, c) Service Account is configured with AutomountServiceAccountToken
+    set to false. The Containers running in these pods MUST verify that the ServiceTokenVolume
+    path is auto mounted only when the Pod Spec has AutomountServiceAccountToken not
+    set to false and the ServiceAccount object has AutomountServiceAccountToken not
+    set to false; this includes test cases 1a, 1b, 2a, 2b and 2c. In the test cases
+    1c, 3a, 3b and 3c the ServiceTokenVolume MUST NOT be auto mounted.
+  release: v1.9
+  file: test/e2e/auth/service_accounts.go
+- testname: Service Account Tokens Must AutoMount
+  codename: '[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]'
+  description: Ensure that Service Account keys are mounted into the Container. The
+    Pod contains three containers, each of which will read the Service Account token,
+    the root CA and the default namespace respectively from the default API Token
+    Mount path. All three files MUST exist and the Service Account mount path MUST
+    be auto mounted into the Container.
+  release: v1.9
+  file: test/e2e/auth/service_accounts.go
+- testname: Kubectl, guestbook application
+  codename: '[sig-cli] Kubectl client Guestbook application should create and stop
+    a working application [Conformance]'
+  description: Create a Guestbook application that contains an agnhost master server,
+    2 agnhost slaves, a frontend application, a frontend service, an agnhost master
+    service and an agnhost slave service. Using the frontend service, the test will
+    write an entry into the guestbook application which will store the entry into
+    the backend agnhost store. The application flow MUST work as expected and the
+    data written MUST be available to read.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, check version v1
+  codename: '[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in
+    available api versions [Conformance]'
+  description: Run kubectl to get the api versions; the output MUST contain the returned
+    versions, with 'v1' listed.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, cluster info
+  codename: '[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes
+    master services is included in cluster-info [Conformance]'
+  description: Call kubectl to get the cluster-info; the output MUST contain the returned
+    cluster-info and the Kubernetes Master SHOULD be running.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, describe pod or rc
+  codename: '[sig-cli] Kubectl client Kubectl describe should check if kubectl describe
+    prints relevant information for rc and pods [Conformance]'
+  description: Deploy an agnhost controller and an agnhost service. Kubectl describe
+    pods SHOULD return the name, namespace, labels, state and other information as
+    expected. Kubectl describe on rc, service, node and namespace SHOULD also return
+    proper information.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, create service, replication controller
+  codename: '[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]'
+  description: Create a Pod running agnhost listening on port 6379. Using 'kubectl
+    expose', expose the agnhost master replication controller at port 1234.
+    Validate that the replication controller is listening on port 1234 and that the
+    target port is set to 6379, the port on which the agnhost master is listening.
+    Using 'kubectl expose', expose the agnhost master as a service at port 2345. The
+    service MUST be listening on port 2345 and the target port MUST be set to 6379,
+    the port on which the agnhost master is listening.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, label update
+  codename: '[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]'
+  description: When a Pod is running, update a Label using the 'kubectl label' command.
+    The label MUST be created on the Pod. A 'kubectl get pod' with the -l option on
+    the container MUST verify that the label can be read back. Use 'kubectl label
+    label-' to remove the label. A 'kubectl get pod' with the -l option SHOULD NOT
+    list the deleted label, as the label is removed.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, logs
+  codename: '[sig-cli] Kubectl client Kubectl logs should be able to retrieve and
+    filter logs [Conformance]'
+  description: When a Pod is running it MUST generate logs. Starting a Pod should
+    produce an expected log line. The log command options MUST also work as expected
+    and as described below. 'kubectl logs --tail=1' should generate an output of one
+    line, the last line in the log. 'kubectl logs --limit-bytes=1' should generate
+    a single byte of output. 'kubectl logs --tail=1 --timestamps' should generate
+    one line with a timestamp in RFC3339 format. 'kubectl logs --since=1s' should
+    output only the logs from the last 1 second. 'kubectl logs --since=24h' should
+    output only the logs from the last 24 hours.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, patch to annotate
+  codename: '[sig-cli] Kubectl client Kubectl patch should add annotations for pods
+    in rc [Conformance]'
+  description: Start running agnhost and a replication controller. When the pod is
+    running, add annotations using the 'kubectl patch' command. The annotation MUST
+    be added to the running pods and the added annotations SHOULD be readable from
+    each of the Pods running under the replication controller.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, replace
+  codename: '[sig-cli] Kubectl client Kubectl replace should update a single-container
+    pod''s image [Conformance]'
+  description: The command 'kubectl replace' on an existing Pod with a new spec MUST
+    update the image of the container running in the Pod. A -f option to 'kubectl
+    replace' SHOULD force the re-creation of the resource. The new Pod SHOULD have
+    the container with the new change to the image.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, run pod
+  codename: '[sig-cli] Kubectl client Kubectl run pod should create a pod from an
+    image when restart is Never [Conformance]'
+  description: The command 'kubectl run' MUST create a pod when an image name is specified
+    in the run command. After the run command there SHOULD be a pod that exists with
+    one container running the specified image.
+  release: v1.9
+  file: test/e2e/kubectl/kubectl.go
+- testname: Kubectl, version
+  codename: '[sig-cli] Kubectl client Kubectl version should check is all data is
+    printed [Conformance]'
+  description: The command 'kubectl version' MUST return the major and minor versions,
+    the GitCommit, etc. of the Client and of the Server that kubectl is configured
+    to connect to.
+ release: v1.9 + file: test/e2e/kubectl/kubectl.go +- testname: Kubectl, proxy socket + codename: '[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]' + description: Start a proxy server by running 'kubectl proxy' with --unix-socket=. Call the proxy server by requesting api versions from http://localhost:0/api. + The proxy server MUST provide at least one version string. + release: v1.9 + file: test/e2e/kubectl/kubectl.go +- testname: Kubectl, proxy port zero + codename: '[sig-cli] Kubectl client Proxy server should support proxy with --port + 0 [Conformance]' + description: 'TODO: test proxy options (static, prefix, etc) Start a proxy server + on port zero by running ''kubectl proxy'' with --port=0. Call the proxy server + by requesting api versions from the unix socket. The proxy server MUST provide at + least one version string.' + release: v1.9 + file: test/e2e/kubectl/kubectl.go +- testname: Kubectl, replication controller + codename: '[sig-cli] Kubectl client Update Demo should create and stop a replication + controller [Conformance]' + description: Create a Pod and a container with a given image. Configure the replication + controller to run 2 replicas. The number of running instances of the Pod MUST + equal the number of replicas set on the replication controller, which is 2. + release: v1.9 + file: test/e2e/kubectl/kubectl.go +- testname: Kubectl, scale replication controller + codename: '[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]' + description: Create a Pod and a container with a given image. Configure the replication + controller to run 2 replicas. The number of running instances of the Pod MUST + equal the number of replicas set on the replication controller, which is 2. Update + the replica count to 1. The number of running instances of the Pod MUST be 1. Update + the replica count to 2. The number of running instances of the Pod MUST be 2. + release: v1.9 + file: test/e2e/kubectl/kubectl.go +- testname: DNS, cluster + codename: '[sig-network] DNS should provide /etc/hosts entries for the cluster [LinuxOnly] + [Conformance]' + description: When a Pod is created, the pod MUST be able to resolve cluster dns + entries such as kubernetes.default via /etc/hosts. + release: v1.14 + file: test/e2e/network/dns.go +- testname: DNS, for ExternalName Services + codename: '[sig-network] DNS should provide DNS for ExternalName services [Conformance]' + description: Create a service with externalName. The Pod MUST be able to resolve the + address for this service via CNAME. When the externalName of this service is changed, the + Pod MUST resolve to the new DNS entry for the service. Change the service type from + externalName to ClusterIP; the Pod MUST resolve DNS to the service by serving A records. + release: v1.15 + file: test/e2e/network/dns.go +- testname: DNS, resolve the hostname + codename: '[sig-network] DNS should provide DNS for pods for Hostname [LinuxOnly] + [Conformance]' + description: Create a headless service with a label. Create a Pod with a label to match the + service's label, with a hostname and a subdomain same as the service name. The Pod MUST + be able to resolve its fully qualified domain name as well as its hostname by serving + an A record at that name. + release: v1.15 + file: test/e2e/network/dns.go +- testname: DNS, resolve the subdomain + codename: '[sig-network] DNS should provide DNS for pods for Subdomain [Conformance]' + description: Create a headless service with a label.
Create a Pod with a label to match the + service's label, with a hostname and a subdomain same as the service name. The Pod MUST + be able to resolve its fully qualified domain name as well as its subdomain by serving + an A record at that name. + release: v1.15 + file: test/e2e/network/dns.go +- testname: DNS, services + codename: '[sig-network] DNS should provide DNS for services [Conformance]' + description: When a headless service is created, the service MUST be able to resolve + all the required service endpoints. When the service is created, any pod in the + same namespace MUST be able to resolve the service by all of the expected DNS + names. + release: v1.9 + file: test/e2e/network/dns.go +- testname: DNS, cluster + codename: '[sig-network] DNS should provide DNS for the cluster [Conformance]' + description: When a Pod is created, the pod MUST be able to resolve cluster dns + entries such as kubernetes.default via DNS. + release: v1.9 + file: test/e2e/network/dns.go +- testname: DNS, PQDN for services + codename: '[sig-network] DNS should resolve DNS of partial qualified names for services + [LinuxOnly] [Conformance]' + description: 'Create a headless service and a normal service. Both services MUST + be able to resolve partially qualified DNS entries of their service endpoints by + serving A records and SRV records. [LinuxOnly]: As Windows currently does not + support resolving PQDNs.' + release: v1.17 + file: test/e2e/network/dns.go +- testname: DNS, custom dnsConfig + codename: '[sig-network] DNS should support configurable pod DNS nameservers [Conformance]' + description: Create a Pod with DNSPolicy set to None and a custom DNS configuration, specifying + nameservers and search path entries. Pod creation MUST be successful and the provided + DNS configuration MUST be configured in the Pod. + release: v1.17 + file: test/e2e/network/dns.go +- testname: Networking, intra pod http + codename: '[sig-network] Networking Granular Checks: Pods should function for intra-pod + communication: http [NodeConformance] [Conformance]' + description: Try to hit all endpoints through a test container, retry 5 times, and expect + exactly one unique hostname. Each of these endpoints reports its own hostname. + Create a hostexec pod that is capable of running curl and netcat commands. Create a test + Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 + for udp. The netserver service proxies are created on a specified number of nodes. + The kubectl exec on the webserver container MUST reach an http port on each + of the service proxy endpoints in the cluster and the request MUST be successful. + The container will execute the curl command to reach the service port within the specified + max retry limit and MUST result in reporting unique hostnames. + release: v1.9, v1.18 + file: test/e2e/common/networking.go +- testname: Networking, intra pod udp + codename: '[sig-network] Networking Granular Checks: Pods should function for intra-pod + communication: udp [NodeConformance] [Conformance]' + description: Create a hostexec pod that is capable of running curl and netcat commands. Create + a test Pod that will act as a webserver front end exposing ports 8080 for tcp + and 8081 for udp. The netserver service proxies are created on a specified number + of nodes. The kubectl exec on the webserver container MUST reach a udp port on + each of the service proxy endpoints in the cluster and the request MUST be successful.
+ The container will execute the curl command to reach the service port within the specified + max retry limit and MUST result in reporting unique hostnames. + release: v1.9, v1.18 + file: test/e2e/common/networking.go +- testname: Networking, intra pod http, from node + codename: '[sig-network] Networking Granular Checks: Pods should function for node-pod + communication: http [LinuxOnly] [NodeConformance] [Conformance]' + description: Create a hostexec pod that is capable of running curl and netcat commands. Create + a test Pod that will act as a webserver front end exposing ports 8080 for tcp + and 8081 for udp. The netserver service proxies are created on a specified number + of nodes. The kubectl exec on the webserver container MUST reach an http port on + each of the service proxy endpoints in the cluster using an http post (protocol=tcp) and + the request MUST be successful. The container will execute the curl command to reach the + service port within the specified max retry limit and MUST result in reporting unique + hostnames. This test is marked LinuxOnly since HostNetwork is not supported on + other platforms like Windows. + release: v1.9 + file: test/e2e/common/networking.go +- testname: Networking, intra pod udp, from node + codename: '[sig-network] Networking Granular Checks: Pods should function for node-pod + communication: udp [LinuxOnly] [NodeConformance] [Conformance]' + description: Create a hostexec pod that is capable of running curl and netcat commands. Create + a test Pod that will act as a webserver front end exposing ports 8080 for tcp + and 8081 for udp. The netserver service proxies are created on a specified number + of nodes. The kubectl exec on the webserver container MUST reach a udp port on + each of the service proxy endpoints in the cluster using an http post (protocol=udp) and + the request MUST be successful. The container will execute the curl command to reach the + service port within the specified max retry limit and MUST result in reporting unique + hostnames. This test is marked LinuxOnly since HostNetwork is not supported on + other platforms like Windows. + release: v1.9 + file: test/e2e/common/networking.go +- testname: Proxy, logs endpoint + codename: '[sig-network] Proxy version v1 should proxy logs on node using proxy + subresource [Conformance]' + description: Select any node in the cluster to invoke the /proxy/nodes/<node>/logs + endpoint. This endpoint MUST be reachable. + release: v1.9 + file: test/e2e/network/proxy.go +- testname: Proxy, logs port endpoint + codename: '[sig-network] Proxy version v1 should proxy logs on node with explicit + kubelet port using proxy subresource [Conformance]' + description: Select any node in the cluster to invoke the /proxy/nodes/<node>:10250/logs + endpoint. This endpoint MUST be reachable. + release: v1.9 + file: test/e2e/network/proxy.go +- testname: Proxy, logs service endpoint + codename: '[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]' + description: Using the porter image to serve content, access the content (of multiple + pods?) from multiple (endpoints/services?). Select any node in the cluster to invoke the /logs + endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint + MUST be reachable.
+ release: v1.9 + file: test/e2e/network/proxy.go +- testname: Service endpoint latency, thresholds + codename: '[sig-network] Service endpoints latency should not be very high [Conformance]' + description: Run 100 iterations of creating a service with a Pod running the pause + image, and measure the time it takes until the service and the endpoint with + the service name are available. These durations are captured for 100 iterations, + then the durations are sorted to compute the 50th, 90th and 99th percentiles. The single + server latency MUST NOT exceed the liberally set thresholds of 20s for the 50th percentile + and 50s for the 90th percentile. + release: v1.9 + file: test/e2e/network/service_latency.go +- testname: Service, change type, ClusterIP to ExternalName + codename: '[sig-network] Services should be able to change the type from ClusterIP + to ExternalName [Conformance]' + description: Create a service of type ClusterIP. Service creation MUST be successful + by assigning a ClusterIP to the service. Update the service type from ClusterIP to ExternalName + by setting the CNAME entry as the externalName. The service update MUST be successful and the + service MUST NOT have an associated ClusterIP. The service MUST be able to resolve to an + IP address by returning A records, ensuring the service is pointing to the provided externalName. + release: v1.16 + file: test/e2e/network/service.go +- testname: Service, change type, ExternalName to ClusterIP + codename: '[sig-network] Services should be able to change the type from ExternalName + to ClusterIP [Conformance]' + description: Create a service of type ExternalName, pointing to external DNS. A ClusterIP + MUST NOT be assigned to the service. Update the service from ExternalName to ClusterIP + by removing the ExternalName entry, assigning port 80 as the service port and TCP as the protocol. + The service update MUST be successful by assigning a ClusterIP to the service, and it + MUST be reachable over the serviceName and the ClusterIP on the provided service port. + release: v1.16 + file: test/e2e/network/service.go +- testname: Service, change type, ExternalName to NodePort + codename: '[sig-network] Services should be able to change the type from ExternalName + to NodePort [Conformance]' + description: Create a service of type ExternalName, pointing to external DNS. A ClusterIP + MUST NOT be assigned to the service. Update the service from ExternalName to NodePort, + assigning port 80 as the service port and TCP as the protocol. The service update MUST be + successful by exposing the service on every node's IP on a dynamically assigned NodePort, + and a ClusterIP MUST be assigned to route service requests. The service MUST be reachable + over the serviceName and the ClusterIP on the servicePort. The service MUST also be reachable + over the node's IP on the NodePort. + release: v1.16 + file: test/e2e/network/service.go +- testname: Service, change type, NodePort to ExternalName + codename: '[sig-network] Services should be able to change the type from NodePort + to ExternalName [Conformance]' + description: Create a service of type NodePort. Service creation MUST be successful + by exposing the service on every node's IP on a dynamically assigned NodePort, and a ClusterIP + MUST be assigned to route service requests. Update the service type from NodePort + to ExternalName by setting the CNAME entry as the externalName. The service update MUST be + successful, the service MUST NOT have an associated ClusterIP, and the allocated + NodePort MUST be released.
The service MUST be able to resolve to an IP address by returning + A records, ensuring the service is pointing to the provided externalName. + release: v1.16 + file: test/e2e/network/service.go +- testname: Service, NodePort Service + codename: '[sig-network] Services should be able to create a functioning NodePort + service [Conformance]' + description: Create a TCP NodePort service, and test reachability from a client + Pod. The client Pod MUST be able to access the NodePort service by service name + and cluster IP on the service port, and on the nodes' internal and external IPs on + the NodePort. + release: v1.16 + file: test/e2e/network/service.go +- testname: Find Kubernetes Service in default Namespace + codename: '[sig-network] Services should find a service from listing all namespaces + [Conformance]' + description: List all Services in all Namespaces; the response MUST include a Service + named Kubernetes with the Namespace of default. + release: v1.18 + file: test/e2e/network/service.go +- testname: Kubernetes Service + codename: '[sig-network] Services should provide secure master service [Conformance]' + description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' + service running in the cluster. + release: v1.9 + file: test/e2e/network/service.go +- testname: Service, endpoints + codename: '[sig-network] Services should serve a basic endpoint from pods [Conformance]' + description: Create a service with an endpoint but without any Pods; the service MUST + run and show empty endpoints. Add a pod to the service and the service MUST validate + to show all the endpoints for the ports exposed by the Pod. Add another Pod; then + the list of all ports exposed by both Pods MUST be valid and have a corresponding + service endpoint. Once the second Pod is deleted, the set of endpoints MUST be + validated to show only the exposed ports of the first Pod. Once both + pods are deleted, the endpoints from the service MUST be empty. + release: v1.9 + file: test/e2e/network/service.go +- testname: Service, endpoints with multiple ports + codename: '[sig-network] Services should serve multiport endpoints from pods [Conformance]' + description: Create a service with two ports, but with no Pods added to the service + yet. The service MUST run and show an empty set of endpoints. Add a Pod to the first + port; the service MUST list one endpoint for the Pod on that port. Add another Pod + to the second port; the service MUST list both endpoints. Delete the first Pod + and the service MUST list only the endpoint of the second Pod. Delete the second + Pod and the service MUST now have an empty set of endpoints. + release: v1.9 + file: test/e2e/network/service.go +- testname: ConfigMap, from environment field + codename: '[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] + [Conformance]' + description: Create a Pod with an environment variable value set using a value from + a ConfigMap. A ConfigMap value MUST be accessible in the container environment. + release: v1.9 + file: test/e2e/common/configmap.go +- testname: ConfigMap, from environment variables + codename: '[sig-node] ConfigMap should be consumable via the environment [NodeConformance] + [Conformance]' + description: Create a Pod with an environment source from a ConfigMap. All ConfigMap + values MUST be available as environment variables in the container.
+ release: v1.9 + file: test/e2e/common/configmap.go +- testname: ConfigMap, with empty-key + codename: '[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]' + description: Attempt to create a ConfigMap with an empty key. The creation MUST + fail. + release: v1.14 + file: test/e2e/common/configmap.go +- testname: DownwardAPI, environment for CPU and memory limits and requests + codename: '[sig-node] Downward API should provide container''s limits.cpu/memory + and requests.cpu/memory as env vars [NodeConformance] [Conformance]' + description: Downward API MUST expose CPU request and Memory request set through + environment variables at runtime in the container. + release: v1.9 + file: test/e2e/common/downward_api.go +- testname: DownwardAPI, environment for default CPU and memory limits and requests + codename: '[sig-node] Downward API should provide default limits.cpu/memory from + node allocatable [NodeConformance] [Conformance]' + description: Downward API MUST expose CPU request and Memory limits set through + environment variables at runtime in the container. + release: v1.9 + file: test/e2e/common/downward_api.go +- testname: DownwardAPI, environment for host ip + codename: '[sig-node] Downward API should provide host IP as an env var [NodeConformance] + [Conformance]' + description: Downward API MUST expose Pod and Container fields as environment variables. + The host IP, specified as an environment variable in the Pod Spec, MUST be visible at runtime + in the container. + release: v1.9 + file: test/e2e/common/downward_api.go +- testname: DownwardAPI, environment for Pod UID + codename: '[sig-node] Downward API should provide pod UID as env vars [NodeConformance] + [Conformance]' + description: Downward API MUST expose Pod UID set through environment variables + at runtime in the container. + release: v1.9 + file: test/e2e/common/downward_api.go +- testname: DownwardAPI, environment for name, namespace and ip + codename: '[sig-node] Downward API should provide pod name, namespace and IP address + as env vars [NodeConformance] [Conformance]' + description: Downward API MUST expose Pod and Container fields as environment variables. + The Pod name, namespace and IP, specified as environment variables in the Pod Spec, MUST be + visible at runtime in the container. + release: v1.9 + file: test/e2e/common/downward_api.go +- testname: LimitRange, resources + codename: '[sig-scheduling] LimitRange should create a LimitRange with defaults + and ensure pod has those defaults applied. [Conformance]' + description: Create a LimitRange and verify its creation, then update + the LimitRange and validate it. Create Pods with resources and + validate that the LimitRange defaults are applied to the Pods. + release: v1.18 + file: test/e2e/scheduling/limit_range.go +- testname: Pod Eviction, Toleration limits + codename: '[sig-scheduling] NoExecuteTaintManager Multiple Pods [Serial] evicts + pods with minTolerationSeconds [Disruptive] [Conformance]' + description: In a multi-pods scenario with tolerationSeconds, the pods MUST be evicted + as per the toleration time limit. + release: v1.16 + file: test/e2e/scheduling/taints.go +- testname: Taint, Pod Eviction on taint removal + codename: '[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] removing taint + cancels eviction [Disruptive] [Conformance]' + description: The Pod with toleration timeout scheduled on a tainted Node MUST NOT + be evicted if the taint is removed before the toleration time ends.
+ release: v1.16 + file: test/e2e/scheduling/taints.go +- testname: Scheduler, resource limits + codename: '[sig-scheduling] SchedulerPredicates [Serial] validates resource limits + of pods that are allowed to run [Conformance]' + description: 'This test verifies we don''t allow scheduling of pods in a way that the + sum of the resource requests of the pods is greater than the machine''s capacity. It assumes + that cluster add-on pods stay stable and cannot be run in parallel with any other + test that touches Nodes or Pods. It is so because we need to have precise control + on what''s running in the cluster. Test scenario: 1. Find the amount of CPU resources + on each node. 2. Create one pod with affinity to each node that uses 70% of the + node CPU. 3. Wait for the pods to be scheduled. 4. Create another pod with no + affinity to any node that needs 50% of the largest node CPU. 5. Make sure this + additional pod is not scheduled. Scheduling Pods MUST fail if the resource requests + exceed Machine capacity.' + release: v1.9 + file: test/e2e/scheduling/predicates.go +- testname: Scheduler, node selector matching + codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector + is respected if matching [Conformance]' + description: 'Create a label on the node {k: v}. Then create a Pod with a NodeSelector + set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches, + then the Pod MUST be scheduled on that node.' + release: v1.9 + file: test/e2e/scheduling/predicates.go +- testname: Scheduler, node selector not matching + codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector + is respected if not matching [Conformance]' + description: The test nodes do not have any matching label, hence it should be impossible to + schedule a Pod with a nonempty NodeSelector set. Create a Pod with a NodeSelector set + to a value that does not match a node in the cluster. Since there are no nodes + matching the criteria, the Pod MUST not be scheduled. + release: v1.9 + file: test/e2e/scheduling/predicates.go +- testname: Scheduling, HostPort and Protocol match, HostIPs different but one is + default HostIP (0.0.0.0) + codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that there exists + conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP + [Conformance]' + description: Pods with the same HostPort and Protocol, but different HostIPs, MUST + NOT schedule to the same node if one of those IPs is the default HostIP of 0.0.0.0, + which represents all IPs on the host. + release: v1.16 + file: test/e2e/scheduling/predicates.go +- testname: Scheduling, HostPort matching and HostIP and Protocol not-matching + codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that there is + no conflict between pods with same hostPort but different hostIP and protocol + [Conformance]' + description: Pods with the same HostPort value MUST be able to be scheduled to the + same node if the HostIP or Protocol is different. + release: v1.16 + file: test/e2e/scheduling/predicates.go +- testname: ConfigMap Volume, text data, binary data + codename: '[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] + [Conformance]' + description: The ConfigMap that is created with text data and binary data MUST be + accessible to read from the newly created Pod using the volume mount that is mapped + to a custom path in the Pod.
The ConfigMap's text data and binary data MUST be verified + by reading the content from the mounted files in the Pod. + release: v1.12 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, create, update and delete + codename: '[sig-storage] ConfigMap optional updates should be reflected in volume + [NodeConformance] [Conformance]' + description: The ConfigMap that is created MUST be accessible to read from the newly + created Pod using the volume mount that is mapped to a custom path in the Pod. When + the config map is updated, the change to the config map MUST be verified by reading + the content from the mounted file in the Pod. Also, when the item (file) is deleted + from the map, that MUST result in an error reading that item (file). + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, without mapping + codename: '[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] + [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. The ConfigMap that is created MUST + be accessible to read from the newly created Pod using the volume mount. The data + content of the file MUST be readable and verified and file modes MUST default + to 0644. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, without mapping, non-root user + codename: '[sig-storage] ConfigMap should be consumable from pods in volume as non-root + [NodeConformance] [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. The Pod is run as a non-root user with + uid=1000. The ConfigMap that is created MUST be accessible to read from the newly + created Pod using the volume mount. The file on the volume MUST have its file mode + set to the default value of 0644. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, without mapping, volume mode set + codename: '[sig-storage] ConfigMap should be consumable from pods in volume with + defaultMode set [LinuxOnly] [NodeConformance] [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. The file mode is changed to a custom + value of '0400'. The ConfigMap that is created MUST be accessible to read from + the newly created Pod using the volume mount. The data content of the file MUST + be readable and verified and file modes MUST be set to the custom value of '0400'. + This test is marked LinuxOnly since Windows does not support setting specific + file permissions. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, with mapping + codename: '[sig-storage] ConfigMap should be consumable from pods in volume with + mappings [NodeConformance] [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. Files are mapped to a path in the + volume. The ConfigMap that is created MUST be accessible to read from the newly + created Pod using the volume mount. The data content of the file MUST be readable + and verified and file modes MUST default to 0644.
+ release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, with mapping, volume mode set + codename: '[sig-storage] ConfigMap should be consumable from pods in volume with + mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. Files are mapped to a path in the + volume. The file mode is changed to a custom value of '0400'. The ConfigMap that + is created MUST be accessible to read from the newly created Pod using the volume + mount. The data content of the file MUST be readable and verified and file modes + MUST be set to the custom value of '0400'. This test is marked LinuxOnly since + Windows does not support setting specific file permissions. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, with mapping, non-root user + codename: '[sig-storage] ConfigMap should be consumable from pods in volume with + mappings as non-root [NodeConformance] [Conformance]' + description: Create a ConfigMap, then create a Pod that mounts a volume and populates + the volume with data stored in the ConfigMap. Files are mapped to a path in the + volume. The Pod is run as a non-root user with uid=1000. The ConfigMap that is created + MUST be accessible to read from the newly created Pod using the volume mount. + The file on the volume MUST have its file mode set to the default value of 0644. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, multiple volume maps + codename: '[sig-storage] ConfigMap should be consumable in multiple volumes in the + same pod [NodeConformance] [Conformance]' + description: The ConfigMap that is created MUST be accessible to read from the newly + created Pod using the volume mount that is mapped to multiple paths in the Pod. + The content MUST be accessible from all the mapped volume mounts. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: ConfigMap Volume, update + codename: '[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] + [Conformance]' + description: The ConfigMap that is created MUST be accessible to read from the newly + created Pod using the volume mount that is mapped to a custom path in the Pod. When + the ConfigMap is updated, the change to the config map MUST be verified by reading + the content from the mounted file in the Pod. + release: v1.9 + file: test/e2e/common/configmap_volume.go +- testname: DownwardAPI volume, CPU limits + codename: '[sig-storage] Downward API volume should provide container''s cpu limit + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the CPU limits. The container runtime MUST be able to access + the CPU limits from the specified path on the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, CPU request + codename: '[sig-storage] Downward API volume should provide container''s cpu request + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the CPU request. The container runtime MUST be able to access + the CPU request from the specified path on the mounted volume.
+ release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, memory limits + codename: '[sig-storage] Downward API volume should provide container''s memory + limit [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the memory limits. The container runtime MUST be able to access + the memory limits from the specified path on the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, memory request + codename: '[sig-storage] Downward API volume should provide container''s memory + request [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the memory request. The container runtime MUST be able to + access the memory request from the specified path on the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, CPU limit, default node allocatable + codename: '[sig-storage] Downward API volume should provide node allocatable (cpu) + as default cpu limit if the limit is not set [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the CPU limits. CPU limits are not specified for the container. + The container runtime MUST be able to access the CPU limits from the specified path + on the mounted volume and the value MUST be the default node allocatable. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, memory limit, default node allocatable + codename: '[sig-storage] Downward API volume should provide node allocatable (memory) + as default memory limit if the limit is not set [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the memory limits. Memory limits are not specified for the + container. The container runtime MUST be able to access the memory limits from the + specified path on the mounted volume and the value MUST be the default node allocatable. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, pod name + codename: '[sig-storage] Downward API volume should provide podname only [NodeConformance] + [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the Pod name. The container runtime MUST be able to access + the Pod name from the specified path on the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, volume mode 0400 + codename: '[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource with the volumesource + mode set to -r-------- and DownwardAPIVolumeFiles contains an item for the Pod + name. The container runtime MUST be able to access the Pod name from the specified + path on the mounted volume. This test is marked LinuxOnly since Windows does not + support setting specific file permissions.
+ release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, file mode 0400 + codename: '[sig-storage] Downward API volume should set mode on item file [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains an item for the Pod name with the file mode set to -r--------. The container + runtime MUST be able to access the Pod name from the specified path on the mounted + volume. This test is marked LinuxOnly since Windows does not support setting specific + file permissions. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, update annotations + codename: '[sig-storage] Downward API volume should update annotations on modification + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains a list of items for each of the Pod annotations. The container runtime + MUST be able to access the Pod annotations from the specified path on the mounted + volume. Update the annotations by adding a new annotation to the running Pod. + The new annotation MUST be available from the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: DownwardAPI volume, update label + codename: '[sig-storage] Downward API volume should update labels on modification + [NodeConformance] [Conformance]' + description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles + contains a list of items for each of the Pod labels. The container runtime MUST + be able to access the Pod labels from the specified path on the mounted volume. Update + the labels by adding a new label to the running Pod. The new label MUST be available + from the mounted volume. + release: v1.9 + file: test/e2e/common/downwardapi_volume.go +- testname: EmptyDir, Shared volumes between containers + codename: '[sig-storage] EmptyDir volumes pod should support shared volumes between + containers [Conformance]' + description: A Pod created with an 'emptyDir' Volume should share volumes between + the containers in the pod. The two busybox image containers should share the volumes + mounted to the pod. The main container should wait until the sub container drops + a file, and the main container then accesses the shared data. + release: v1.15 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0644 + codename: '[sig-storage] EmptyDir volumes should support (non-root,0644,default) + [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. + The volume is mounted into the container where the container is run as a non-root user. + The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0644, non-root user + codename: '[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0644. The volume is mounted into the container where the container + is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type + set to tmpfs and the contents MUST be readable.
This test is marked LinuxOnly + since Windows does not support setting specific file permissions, or running as + UID / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0666 + codename: '[sig-storage] EmptyDir volumes should support (non-root,0666,default) + [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. + The volume is mounted into the container where the container is run as a non-root user. + The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0666, non-root user + codename: '[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0666. The volume is mounted into the container where the container + is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type + set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly + since Windows does not support setting specific file permissions, or running as + UID / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0777 + codename: '[sig-storage] EmptyDir volumes should support (non-root,0777,default) + [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. + The volume is mounted into the container where the container is run as a non-root user. + The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0777, non-root user + codename: '[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0777. The volume is mounted into the container where the container + is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type + set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly + since Windows does not support setting specific file permissions, or running as + UID / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0644 + codename: '[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. + The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID.
+ release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0644 + codename: '[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set + to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since + Windows does not support setting specific file permissions, or running as UID + / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0666 + codename: '[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. + The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0666 + codename: '[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set + to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since + Windows does not support setting specific file permissions, or running as UID + / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode 0777 + codename: '[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The + volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents + MUST be readable. This test is marked LinuxOnly since Windows does not support + setting specific file permissions, or running as UID / GID. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode 0777 + codename: '[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount + type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly + since Windows does not support setting specific file permissions, or running as + UID / GID, or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium default, volume mode default + codename: '[sig-storage] EmptyDir volumes volume on default medium should have the + correct mode [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode + set as -rwxrwxrwx and mount type set to tmpfs. This test is marked LinuxOnly since + Windows does not support setting specific file permissions. 
+ release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir, medium memory, volume mode default + codename: '[sig-storage] EmptyDir volumes volume on tmpfs should have the correct + mode [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the + volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs. This test + is marked LinuxOnly since Windows does not support setting specific file permissions, + or the medium = 'Memory'. + release: v1.9 + file: test/e2e/common/empty_dir.go +- testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race + codename: '[sig-storage] EmptyDir wrapper volumes should not cause race condition + when used for configmaps [Serial] [Conformance]' + description: Create 50 ConfigMap volumes and 5 replicas of a pod with these ConfigMap volumes + mounted. The Pod MUST NOT fail waiting for Volumes. + release: v1.13 + file: test/e2e/storage/empty_dir_wrapper.go +- testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict + codename: '[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]' + description: A Secret volume and a ConfigMap volume are created with data. The Pod MUST be + able to start with the Secret and ConfigMap volumes mounted into the container. + release: v1.13 + file: test/e2e/storage/empty_dir_wrapper.go +- testname: Host path, volume mode default + codename: '[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] + [NodeConformance] [Conformance]' + description: Create a Pod with a host volume mounted. The volume mounted MUST be a + directory with permissions mode -rwxrwxrwx and it MUST have the sticky bit (mode + flag t) set. This test is marked LinuxOnly since Windows does not support setting + the sticky bit (mode flag t). + release: v1.9 + file: test/e2e/common/host_path.go +- testname: Projected Volume, multiple projections + codename: '[sig-storage] Projected combined should project all components that make + up the projection API [Projection][NodeConformance] [Conformance]' + description: Test multiple projections. A Pod is created with a projected volume + source for secrets, configMap and downwardAPI with pod name, cpu and memory limits + and cpu and memory requests. The Pod MUST be able to read the secrets, configMap values + and the cpu and memory limits as well as the cpu and memory requests from the mounted + DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_combined.go +- testname: Projected Volume, ConfigMap, create, update and delete + codename: '[sig-storage] Projected configMap optional updates should be reflected + in volume [NodeConformance] [Conformance]' + description: Create a Pod with three containers with ConfigMaps, namely a create, an + update and a delete container. The create container, when started, MUST NOT have the configMap; the + update and delete containers MUST be created with a ConfigMap value of 'value-1'. + Create a configMap in the create container; the Pod MUST be able to read the configMap + from the create container. Update the configMap in the update container; the Pod MUST + be able to read the updated configMap value. Delete the configMap in the delete + container. The Pod MUST fail to read the configMap from the delete container.
+ release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, volume mode default + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap with default permission mode. Pod MUST be able to read the content + of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, non-root user + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + as non-root [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap as non-root user with uid 1000. Pod MUST be able to read the content + of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, volume mode 0400 + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap with permission mode set to 0400. Pod MUST be able to read the content + of the ConfigMap successfully and the mode on the volume MUST be -r--------. This + test is marked LinuxOnly since Windows does not support setting specific file + permissions. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, mapped + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + with mappings [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap with default permission mode. The ConfigMap is also mapped to a custom + path. Pod MUST be able to read the content of the ConfigMap from the custom location + successfully and the mode on the volume MUST be -rw-r--r--. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, mapped, volume mode 0400 + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap with permission mode set to 0400. The ConfigMap is also mapped to + a custom path. Pod MUST be able to read the content of the ConfigMap from the + custom location successfully and the mode on the volume MUST be -r--r--r--. This + test is marked LinuxOnly since Windows does not support setting specific file + permissions. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, mapped, non-root user + codename: '[sig-storage] Projected configMap should be consumable from pods in volume + with mappings as non-root [NodeConformance] [Conformance]' + description: A Pod is created with projected volume source 'ConfigMap' to store + a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a + custom path. Pod MUST be able to read the content of the ConfigMap from the custom + location successfully and the mode on the volume MUST be -r--r--r--. 
+ release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, multiple volume paths + codename: '[sig-storage] Projected configMap should be consumable in multiple volumes + in the same pod [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source 'ConfigMap' to store + a configMap. The configMap is mapped to two different volume mounts. Pod MUST + be able to read the content of the configMap successfully from the two volume + mounts. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, ConfigMap, update + codename: '[sig-storage] Projected configMap updates should be reflected in volume + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source 'ConfigMap' to store + a configMap and performs a create and an update to a new value. Pod MUST be able to + create the configMap with value-1. Pod MUST be able to update the value in the + configMap to value-2. + release: v1.9 + file: test/e2e/common/projected_configmap.go +- testname: Projected Volume, DownwardAPI, CPU limits + codename: '[sig-storage] Projected downwardAPI should provide container''s cpu limit + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able + to read the cpu limits from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, CPU request + codename: '[sig-storage] Projected downwardAPI should provide container''s cpu request + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able + to read the cpu request from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, memory limits + codename: '[sig-storage] Projected downwardAPI should provide container''s memory + limit [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able + to read the memory limits from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, memory request + codename: '[sig-storage] Projected downwardAPI should provide container''s memory + request [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able + to read the memory request from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, CPU limit, node allocatable + codename: '[sig-storage] Projected downwardAPI should provide node allocatable (cpu) + as default cpu limit if the limit is not set [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. The CPU and memory + resources for requests and limits are NOT specified for the container.
Pod MUST + be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, memory limit, node allocatable + codename: '[sig-storage] Projected downwardAPI should provide node allocatable (memory) + as default memory limit if the limit is not set [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. The CPU and memory + resources for requests and limits are NOT specified for the container. Pod MUST + be able to read the default memory limits from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, pod name + codename: '[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] + [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able + to read the pod name from the mounted DownwardAPIVolumeFiles. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, volume mode 0400 + codename: '[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. The default mode + for the volume mount is set to 0400. Pod MUST be able to read the pod name from + the mounted DownwardAPIVolumeFiles and the volume mode MUST be -r--------. This + test is marked LinuxOnly since Windows does not support setting specific file + permissions. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, file mode 0400 + codename: '[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests. The mode for the + item file is set to 0400. Pod MUST be able to read the pod name from + the mounted DownwardAPIVolumeFiles and the volume mode MUST be -r--------. This + test is marked LinuxOnly since Windows does not support setting specific file + permissions. + release: v1.9 + file: test/e2e/common/projected_downwardapi.go +- testname: Projected Volume, DownwardAPI, update annotation + codename: '[sig-storage] Projected downwardAPI should update annotations on modification + [NodeConformance] [Conformance]' + description: A Pod is created with a projected volume source for downwardAPI with + pod name, cpu and memory limits and cpu and memory requests and annotation items. + Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. + Annotations are then updated. Pod MUST be able to read the updated values for + the Annotations.
+ release: v1.9
+ file: test/e2e/common/projected_downwardapi.go
+- testname: Projected Volume, DownwardAPI, update labels
+ codename: '[sig-storage] Projected downwardAPI should update labels on modification
+ [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source for downwardAPI with
+ pod name, cpu and memory limits and cpu and memory requests and label items. Pod
+ MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels
+ are then updated. Pod MUST be able to read the updated values for the Labels.
+ release: v1.9
+ file: test/e2e/common/projected_downwardapi.go
+- testname: Projected Volume, Secrets, create, update and delete
+ codename: '[sig-storage] Projected secret optional updates should be reflected in
+ volume [NodeConformance] [Conformance]'
+ description: Create a Pod with three containers with secrets, namely a create, update
+ and delete container. Create Container when started MUST not have a secret, update
+ and delete containers MUST be created with a secret value. Create a secret in
+ the create container, the Pod MUST be able to read the secret from the create
+ container. Update the secret in the update container, Pod MUST be able to read
+ the updated secret value. Delete the secret in the delete container. Pod MUST
+ fail to read the secret from the delete container.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, volume mode default
+ codename: '[sig-storage] Projected secret should be consumable from pods in volume
+ [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key with default permission mode. Pod MUST be able to
+ read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, non-root, custom fsGroup
+ codename: '[sig-storage] Projected secret should be consumable from pods in volume
+ as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key. The volume has permission mode set to 0440, fsgroup
+ set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the
+ content of the key successfully and the mode MUST be -r--r-----. This test is
+ marked LinuxOnly since Windows does not support setting specific file permissions,
+ or running as UID / GID.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, volume mode 0400
+ codename: '[sig-storage] Projected secret should be consumable from pods in volume
+ with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key with permission mode set to 0400 on the Pod. Pod
+ MUST be able to read the content of the key successfully and the mode MUST be
+ -r--------. This test is marked LinuxOnly since Windows does not support setting
+ specific file permissions.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, mapped
+ codename: '[sig-storage] Projected secret should be consumable from pods in volume
+ with mappings [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key with default permission mode. The secret is also mapped
+ to a custom path. Pod MUST be able to read the content of the key successfully
+ and the mode MUST be -r-------- on the mapped volume.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, mapped, volume mode 0400
+ codename: '[sig-storage] Projected secret should be consumable from pods in volume
+ with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key with permission mode set to 0400. The secret is also
+ mapped to a specific name. Pod MUST be able to read the content of the key successfully
+ and the mode MUST be -r-------- on the mapped volume. This test is marked LinuxOnly
+ since Windows does not support setting specific file permissions.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Projected Volume, Secrets, mapped, multiple paths
+ codename: '[sig-storage] Projected secret should be consumable in multiple volumes
+ in a pod [NodeConformance] [Conformance]'
+ description: A Pod is created with a projected volume source 'secret' to store a
+ secret with a specified key. The secret is mapped to two different volume mounts.
+ Pod MUST be able to read the content of the key successfully from the two volume
+ mounts and the mode MUST be -r-------- on the mapped volumes.
+ release: v1.9
+ file: test/e2e/common/projected_secret.go
+- testname: Secrets Volume, create, update and delete
+ codename: '[sig-storage] Secrets optional updates should be reflected in volume
+ [NodeConformance] [Conformance]'
+ description: Create a Pod with three containers with secrets volume sources, namely
+ a create, update and delete container. Create Container when started MUST not
+ have a secret, update and delete containers MUST be created with a secret value.
+ Create a secret in the create container, the Pod MUST be able to read the secret
+ from the create container. Update the secret in the update container, Pod MUST
+ be able to read the updated secret value. Delete the secret in the delete container.
+ Pod MUST fail to read the secret from the delete container.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, volume mode default, secret with same name in different
+ namespace
+ codename: '[sig-storage] Secrets should be able to mount in a volume regardless
+ of a different secret existing with same name in different namespace [NodeConformance]
+ [Conformance]'
+ description: Create a secret with the same name in two namespaces. Create a Pod with
+ secret volume source configured into the container. Pod MUST be able to read the
+ secrets from the mounted volume from the container runtime and only the secrets
+ which are associated with the namespace where the pod is created. The file mode of
+ the secret MUST be -rw-r--r-- by default.
+ release: v1.12
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, default
+ codename: '[sig-storage] Secrets should be consumable from pods in volume [NodeConformance]
+ [Conformance]'
+ description: Create a secret.
Create a Pod with secret volume source configured
+ into the container. Pod MUST be able to read the secret from the mounted volume
+ from the container runtime and the file mode of the secret MUST be -rw-r--r--
+ by default.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, volume mode 0440, fsGroup 1001 and uid 1000
+ codename: '[sig-storage] Secrets should be consumable from pods in volume as non-root
+ with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: Create a secret. Create a Pod with secret volume source configured
+ into the container with file mode set to 0440 as a non-root user with uid 1000
+ and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume
+ from the container runtime and the file mode of the secret MUST be -r--r----- by
+ default. This test is marked LinuxOnly since Windows does not support setting
+ specific file permissions, or running as UID / GID.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, volume mode 0400
+ codename: '[sig-storage] Secrets should be consumable from pods in volume with defaultMode
+ set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: Create a secret. Create a Pod with secret volume source configured
+ into the container with file mode set to 0400. Pod MUST be able to read the secret
+ from the mounted volume from the container runtime and the file mode of the secret
+ MUST be -r-------- by default. This test is marked LinuxOnly since Windows does
+ not support setting specific file permissions.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, mapping
+ codename: '[sig-storage] Secrets should be consumable from pods in volume with mappings
+ [NodeConformance] [Conformance]'
+ description: Create a secret. Create a Pod with secret volume source configured
+ into the container with a custom path. Pod MUST be able to read the secret from
+ the mounted volume from the specified custom path. The file mode of the secret
+ MUST be -rw-r--r-- by default.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, mapping, volume mode 0400
+ codename: '[sig-storage] Secrets should be consumable from pods in volume with mappings
+ and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]'
+ description: Create a secret. Create a Pod with secret volume source configured
+ into the container with a custom path and file mode set to 0400. Pod MUST be
+ able to read the secret from the mounted volume from the specified custom path.
+ The file mode of the secret MUST be -r--r--r--. This test is marked LinuxOnly
+ since Windows does not support setting specific file permissions.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: Secrets Volume, mapping multiple volume paths
+ codename: '[sig-storage] Secrets should be consumable in multiple volumes in a pod
+ [NodeConformance] [Conformance]'
+ description: Create a secret. Create a Pod with two secret volume sources configured
+ into the container at two different custom paths. Pod MUST be able to read
+ the secret from both the mounted volumes from the two specified custom paths.
+ release: v1.9
+ file: test/e2e/common/secrets_volume.go
+- testname: 'SubPath: Reading content from a configmap volume.'
+ codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with + configmap pod [LinuxOnly] [Conformance]' + description: Containers in a pod can read content from a configmap mounted volume + which was configured with a subpath. This test is marked LinuxOnly since Windows + cannot mount individual files in Containers. + release: v1.12 + file: test/e2e/storage/subpath.go +- testname: 'SubPath: Reading content from a configmap volume.' + codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with + configmap pod with mountPath of existing file [LinuxOnly] [Conformance]' + description: Containers in a pod can read content from a configmap mounted volume + which was configured with a subpath and also using a mountpath that is a specific + file. This test is marked LinuxOnly since Windows cannot mount individual files + in Containers. + release: v1.12 + file: test/e2e/storage/subpath.go +- testname: 'SubPath: Reading content from a downwardAPI volume.' + codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with + downward pod [LinuxOnly] [Conformance]' + description: Containers in a pod can read content from a downwardAPI mounted volume + which was configured with a subpath. This test is marked LinuxOnly since Windows + cannot mount individual files in Containers. + release: v1.12 + file: test/e2e/storage/subpath.go +- testname: 'SubPath: Reading content from a projected volume.' + codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with + projected pod [LinuxOnly] [Conformance]' + description: Containers in a pod can read content from a projected mounted volume + which was configured with a subpath. This test is marked LinuxOnly since Windows + cannot mount individual files in Containers. + release: v1.12 + file: test/e2e/storage/subpath.go +- testname: 'SubPath: Reading content from a secret volume.' + codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with + secret pod [LinuxOnly] [Conformance]' + description: Containers in a pod can read content from a secret mounted volume which + was configured with a subpath. This test is marked LinuxOnly since Windows cannot + mount individual files in Containers. + release: v1.12 + file: test/e2e/storage/subpath.go + diff --git a/test/conformance/walk.go b/test/conformance/walk.go index dc2bcfd714f..6cc600e9fc5 100644 --- a/test/conformance/walk.go +++ b/test/conformance/walk.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,84 +14,289 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package main provides a tool that scans kubernetes e2e test source code -// looking for conformance test declarations, which it emits on stdout. It -// also looks for legacy, manually added "[Conformance]" tags and reports an -// error if it finds any. -// -// This approach is not air tight, but it will serve our purpose as a -// pre-submit check. 
package main
import (
+ "encoding/json"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
+ "gopkg.in/yaml.v2"
+ "io"
+ "log"
"os"
- "path/filepath"
"regexp"
+ "sort"
"strconv"
"strings"
"text/template"
+
+ "github.com/onsi/ginkgo/types"
)
var (
- baseURL = flag.String("url", "https://github.com/kubernetes/kubernetes/tree/master/", "location of the current source")
- confDoc = flag.Bool("conformance", false, "write a conformance document")
- version = flag.String("version", "v1.9", "version of this conformance document")
- totalConfTests, totalLegacyTests, missingComments int
+ baseURL = flag.String("url", "https://github.com/kubernetes/kubernetes/tree/master/", "location of the current source")
+ k8sPath = flag.String("source", "", "location of the current source on the current machine")
+ confDoc = flag.Bool("docs", false, "write a conformance document")
+ version = flag.String("version", "v1.9", "version of this conformance document")
// If a test name contains any of these tags, it is ineligible for promotion to conformance
regexIneligibleTags = regexp.MustCompile(`\[(Alpha|Feature:[^\]]+|Flaky)\]`)
+
+ // Conformance comments should be within this number of lines of the call itself.
+ // Allowing for more than one line in case a spare comment or two sits below it.
+ conformanceCommentsLineWindow = 5
+
+ seenLines map[string]struct{}
)
-const regexDescribe = "Describe|KubeDescribe|SIGDescribe"
-const regexContext = "^Context$"
+type frame struct {
+ Function string
-type visitor struct {
- FileSet *token.FileSet
- describes []describe
- cMap ast.CommentMap
- //list of all the conformance tests in the path
- tests []conformanceData
-}
-
-//describe contains text associated with ginkgo describe container
-type describe struct {
- rparen token.Pos
- text string
- lastContext context
-}
-
-//context contain the text associated with the Context clause
-type context struct {
- text string
+ // File and Line are the file name and line number of the
+ // location in this frame. For non-leaf frames, this will be
+ // the location of a call. These may be the empty string and
+ // zero, respectively, if not known.
+ File string
+ Line int
}
type conformanceData struct {
- // A URL to the line of code in the kube src repo for the test
- URL string
+ // A URL to the line of code in the kube src repo for the test. Omitted from the YAML to avoid exposing the line number.
+ URL string `yaml:"-"`
// Extracted from the "Testname:" comment before the test
TestName string
+ // CodeName is taken from the actual ginkgo descriptions, e.g. `[sig-apps] Foo should bar [Conformance]`
+ CodeName string
// Extracted from the "Description:" comment before the test
Description string
// Version when this test was added or modified, ex: v1.12, v1.13
Release string
+ // File is the filename where the test is defined. We intentionally don't save the line here to avoid meaningless changes.
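+ // (A stored line number would change with every unrelated edit to the file, churning the generated conformance.yaml.)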
+ File string
}
-func (v *visitor) convertToConformanceData(at *ast.BasicLit) {
- cd := conformanceData{}
+func main() {
+ flag.Parse()
- comment := v.comment(at)
- pos := v.FileSet.Position(at.Pos())
- cd.URL = fmt.Sprintf("%s%s#L%d", *baseURL, pos.Filename, pos.Line)
+ if len(flag.Args()) < 1 {
+ log.Fatalln("Requires the name of the test details file as first and only argument.")
+ }
+ testDetailsFile := flag.Args()[0]
+ f, err := os.Open(testDetailsFile)
+ if err != nil {
+ log.Fatalf("Failed to open file %v: %v", testDetailsFile, err)
+ }
+ defer f.Close()
+ seenLines = map[string]struct{}{}
+ dec := json.NewDecoder(f)
+ testInfos := []*conformanceData{}
+ for {
+ var spec *types.SpecSummary
+ if err := dec.Decode(&spec); err == io.EOF {
+ break
+ } else if err != nil {
+ log.Fatal(err)
+ }
+
+ if isConformance(spec) {
+ testInfo := getTestInfo(spec)
+ if testInfo != nil {
+ if err := validateTestName(testInfo.CodeName); err != nil {
+ log.Fatal(err)
+ }
+ testInfos = append(testInfos, testInfo)
+ }
+ }
+ }
+
+ sort.Slice(testInfos, func(i, j int) bool { return testInfos[i].CodeName < testInfos[j].CodeName })
+ saveAllTestInfo(testInfos)
+}
+
+func isConformance(spec *types.SpecSummary) bool {
+ return strings.Contains(getTestName(spec), "[Conformance]")
+}
+
+func getTestInfo(spec *types.SpecSummary) *conformanceData {
+ var c *conformanceData
+ var err error
+ // The key to this working is that we don't need to parse every file or walk
+ // every componentCodeLocation. The last componentCodeLocation is going to typically start
+ // with the ConformanceIt(...) call and the next call in that callstack will be the
+ // ast.Node which is attached to the comment that we want.
+ for i := len(spec.ComponentCodeLocations) - 1; i > 0; i-- {
+ fullstacktrace := spec.ComponentCodeLocations[i].FullStackTrace
+ c, err = getConformanceDataFromStackTrace(fullstacktrace)
+ if err != nil {
+ log.Printf("Error looking for conformance data: %v", err)
+ }
+ if c != nil {
+ break
+ }
+ }
+
+ if c == nil {
+ log.Printf("Did not find test info for spec: %#v\n", getTestName(spec))
+ return nil
+ }
+
+ c.CodeName = getTestName(spec)
+ return c
+}
+
+func getTestName(spec *types.SpecSummary) string {
+ return strings.Join(spec.ComponentTexts[1:], " ")
+}
+
+func saveAllTestInfo(dataSet []*conformanceData) {
+ if *confDoc {
+ // Note: this assumes that you're running from the root of the kube src repo
+ templ, err := template.ParseFiles("./test/conformance/cf_header.md")
+ if err != nil {
+ fmt.Printf("Error reading the Header file information: %s\n\n", err)
+ }
+ data := struct {
+ Version string
+ }{
+ Version: *version,
+ }
+ templ.Execute(os.Stdout, data)
+
+ for _, data := range dataSet {
+ fmt.Printf("## [%s](%s)\n\n", data.TestName, data.URL)
+ fmt.Printf("- Added to conformance in release %s\n", data.Release)
+ fmt.Printf("- Defined in code as: %s\n\n", data.CodeName)
+ fmt.Printf("%s\n\n", data.Description)
+ }
+ return
+ }
+
+ // Serialize the list as a whole. Generally meant to end up as conformance.yaml which tracks the set of tests.
+ b, err := yaml.Marshal(dataSet)
+ if err != nil {
+ log.Printf("Error marshalling data into YAML: %v", err)
+ }
+ fmt.Println(string(b))
+}
+
+func getConformanceDataFromStackTrace(fullstackstrace string) (*conformanceData, error) {
+ // The full stacktrace to parse from ginkgo is of the form:
+ // k8s.io/kubernetes/test/e2e/storage/utils.SIGDescribe(0x51f4c4f, 0xf, 0x53a0dd8, 0xc000ab6e01)\n\ttest/e2e/storage/utils/framework.go:23 +0x75\n ... ...
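+ // Each frame in the trace occupies two lines: the function name, then its "file:line +offset" location.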
+ // So we need to split it into lines, remove whitespace, and then grab the files/lines.
+ stack := strings.Replace(fullstackstrace, "\t", "", -1)
+ calls := strings.Split(stack, "\n")
+ frames := []frame{}
+ i := 0
+ for i+1 < len(calls) {
+ fileLine := strings.Split(calls[i+1], " ")
+ lineinfo := strings.Split(fileLine[0], ":")
+ line, err := strconv.Atoi(lineinfo[1])
+ if err != nil {
+ panic(err)
+ }
+ frames = append(frames, frame{
+ Function: calls[i],
+ File: lineinfo[0],
+ Line: line,
+ })
+ i += 2
+ }
+
+ // filenames have the `/go/src/k8s.io` prefix which doesn't exist locally
+ for i, v := range frames {
+ frames[i].File = strings.Replace(v.File,
+ "/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes",
+ *k8sPath, 1)
+ }
+
+ for _, curFrame := range frames {
+ if _, seen := seenLines[fmt.Sprintf("%v:%v", curFrame.File, curFrame.Line)]; seen {
+ continue
+ }
+
+ freader, err := os.Open(curFrame.File)
+ if err != nil {
+ return nil, err
+ }
+ defer freader.Close()
+ cd, err := scanFileForFrame(curFrame.File, freader, curFrame)
+ if err != nil {
+ return nil, err
+ }
+ if cd != nil {
+ return cd, nil
+ }
+ }
+
+ return nil, nil
+}
+
+// scanFileForFrame will scan the target and look for a conformance comment attached to the function
+// described by the target frame. If the comment can't be found then nil, nil is returned.
+func scanFileForFrame(filename string, src interface{}, targetFrame frame) (*conformanceData, error) {
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+
+ cmap := ast.NewCommentMap(fset, f, f.Comments)
+ for _, cs := range cmap {
+ for _, c := range cs {
+ if cd := tryCommentGroupAndFrame(fset, c, targetFrame); cd != nil {
+ return cd, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+func validateTestName(s string) error {
+ matches := regexIneligibleTags.FindAllString(s, -1)
+ if matches != nil {
+ return fmt.Errorf("'%s' cannot have invalid tags %v", s, strings.Join(matches, ","))
+ }
+ return nil
+}
+
+func tryCommentGroupAndFrame(fset *token.FileSet, cg *ast.CommentGroup, f frame) *conformanceData {
+ if !shouldProcessCommentGroup(fset, cg, f) {
+ return nil
+ }
+
+ // Each file/line will either be some helper function (not a conformance comment) or apply to just a single test. Don't revisit.
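+ // Note: seenLines is nil when helpers such as scanFileForFrame are exercised directly in unit tests, hence the nil check below.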
+ if seenLines != nil { + seenLines[fmt.Sprintf("%v:%v", f.File, f.Line)] = struct{}{} + } + cd := commentToConformanceData(cg.Text()) + if cd == nil { + return nil + } + + cd.URL = fmt.Sprintf("%s%s#L%d", *baseURL, f.File, f.Line) + cd.File = f.File + return cd +} + +func shouldProcessCommentGroup(fset *token.FileSet, cg *ast.CommentGroup, f frame) bool { + lineDiff := f.Line - fset.Position(cg.End()).Line + return lineDiff > 0 && lineDiff <= conformanceCommentsLineWindow +} + +func commentToConformanceData(comment string) *conformanceData { lines := strings.Split(comment, "\n") - cd.Description = "" + descLines := []string{} + cd := &conformanceData{} for _, line := range lines { line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } if sline := regexp.MustCompile("^Testname\\s*:\\s*").Split(line, -1); len(sline) == 2 { cd.TestName = sline[1] continue @@ -103,326 +308,12 @@ func (v *visitor) convertToConformanceData(at *ast.BasicLit) { if sline := regexp.MustCompile("^Description\\s*:\\s*").Split(line, -1); len(sline) == 2 { line = sline[1] } - cd.Description += line + "\n" + descLines = append(descLines, line) + } + if cd.Release == "" && cd.TestName == "" { + return nil } - if cd.TestName == "" { - testName := v.getDescription(at.Value) - i := strings.Index(testName, "[Conformance]") - if i > 0 { - cd.TestName = strings.TrimSpace(testName[:i]) - } else { - cd.TestName = testName - } - } - - v.tests = append(v.tests, cd) -} - -func newVisitor() *visitor { - return &visitor{ - FileSet: token.NewFileSet(), - } -} - -func (v *visitor) isConformanceCall(call *ast.CallExpr) bool { - switch fun := call.Fun.(type) { - case *ast.SelectorExpr: - if fun.Sel != nil { - return fun.Sel.Name == "ConformanceIt" - } - } - return false -} - -func (v *visitor) isLegacyItCall(call *ast.CallExpr) bool { - switch fun := call.Fun.(type) { - case *ast.Ident: - if fun.Name != "It" { - return false - } - if len(call.Args) < 1 { - v.failf(call, "Not enough arguments to It()") - } - default: - return false - } - - switch arg := call.Args[0].(type) { - case *ast.BasicLit: - if arg.Kind != token.STRING { - v.failf(arg, "Unexpected non-string argument to It()") - } - if strings.Contains(arg.Value, "[Conformance]") { - return true - } - default: - // non-literal argument to It()... we just ignore these even though they could be a way to "sneak in" a conformance test - } - - return false -} - -func (v *visitor) failf(expr ast.Expr, format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - fmt.Fprintf(os.Stderr, "ERROR at %v: %s\n", v.FileSet.Position(expr.Pos()), msg) -} - -func (v *visitor) comment(x *ast.BasicLit) string { - for _, comm := range v.cMap.Comments() { - testOffset := int(x.Pos()-comm.End()) - len("framework.ConformanceIt(\"") - //Cannot assume the offset is within three or four tabs from the test block itself. - //It is better to trim the newlines, tabs, etc and then we if the comment is followed - //by the test block itself so that we can associate the comment with it properly. - if 0 <= testOffset && testOffset <= 10 { - b1 := make([]byte, x.Pos()-comm.End()) - //if we fail to open the file to compare the content we just assume the - //proximity of the comment and apply it. 
- myf, err := os.Open(v.FileSet.File(x.Pos()).Name()) - if err == nil { - defer myf.Close() - if _, err := myf.Seek(int64(comm.End()), 0); err == nil { - if _, err := myf.Read(b1); err == nil { - if strings.Compare(strings.Trim(string(b1), "\t \r\n"), "framework.ConformanceIt(\"") == 0 { - return comm.Text() - } - } - } - } else { - //comment section's end is noticed within 10 characters from framework.ConformanceIt block - return comm.Text() - } - } - } - return "" -} - -func (v *visitor) emit(arg ast.Expr) { - switch at := arg.(type) { - case *ast.BasicLit: - if at.Kind != token.STRING { - v.failf(at, "framework.ConformanceIt() called with non-string argument") - return - } - - description := v.getDescription(at.Value) - err := validateTestName(description) - if err != nil { - v.failf(at, err.Error()) - return - } - - at.Value = normalizeTestName(at.Value) - if *confDoc { - v.convertToConformanceData(at) - } else { - fmt.Printf("%s: %q\n", v.FileSet.Position(at.Pos()).Filename, at.Value) - } - default: - v.failf(at, "framework.ConformanceIt() called with non-literal argument") - fmt.Fprintf(os.Stderr, "ERROR: non-literal argument %v at %v\n", arg, v.FileSet.Position(arg.Pos())) - } -} - -func (v *visitor) getDescription(value string) string { - tokens := []string{} - for _, describe := range v.describes { - tokens = append(tokens, describe.text) - if len(describe.lastContext.text) > 0 { - tokens = append(tokens, describe.lastContext.text) - } - } - tokens = append(tokens, value) - - trimmed := []string{} - for _, token := range tokens { - trimmed = append(trimmed, strings.Trim(token, "\"")) - } - - return strings.Join(trimmed, " ") -} - -var ( - regexTag = regexp.MustCompile(`(\[[a-zA-Z0-9:-]+\])`) -) - -// normalizeTestName removes tags (e.g., [Feature:Foo]), double quotes and trim -// the spaces to normalize the test name. -func normalizeTestName(s string) string { - r := regexTag.ReplaceAllString(s, "") - r = strings.Trim(r, "\"") - return strings.TrimSpace(r) -} - -func validateTestName(s string) error { - matches := regexIneligibleTags.FindAllString(s, -1) - if matches != nil { - return fmt.Errorf("'%s' cannot have invalid tags %v", s, strings.Join(matches, ",")) - } - return nil -} - -// funcName converts a selectorExpr with two idents into a string, -// x.y -> "x.y" -func funcName(n ast.Expr) string { - if sel, ok := n.(*ast.SelectorExpr); ok { - if x, ok := sel.X.(*ast.Ident); ok { - return x.String() + "." + sel.Sel.String() - } - } - return "" -} - -// isSprintf returns whether the given node is a call to fmt.Sprintf -func isSprintf(n ast.Expr) bool { - call, ok := n.(*ast.CallExpr) - return ok && funcName(call.Fun) == "fmt.Sprintf" && len(call.Args) != 0 -} - -// firstArg attempts to statically determine the value of the first -// argument. It only handles strings, and converts any unknown values -// (fmt.Sprintf interpolations) into *. 
-func (v *visitor) firstArg(n *ast.CallExpr) string { - if len(n.Args) == 0 { - return "" - } - var lit *ast.BasicLit - if isSprintf(n.Args[0]) { - return v.firstArg(n.Args[0].(*ast.CallExpr)) - } - lit, ok := n.Args[0].(*ast.BasicLit) - if ok && lit.Kind == token.STRING { - val, err := strconv.Unquote(lit.Value) - if err != nil { - panic(err) - } - if strings.Contains(val, "%") { - val = strings.Replace(val, "%d", "*", -1) - val = strings.Replace(val, "%v", "*", -1) - val = strings.Replace(val, "%s", "*", -1) - } - return val - } - if ident, ok := n.Args[0].(*ast.Ident); ok { - return ident.String() - } - return "*" -} - -// matchFuncName returns the first argument of a function if it's -// a Ginkgo-relevant function (Describe/KubeDescribe/Context), -// and the empty string otherwise. -func (v *visitor) matchFuncName(n *ast.CallExpr, pattern string) string { - switch x := n.Fun.(type) { - case *ast.SelectorExpr: - if match, err := regexp.MatchString(pattern, x.Sel.Name); err == nil && match { - return v.firstArg(n) - } - case *ast.Ident: - if match, err := regexp.MatchString(pattern, x.Name); err == nil && match { - return v.firstArg(n) - } - default: - return "" - } - return "" -} - -// Visit visits each node looking for either calls to framework.ConformanceIt, -// which it will emit in its list of conformance tests, or legacy calls to -// It() with a manually embedded [Conformance] tag, which it will complain -// about. -func (v *visitor) Visit(node ast.Node) (w ast.Visitor) { - lastDescribe := len(v.describes) - 1 - - switch t := node.(type) { - case *ast.CallExpr: - if name := v.matchFuncName(t, regexDescribe); name != "" && len(t.Args) >= 2 { - v.describes = append(v.describes, describe{text: name, rparen: t.Rparen}) - } else if name := v.matchFuncName(t, regexContext); name != "" && len(t.Args) >= 2 { - if lastDescribe > -1 { - v.describes[lastDescribe].lastContext = context{text: name} - } - } else if v.isConformanceCall(t) { - totalConfTests++ - v.emit(t.Args[0]) - return nil - } else if v.isLegacyItCall(t) { - totalLegacyTests++ - v.failf(t, "Using It() with manual [Conformance] tag is no longer allowed. 
Use framework.ConformanceIt() instead.") - return nil - } - } - - // If we're past the position of the last describe's rparen, pop the describe off - if lastDescribe > -1 && node != nil { - if node.Pos() > v.describes[lastDescribe].rparen { - v.describes = v.describes[:lastDescribe] - } - } - - return v -} - -func scanfile(path string, src interface{}) []conformanceData { - v := newVisitor() - file, err := parser.ParseFile(v.FileSet, path, src, parser.ParseComments) - if err != nil { - panic(err) - } - - v.cMap = ast.NewCommentMap(v.FileSet, file, file.Comments) - - ast.Walk(v, file) - return v.tests -} - -func main() { - flag.Parse() - - if len(flag.Args()) < 1 { - fmt.Fprintf(os.Stderr, "USAGE: %s [...]\n", os.Args[0]) - os.Exit(64) - } - - if *confDoc { - // Note: this assumes that you're running from the root of the kube src repo - templ, err := template.ParseFiles("test/conformance/cf_header.md") - if err != nil { - fmt.Printf("Error reading the Header file information: %s\n\n", err) - } - data := struct { - Version string - }{ - Version: *version, - } - templ.Execute(os.Stdout, data) - } - - totalConfTests = 0 - totalLegacyTests = 0 - missingComments = 0 - for _, arg := range flag.Args() { - filepath.Walk(arg, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if strings.HasSuffix(path, ".go") { - tests := scanfile(path, nil) - for _, cd := range tests { - fmt.Printf("## [%s](%s)\n\n", cd.TestName, cd.URL) - fmt.Printf("### Release %s\n", cd.Release) - fmt.Printf("%s\n\n", cd.Description) - if len(cd.Description) < 10 { - missingComments++ - } - } - } - return nil - }) - } - if *confDoc { - fmt.Println("\n## **Summary**") - fmt.Printf("\nTotal Conformance Tests: %d, total legacy tests that need conversion: %d, while total tests that need comment sections: %d\n\n", totalConfTests, totalLegacyTests, missingComments) - } + cd.Description = strings.Join(descLines, " ") + return cd } diff --git a/test/conformance/walk_test.go b/test/conformance/walk_test.go index 68b39e8e919..339f4adbddf 100644 --- a/test/conformance/walk_test.go +++ b/test/conformance/walk_test.go @@ -22,127 +22,152 @@ import ( "testing" ) -var conformanceCases = []struct { - filename string - code string - output []conformanceData -}{ - // Go unit test - {"test/list/main_test.go", ` -var num = 3 -func Helper(x int) { return x / 0 } -var _ = Describe("Feature", func() { -/* - Testname: Kubelet-OutputToLogs - Description: By default the stdout and stderr from the process - being executed in a pod MUST be sent to the pod's logs. 
-*/ - framework.ConformanceIt("validates describe with ConformanceIt", func() {}) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/test/list/main_test.go#L11", TestName: "Kubelet-OutputToLogs", - Description: `By default the stdout and stderr from the process -being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, - }, - // Describe + It - {"e2e/foo.go", ` -var _ = Describe("Feature", func() { - //It should have comment - framework.ConformanceIt("should work properly", func() {}) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L5", TestName: "Feature should work properly", Description: "It should have comment\n\n"}}, - }, - // KubeDescribe + It - {"e2e/foo.go", ` -var _ = framework.KubeDescribe("Feature", func() { - /*It should have comment*/ - framework.ConformanceIt("should work properly", func() {}) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L5", TestName: "Feature should work properly", Description: "It should have comment\n\n"}}, - }, - // KubeDescribe + Context + It - {"e2e/foo.go", ` -var _ = framework.KubeDescribe("Feature", func() { - Context("when offline", func() { - //Testname: Kubelet-OutputToLogs - //Description: By default the stdout and stderr from the process - //being executed in a pod MUST be sent to the pod's logs. - framework.ConformanceIt("should work", func() {}) - }) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L8", TestName: "Kubelet-OutputToLogs", - Description: `By default the stdout and stderr from the process -being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, - }, - // SIGDescribe + KubeDescribe + It, Describe + KubeDescribe + It - {"e2e/foo.go", ` -var _ = framework.SIGDescribe("Feature", func() { - KubeDescribe("Described by", func() { - // Description: description1 - framework.ConformanceIt("A ConformanceIt", func() {}) - }) - Describe("Also described via", func() { - KubeDescribe("A nested", func() { - // Description: description2 - framework.ConformanceIt("ConformanceIt", func() {}) - }) - }) -})`, []conformanceData{ - {URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L6", TestName: "Feature Described by A ConformanceIt", Description: "description1\n\n"}, - {URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L11", TestName: "Feature Also described via A nested ConformanceIt", Description: "description2\n\n"}, - }}, - // KubeDescribe + Context + It - {"e2e/foo.go", ` -var _ = framework.KubeDescribe("Feature", func() { - Context("with context", func() { - //Description: By default the stdout and stderr from the process - //being executed in a pod MUST be sent to the pod's logs. - framework.ConformanceIt("should work", func() {}) - }) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L7", TestName: "Feature with context should work", - Description: `By default the stdout and stderr from the process -being executed in a pod MUST be sent to the pod's logs.` + "\n\n"}}, - }, - {"e2e/foo.go", ` -var _ = framework.KubeDescribe("Feature", func() { - Context("with context and extra spaces before It block should still pick up Testname", func() { - // Testname: Test with spaces - //Description: Should pick up testname even if it is not within 3 spaces - //even when executed from memory. 
- framework.ConformanceIt("should work", func() {}) - }) -})`, []conformanceData{{URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L8", TestName: "Test with spaces", - Description: `Should pick up testname even if it is not within 3 spaces -even when executed from memory.` + "\n\n"}}, - }, -} - func TestConformance(t *testing.T) { - for _, test := range conformanceCases { - code := "package test\n" + test.code - *confDoc = true - tests := scanfile(test.filename, code) - if !reflect.DeepEqual(tests, test.output) { - t.Errorf("code:\n%s\ngot %+v\nwant %+v", - code, tests, test.output) - } - } -} - -func TestNormalizeTestNames(t *testing.T) { - testCases := []struct { - rawName string - normalizedName string + for _, tc := range []struct { + desc string + filename string + code string + targetFrame frame + output *conformanceData }{ { - "should have monotonically increasing restart count [Slow]", - "should have monotonically increasing restart count", + desc: "Grabs comment above test", + filename: "test/list/main_test.go", + code: `package test + + var num = 3 + func Helper(x int) { return x / 0 } + var _ = Describe("Feature", func() { + /* + Testname: Kubelet-OutputToLogs + Description: By default the stdout and stderr from the process + being executed in a pod MUST be sent to the pod's logs. + */ + framework.ConformanceIt("validates describe with ConformanceIt", func() {}) + })`, + output: &conformanceData{ + URL: "https://github.com/kubernetes/kubernetes/tree/master/test/list/main_test.go#L11", + TestName: "Kubelet-OutputToLogs", + Description: `By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.`, + File: "test/list/main_test.go", + }, + targetFrame: frame{File: "test/list/main_test.go", Line: 11}, + }, { + desc: "Handles extra spaces", + filename: "e2e/foo.go", + code: `package test + + var _ = framework.KubeDescribe("Feature", func() { + Context("with context and extra spaces before It block should still pick up Testname", func() { + // Testname: Test with spaces + //Description: Should pick up testname even if it is not within 3 spaces + //even when executed from memory. + framework.ConformanceIt("should work", func() {}) + }) + })`, + output: &conformanceData{ + URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L8", + TestName: "Test with spaces", + Description: `Should pick up testname even if it is not within 3 spaces even when executed from memory.`, + File: "e2e/foo.go", + }, + targetFrame: frame{File: "e2e/foo.go", Line: 8}, + }, { + desc: "Should target the correct comment based on the line numbers (second)", + filename: "e2e/foo.go", + code: `package test + + var _ = framework.KubeDescribe("Feature", func() { + Context("with context and extra spaces before It block should still pick up Testname", func() { + // Testname: First test + // Description: Should pick up testname even if it is not within 3 spaces + // even when executed from memory. 
+ framework.ConformanceIt("should work", func() {}) + + // Testname: Second test + // Description: Should target the correct test/comment based on the line numbers + framework.ConformanceIt("should work", func() {}) + }) + })`, + output: &conformanceData{ + URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L13", + TestName: "Second test", + Description: `Should target the correct test/comment based on the line numbers`, + File: "e2e/foo.go", + }, + targetFrame: frame{File: "e2e/foo.go", Line: 13}, + }, { + desc: "Should target the correct comment based on the line numbers (first)", + filename: "e2e/foo.go", + code: `package test + + var _ = framework.KubeDescribe("Feature", func() { + Context("with context and extra spaces before It block should still pick up Testname", func() { + // Testname: First test + // Description: Should target the correct test/comment based on the line numbers + framework.ConformanceIt("should work", func() {}) + + // Testname: Second test + // Description: Should target the correct test/comment based on the line numbers + framework.ConformanceIt("should work", func() {}) + }) + })`, + output: &conformanceData{ + URL: "https://github.com/kubernetes/kubernetes/tree/master/e2e/foo.go#L8", + TestName: "First test", + Description: `Should target the correct test/comment based on the line numbers`, + File: "e2e/foo.go", + }, + targetFrame: frame{File: "e2e/foo.go", Line: 8}, }, + } { + t.Run(tc.desc, func(t *testing.T) { + *confDoc = true + cd, err := scanFileForFrame(tc.filename, tc.code, tc.targetFrame) + if err != nil { + panic(err) + } + if !reflect.DeepEqual(cd, tc.output) { + t.Errorf("code:\n%s\ngot %+v\nwant %+v", + tc.code, cd, tc.output) + } + }) + } +} + +func TestCommentToConformanceData(t *testing.T) { + tcs := []struct { + desc string + input string + expected *conformanceData + }{ { - " should check is all data is printed ", - "should check is all data is printed", + desc: "Empty comment leads to nil", + }, { + desc: "No Release or Testname leads to nil", + input: "Description: foo", + }, { + desc: "Release but no Testname does not result in nil", + input: "Release: v1.1\nDescription: foo", + expected: &conformanceData{Release: "v1.1", Description: "foo"}, + }, { + desc: "Testname but no Release does not result in nil", + input: "Testname: mytest\nDescription: foo", + expected: &conformanceData{TestName: "mytest", Description: "foo"}, + }, { + desc: "All fields parsed and newlines and whitespace removed from description", + input: "Release: v1.1\n\t\tTestname: mytest\n\t\tDescription: foo\n\t\tbar\ndone", + expected: &conformanceData{TestName: "mytest", Release: "v1.1", Description: "foo bar done"}, }, } - for i, tc := range testCases { - actualName := normalizeTestName(tc.rawName) - if actualName != tc.normalizedName { - t.Errorf("test case[%d]: expected normalized name %q, got %q", i, tc.normalizedName, actualName) - } + + for _, tc := range tcs { + t.Run(tc.desc, func(t *testing.T) { + out := commentToConformanceData(tc.input) + if !reflect.DeepEqual(out, tc.expected) { + t.Errorf("Expected %#v but got %#v", tc.expected, out) + } + }) } } diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index c5b3246ed7b..09e359ff802 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -46,7 +46,6 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, run basic image Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. 
Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
*/
- framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index f9b073ed3ec..7fcac1eee0f 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -107,6 +107,12 @@ func RunE2ETests(t *testing.T) {
// Stream the progress to stdout and optionally a URL accepting progress updates.
r = append(r, e2ereporters.NewProgressReporter(framework.TestContext.ProgressReportURL))
+ // The DetailsReporter will output details about every test (name, files, lines, etc.) which helps
+ // when documenting our tests.
+ if len(framework.TestContext.SpecSummaryOutput) > 0 {
+ r = append(r, e2ereporters.NewDetailsReporterFile(framework.TestContext.SpecSummaryOutput))
+ }
+
klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index a9391b72052..90dab94f955 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -170,6 +170,9 @@ type TestContextType struct {
// SriovdpConfigMapFile is the path to the ConfigMap to configure the SRIOV device plugin on this host.
SriovdpConfigMapFile string
+
+ // SpecSummaryOutput is the file to write ginkgo.SpecSummary objects to as tests complete. Useful for debugging and test introspection.
+ SpecSummaryOutput string
}
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -298,6 +301,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
+ flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
}
// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
diff --git a/test/e2e/reporters/BUILD b/test/e2e/reporters/BUILD
index a3c54e1219c..35345c7eb6a 100644
--- a/test/e2e/reporters/BUILD
+++ b/test/e2e/reporters/BUILD
@@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
- srcs = ["progress.go"],
+ srcs = [
+ "progress.go",
+ "testDetails.go",
+ ],
importpath = "k8s.io/kubernetes/test/e2e/reporters",
visibility = ["//visibility:public"],
deps = [
diff --git a/test/e2e/reporters/testDetails.go b/test/e2e/reporters/testDetails.go
new file mode 100644
index 00000000000..e4be6ceaea2
--- /dev/null
+++ b/test/e2e/reporters/testDetails.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/types"
+ "k8s.io/klog"
+)
+
+// DetailsReporter is a ginkgo reporter which dumps information regarding the tests which is difficult to get
+// via AST parsing alone. This allows us to leverage the existing ginkgo logic to walk the tests and then follow
+// up with a custom app which leverages the AST to generate conformance documentation.
+type DetailsReporter struct {
+ Writer io.Writer
+}
+
+// NewDetailsReporterWithWriter returns a reporter which will write the SpecSummary objects as tests
+// complete to the given writer.
+func NewDetailsReporterWithWriter(w io.Writer) *DetailsReporter {
+ return &DetailsReporter{
+ Writer: w,
+ }
+}
+
+// NewDetailsReporterFile returns a reporter which will create the file given and dump the specs
+// to it as they complete.
+func NewDetailsReporterFile(filename string) *DetailsReporter {
+ absPath, err := filepath.Abs(filename)
+ if err != nil {
+ klog.Errorf("%#v\n", err)
+ panic(err)
+ }
+ f, err := os.Create(absPath)
+ if err != nil {
+ klog.Errorf("%#v\n", err)
+ panic(err)
+ }
+ return NewDetailsReporterWithWriter(f)
+}
+
+// SpecSuiteWillBegin is implemented as a noop to satisfy the reporter interface for ginkgo.
+func (reporter *DetailsReporter) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *types.SuiteSummary) {
+}
+
+// SpecSuiteDidEnd is implemented as a noop to satisfy the reporter interface for ginkgo.
+func (reporter *DetailsReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {}
+
+// SpecDidComplete is invoked by Ginkgo each time a spec is completed (including skipped specs).
+func (reporter *DetailsReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+ b, err := json.Marshal(specSummary)
+ if err != nil {
+ klog.Errorf("Error in detail reporter: %v", err)
+ return
+ }
+ _, err = reporter.Writer.Write(b)
+ if err != nil {
+ klog.Errorf("Error saving test details in detail reporter: %v", err)
+ return
+ }
+ // Print a newline between records for easier viewing in various tools.
+ _, err = fmt.Fprintln(reporter.Writer, "")
+ if err != nil {
+ klog.Errorf("Error saving test details in detail reporter: %v", err)
+ return
+ }
+}
+
+// SpecWillRun is implemented as a noop to satisfy the reporter interface for ginkgo.
+func (reporter *DetailsReporter) SpecWillRun(specSummary *types.SpecSummary) {}
+
+// BeforeSuiteDidRun is implemented as a noop to satisfy the reporter interface for ginkgo.
+func (reporter *DetailsReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {}
+
+// AfterSuiteDidRun is implemented as a noop to satisfy the reporter interface for ginkgo.
+func (reporter *DetailsReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+ if c, ok := reporter.Writer.(io.Closer); ok {
+ c.Close()
+ }
+}
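For reference, a minimal sketch (not part of the diff) of a consumer for the newline-delimited JSON stream that DetailsReporter writes; it mirrors the json.Decoder loop in walk.go's main above. The file name and the length guard on ComponentTexts are illustrative assumptions.

// readspecs.go: a minimal sketch mirroring the json.Decoder loop in walk.go.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"strings"

	"github.com/onsi/ginkgo/types"
)

func main() {
	// specsummaries.json is the output of the list_conformance_specs genrule (illustrative path).
	f, err := os.Open("specsummaries.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := json.NewDecoder(f)
	for {
		var spec types.SpecSummary
		if err := dec.Decode(&spec); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		if len(spec.ComponentTexts) < 2 {
			continue // nothing useful recorded for this record
		}
		// ComponentTexts[0] is the suite name; the rest, joined, form the full test name.
		name := strings.Join(spec.ComponentTexts[1:], " ")
		if strings.Contains(name, "[Conformance]") {
			fmt.Println(name)
		}
	}
}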