Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-07 03:03:59 +00:00

Merge pull request #120959 from pohly/e2e-test-whitespace-cleanup

e2e: remove redundant spaces in test names

commit f19b62fc09
@@ -30,7 +30,7 @@ import (
 )
 
 const (
-	//e.g. framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) {
+	// e.g. framework.ConformanceIt("should provide secure master service", func(ctx context.Context) {
 	patternStartConformance = `framework.ConformanceIt\(.*, func\(\) {$`
 	patternEndConformance   = `}\)$`
 	patternSkip             = `e2eskipper.Skip.*\(`
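Background (illustration, not part of this commit): test/conformance/walk.go uses the patterns above to find conformance specs in the e2e sources, so any stray whitespace inside the ConformanceIt string literal flows verbatim into the registered test name and into the generated conformance.yaml. A minimal, hypothetical Go sketch of that kind of regex scan (the real walker is more involved):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same expression as patternStartConformance in the hunk above.
var startConformance = regexp.MustCompile(`framework.ConformanceIt\(.*, func\(\) {$`)

func main() {
	// Hypothetical input line; note the trailing space inside the string literal.
	line := `framework.ConformanceIt("should provide secure master service ", func() {`
	if startConformance.MatchString(line) {
		// Take the quoted test name; the trailing space survives into it.
		name := line[strings.Index(line, `"`)+1 : strings.LastIndex(line, `"`)]
		fmt.Printf("found conformance test: %q\n", name)
		// Output: found conformance test: "should provide secure master service "
	}
}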
test/conformance/testdata/conformance.yaml (vendored), 102 changes
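Aside (illustration, not part of this commit): conformance.yaml is generated, and each codename is the nested Ginkgo description strings joined with single spaces plus the [Conformance] tag. A trailing or leading space inside one of those strings therefore surfaces as a doubled space in the codename, which is exactly what the hunks below clean up. A hypothetical sketch of the join, assuming this simplified shape (codename here is not the real generator's API):

package main

import (
	"fmt"
	"strings"
)

// codename mimics how a full test name is assembled from its description
// segments; illustration only.
func codename(segments ...string) string {
	return strings.Join(segments, " ")
}

func main() {
	before := codename("[sig-network] Services", "should provide secure master service ", "[Conformance]")
	after := codename("[sig-network] Services", "should provide secure master service", "[Conformance]")
	fmt.Printf("%q\n%q\n", before, after)
	// "[sig-network] Services should provide secure master service  [Conformance]"
	// "[sig-network] Services should provide secure master service [Conformance]"
}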
@@ -186,7 +186,7 @@
 - testname: Custom Resource Definition, create
   codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
     Simple CustomResourceDefinition creating/deleting custom resource definition objects
-    works  [Conformance]'
+    works [Conformance]'
   description: Create a API extension client and define a random custom resource definition.
     Create the custom resource definition and then delete it. The creation and deletion
     MUST be successful.
@@ -195,7 +195,7 @@
 - testname: Custom Resource Definition, status sub-resource
   codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
     Simple CustomResourceDefinition getting/updating/patching custom resource definition
-    status sub-resource works  [Conformance]'
+    status sub-resource works [Conformance]'
   description: Create a custom resource definition. Attempt to read, update and patch
     its status sub-resource; all mutating sub-resource operations MUST be visible
     to subsequent reads.
@@ -203,7 +203,8 @@
   file: test/e2e/apimachinery/custom_resource_definition.go
 - testname: Custom Resource Definition, list
   codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
-    Simple CustomResourceDefinition listing custom resource definition objects works  [Conformance]'
+    Simple CustomResourceDefinition listing custom resource definition objects works
+    [Conformance]'
   description: Create a API extension client, define 10 labeled custom resource definitions
     and list them using a label selector; the list result MUST contain only the labeled
     custom resource definitions. Delete the labeled custom resource definitions via
@@ -213,7 +214,7 @@
   file: test/e2e/apimachinery/custom_resource_definition.go
 - testname: Custom Resource Definition, defaulting
   codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
-    custom resource defaulting for requests and from storage works  [Conformance]'
+    custom resource defaulting for requests and from storage works [Conformance]'
   description: Create a custom resource definition without default. Create CR. Add
     default and read CR until the default is applied. Create another CR. Remove default,
     add default for another field and read CR until new field is defaulted, but old
@@ -993,7 +994,7 @@
   file: test/e2e/apps/replica_set.go
 - testname: Replica Set, run basic image
   codename: '[sig-apps] ReplicaSet should serve a basic image on each replica with
-    a public image  [Conformance]'
+    a public image [Conformance]'
   description: Create a ReplicaSet with a Pod and a single Container. Make sure that
     the Pod is running. Pod SHOULD send a valid response when queried.
   release: v1.9
@@ -1031,7 +1032,7 @@
   file: test/e2e/apps/rc.go
 - testname: Replication Controller, run basic image
   codename: '[sig-apps] ReplicationController should serve a basic image on each replica
-    with a public image  [Conformance]'
+    with a public image [Conformance]'
   description: Replication Controller MUST create a Pod with Basic Image and MUST
     run the service with the provided image. Image MUST be tested by dialing into
     the service listening through TCP, UDP and HTTP.
@@ -1150,7 +1151,8 @@
   release: v1.21
   file: test/e2e/auth/service_accounts.go
 - testname: Service account tokens auto mount optionally
-  codename: '[sig-auth] ServiceAccounts should allow opting out of API token automount  [Conformance]'
+  codename: '[sig-auth] ServiceAccounts should allow opting out of API token automount
+    [Conformance]'
   description: Ensure that Service Account keys are mounted into the Pod only when
     AutoMountServiceToken is not set to false. We test the following scenarios here.
     1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil a) Service
@@ -1177,7 +1179,7 @@
   release: v1.21
   file: test/e2e/auth/service_accounts.go
 - testname: Service Account Tokens Must AutoMount
-  codename: '[sig-auth] ServiceAccounts should mount an API token into pods  [Conformance]'
+  codename: '[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]'
   description: Ensure that Service Account keys are mounted into the Container. Pod
     contains three containers each will read Service Account token, root CA and default
     namespace respectively from the default API Token Mount path. All these three
@@ -1221,7 +1223,7 @@
   file: test/e2e/auth/subjectreviews.go
 - testname: Kubectl, guestbook application
   codename: '[sig-cli] Kubectl client Guestbook application should create and stop
-    a working application  [Conformance]'
+    a working application [Conformance]'
   description: Create Guestbook application that contains an agnhost primary server,
     2 agnhost replicas, frontend application, frontend service and agnhost primary
     service and agnhost replica service. Using frontend service, the test will write
@@ -1232,21 +1234,21 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, check version v1
   codename: '[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in
-    available api versions  [Conformance]'
+    available api versions [Conformance]'
   description: Run kubectl to get api versions, output MUST contain returned versions
     with 'v1' listed.
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, cluster info
   codename: '[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes
-    control plane services is included in cluster-info  [Conformance]'
+    control plane services is included in cluster-info [Conformance]'
   description: Call kubectl to get cluster-info, output MUST contain cluster-info
     returned and Kubernetes control plane SHOULD be running.
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, describe pod or rc
   codename: '[sig-cli] Kubectl client Kubectl describe should check if kubectl describe
-    prints relevant information for rc and pods  [Conformance]'
+    prints relevant information for rc and pods [Conformance]'
   description: Deploy an agnhost controller and an agnhost service. Kubectl describe
     pods SHOULD return the name, namespace, labels, state and other information as
     expected. Kubectl describe on rc, service, node and namespace SHOULD also return
@@ -1262,7 +1264,8 @@
   release: v1.19
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, create service, replication controller
-  codename: '[sig-cli] Kubectl client Kubectl expose should create services for rc  [Conformance]'
+  codename: '[sig-cli] Kubectl client Kubectl expose should create services for rc
+    [Conformance]'
   description: Create a Pod running agnhost listening to port 6379. Using kubectl
     expose the agnhost primary replication controllers at port 1234. Validate that
     the replication controller is listening on port 1234 and the target port is set
@@ -1272,7 +1275,8 @@
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, label update
-  codename: '[sig-cli] Kubectl client Kubectl label should update the label on a resource  [Conformance]'
+  codename: '[sig-cli] Kubectl client Kubectl label should update the label on a resource
+    [Conformance]'
   description: When a Pod is running, update a Label using 'kubectl label' command.
     The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the
     container MUST verify that the label can be read back. Use 'kubectl label label-'
@@ -1282,7 +1286,7 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, patch to annotate
   codename: '[sig-cli] Kubectl client Kubectl patch should add annotations for pods
-    in rc  [Conformance]'
+    in rc [Conformance]'
   description: Start running agnhost and a replication controller. When the pod is
     running, using 'kubectl patch' command add annotations. The annotation MUST be
     added to running pods and SHOULD be able to read added annotations from each of
@@ -1291,7 +1295,7 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, replace
   codename: '[sig-cli] Kubectl client Kubectl replace should update a single-container
-    pod''s image  [Conformance]'
+    pod''s image [Conformance]'
   description: Command 'kubectl replace' on a existing Pod with a new spec MUST update
     the image of the container running in the Pod. A -f option to 'kubectl replace'
     SHOULD force to re-create the resource. The new Pod SHOULD have the container
@@ -1300,7 +1304,7 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, run pod
   codename: '[sig-cli] Kubectl client Kubectl run pod should create a pod from an
-    image when restart is Never  [Conformance]'
+    image when restart is Never [Conformance]'
   description: Command 'kubectl run' MUST create a pod, when a image name is specified
     in the run command. After the run command there SHOULD be a pod that should exist
     with one container running the specified image.
@@ -1317,13 +1321,14 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, version
   codename: '[sig-cli] Kubectl client Kubectl version should check is all data is
-    printed  [Conformance]'
+    printed [Conformance]'
   description: The command 'kubectl version' MUST return the major, minor versions, GitCommit,
     etc of the Client and the Server that the kubectl is configured to connect to.
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, proxy socket
-  codename: '[sig-cli] Kubectl client Proxy server should support --unix-socket=/path  [Conformance]'
+  codename: '[sig-cli] Kubectl client Proxy server should support --unix-socket=/path
+    [Conformance]'
   description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=<some
     path>. Call the proxy server by requesting api versions from http://locahost:0/api.
     The proxy server MUST provide at least one version string
@@ -1331,7 +1336,7 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, proxy port zero
   codename: '[sig-cli] Kubectl client Proxy server should support proxy with --port
-    0  [Conformance]'
+    0 [Conformance]'
   description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0.
     Call the proxy server by requesting api versions from unix socket. The proxy server
     MUST provide at least one version string.
@@ -1339,14 +1344,15 @@
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, replication controller
   codename: '[sig-cli] Kubectl client Update Demo should create and stop a replication
-    controller  [Conformance]'
+    controller [Conformance]'
   description: Create a Pod and a container with a given image. Configure replication
     controller to run 2 replicas. The number of running instances of the Pod MUST
     equal the number of replicas set on the replication controller which is 2.
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, scale replication controller
-  codename: '[sig-cli] Kubectl client Update Demo should scale a replication controller  [Conformance]'
+  codename: '[sig-cli] Kubectl client Update Demo should scale a replication controller
+    [Conformance]'
   description: Create a Pod and a container with a given image. Configure replication
     controller to run 2 replicas. The number of running instances of the Pod MUST
     equal the number of replicas set on the replication controller which is 2. Update
@@ -1355,7 +1361,8 @@
   release: v1.9
   file: test/e2e/kubectl/kubectl.go
 - testname: Kubectl, logs
-  codename: '[sig-cli] Kubectl logs logs should be able to retrieve and filter logs  [Conformance]'
+  codename: '[sig-cli] Kubectl logs logs should be able to retrieve and filter logs
+    [Conformance]'
   description: When a Pod is running then it MUST generate logs. Starting a Pod should
     have a expected log line. Also log command options MUST work as expected and described
     below. 'kubectl logs -tail=1' should generate a output of one line, the last line
@@ -1429,7 +1436,7 @@
   release: v1.15
   file: test/e2e/network/dns.go
 - testname: DNS, services
-  codename: '[sig-network] DNS should provide DNS for services  [Conformance]'
+  codename: '[sig-network] DNS should provide DNS for services [Conformance]'
   description: When a headless service is created, the service MUST be able to resolve
     all the required service endpoints. When the service is created, any pod in the
     same namespace must be able to resolve the service by all of the expected DNS
@@ -1437,7 +1444,7 @@
   release: v1.9
   file: test/e2e/network/dns.go
 - testname: DNS, cluster
-  codename: '[sig-network] DNS should provide DNS for the cluster  [Conformance]'
+  codename: '[sig-network] DNS should provide DNS for the cluster [Conformance]'
   description: When a Pod is created, the pod MUST be able to resolve cluster dns
     entries such as kubernetes.default via DNS.
   release: v1.9
@@ -1529,8 +1536,8 @@
   release: v1.19
   file: test/e2e/network/ingress.go
 - testname: IngressClass API
-  codename: '[sig-network] IngressClass API  should support creating IngressClass
-    API operations [Conformance]'
+  codename: '[sig-network] IngressClass API should support creating IngressClass API
+    operations [Conformance]'
   description: ' - The networking.k8s.io API group MUST exist in the /apis discovery
     document. - The networking.k8s.io/v1 API group/version MUST exist in the /apis/networking.k8s.io
     discovery document. - The ingressclasses resource MUST exist in the /apis/networking.k8s.io/v1
@@ -1607,13 +1614,14 @@
   release: v1.21
   file: test/e2e/network/proxy.go
 - testname: Proxy, logs service endpoint
-  codename: '[sig-network] Proxy version v1 should proxy through a service and a pod  [Conformance]'
+  codename: '[sig-network] Proxy version v1 should proxy through a service and a pod
+    [Conformance]'
   description: Select any node in the cluster to invoke /logs endpoint using the
     /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable.
   release: v1.9
   file: test/e2e/network/proxy.go
 - testname: Service endpoint latency, thresholds
-  codename: '[sig-network] Service endpoints latency should not be very high  [Conformance]'
+  codename: '[sig-network] Service endpoints latency should not be very high [Conformance]'
   description: Run 100 iterations of create service with the Pod running the pause
     image, measure the time it takes for creating the service and the endpoint with
     the service name is available. These durations are captured for 100 iterations,
@@ -1761,13 +1769,13 @@
   release: v1.19
   file: test/e2e/network/service.go
 - testname: Kubernetes Service
-  codename: '[sig-network] Services should provide secure master service  [Conformance]'
+  codename: '[sig-network] Services should provide secure master service [Conformance]'
   description: By default when a kubernetes cluster is running there MUST be a 'kubernetes'
     service running in the cluster.
   release: v1.9
   file: test/e2e/network/service.go
 - testname: Service, endpoints
-  codename: '[sig-network] Services should serve a basic endpoint from pods  [Conformance]'
+  codename: '[sig-network] Services should serve a basic endpoint from pods [Conformance]'
   description: Create a service with a endpoint without any Pods, the service MUST
     run and show empty endpoints. Add a pod to the service and the service MUST validate
     to show all the endpoints for the ports exposed by the Pod. Add another Pod then
@@ -1788,7 +1796,7 @@
   release: v1.29
   file: test/e2e/network/service.go
 - testname: Service, endpoints with multiple ports
-  codename: '[sig-network] Services should serve multiport endpoints from pods  [Conformance]'
+  codename: '[sig-network] Services should serve multiport endpoints from pods [Conformance]'
   description: Create a service with two ports but no Pods are added to the service
     yet. The service MUST run and show empty set of endpoints. Add a Pod to the first
     port, service MUST list one endpoint for the Pod on that port. Add another Pod
@@ -2226,7 +2234,7 @@
   release: v1.13
   file: test/e2e/common/node/pods.go
 - testname: Pods, prestop hook
-  codename: '[sig-node] PreStop should call prestop when killing a pod  [Conformance]'
+  codename: '[sig-node] PreStop should call prestop when killing a pod [Conformance]'
   description: Create a server pod with a rest endpoint '/write' that changes state.Received
     field. Create a Pod with a pre-stop handle that posts to the /write endpoint on
     the server Pod. Verify that the Pod with pre-stop hook is running. Delete the
@@ -2322,16 +2330,6 @@
     count MUST be zero.
   release: v1.9
   file: test/e2e/common/node/container_probe.go
-- testname: RuntimeClass API
-  codename: '[sig-node] RuntimeClass  should support RuntimeClasses API operations
-    [Conformance]'
-  description: ' The node.k8s.io API group MUST exist in the /apis discovery document.
-    The node.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery
-    document. The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery
-    document. The runtimeclasses resource must support create, get, list, watch, update,
-    patch, delete, and deletecollection.'
-  release: v1.20
-  file: test/e2e/common/node/runtimeclass.go
 - testname: Pod with the deleted RuntimeClass is rejected.
   codename: '[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass
     [NodeConformance] [Conformance]'
@@ -2362,6 +2360,16 @@
     is not being tested here.
   release: v1.20
   file: test/e2e/common/node/runtimeclass.go
+- testname: RuntimeClass API
+  codename: '[sig-node] RuntimeClass should support RuntimeClasses API operations
+    [Conformance]'
+  description: ' The node.k8s.io API group MUST exist in the /apis discovery document.
+    The node.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery
+    document. The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery
+    document. The runtimeclasses resource must support create, get, list, watch, update,
+    patch, delete, and deletecollection.'
+  release: v1.20
+  file: test/e2e/common/node/runtimeclass.go
 - testname: Secrets, pod environment field
   codename: '[sig-node] Secrets should be consumable from pods in env vars [NodeConformance]
     [Conformance]'
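(Note: the RuntimeClass API entry added here is the same one removed in the previous hunk. conformance.yaml appears to be emitted in codename order, and dropping the leading space from the test name changes where the entry sorts, so the generator re-emits it at a new position.)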
@@ -2543,13 +2551,13 @@
   file: test/e2e/scheduling/limit_range.go
 - testname: Scheduler, resource limits
   codename: '[sig-scheduling] SchedulerPredicates [Serial] validates resource limits
-    of pods that are allowed to run  [Conformance]'
+    of pods that are allowed to run [Conformance]'
   description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
   release: v1.9
   file: test/e2e/scheduling/predicates.go
 - testname: Scheduler, node selector matching
   codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector
-    is respected if matching  [Conformance]'
+    is respected if matching [Conformance]'
   description: 'Create a label on the node {k: v}. Then create a Pod with a NodeSelector
     set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches
     then Pod MUST be scheduled on that node.'
@@ -2557,7 +2565,7 @@
   file: test/e2e/scheduling/predicates.go
 - testname: Scheduler, node selector not matching
   codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector
-    is respected if not matching  [Conformance]'
+    is respected if not matching [Conformance]'
   description: Create a Pod with a NodeSelector set to a value that does not match
     a node in the cluster. Since there are no nodes matching the criteria the Pod
     MUST not be scheduled.
@@ -2623,7 +2631,7 @@
   release: v1.26
   file: test/e2e/storage/csi_inline.go
 - testname: CSIStorageCapacity API
-  codename: '[sig-storage] CSIStorageCapacity  should support CSIStorageCapacities
+  codename: '[sig-storage] CSIStorageCapacity should support CSIStorageCapacities
     API operations [Conformance]'
   description: ' The storage.k8s.io API group MUST exist in the /apis discovery document.
     The storage.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery
@@ -56,7 +56,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 	Create the custom resource definition and then delete it. The creation and deletion MUST
 	be successful.
 	*/
-	framework.ConformanceIt("creating/deleting custom resource definition objects works ", func(ctx context.Context) {
+	framework.ConformanceIt("creating/deleting custom resource definition objects works", func(ctx context.Context) {
 
 		config, err := framework.LoadConfig()
 		framework.ExpectNoError(err, "loading config")
@@ -83,7 +83,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 	custom resource definitions via delete collection; the delete MUST be successful and MUST delete only the
 	labeled custom resource definitions.
 	*/
-	framework.ConformanceIt("listing custom resource definition objects works ", func(ctx context.Context) {
+	framework.ConformanceIt("listing custom resource definition objects works", func(ctx context.Context) {
 		testListSize := 10
 		config, err := framework.LoadConfig()
 		framework.ExpectNoError(err, "loading config")
@@ -143,7 +143,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 	Description: Create a custom resource definition. Attempt to read, update and patch its status sub-resource;
 	all mutating sub-resource operations MUST be visible to subsequent reads.
 	*/
-	framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func(ctx context.Context) {
+	framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works", func(ctx context.Context) {
 		config, err := framework.LoadConfig()
 		framework.ExpectNoError(err, "loading config")
 		apiExtensionClient, err := clientset.NewForConfig(config)
@@ -267,7 +267,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
 	the default is applied. Create another CR. Remove default, add default for another field and read CR until
 	new field is defaulted, but old default stays.
 	*/
-	framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func(ctx context.Context) {
+	framework.ConformanceIt("custom resource defaulting for requests and from storage works", func(ctx context.Context) {
 		config, err := framework.LoadConfig()
 		framework.ExpectNoError(err, "loading config")
 		apiExtensionClient, err := clientset.NewForConfig(config)
@@ -67,7 +67,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 	Testname: Replication Controller, run basic image
 	Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
 	*/
-	framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
+	framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) {
 		TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
 	})
 
@@ -109,7 +109,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 	Testname: Replica Set, run basic image
 	Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
 	*/
-	framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) {
+	framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) {
 		testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
 	})
 
@@ -76,7 +76,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 	Token Mount path. All these three files MUST exist and the Service
 	Account mount path MUST be auto mounted to the Container.
 	*/
-	framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) {
+	framework.ConformanceIt("should mount an API token into pods", func(ctx context.Context) {
 		sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -159,7 +159,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 	include test cases 1a,1b,2a,2b and 2c.
 	In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted.
 	*/
-	framework.ConformanceIt("should allow opting out of API token automount ", func(ctx context.Context) {
+	framework.ConformanceIt("should allow opting out of API token automount", func(ctx context.Context) {
 
 		var err error
 		trueValue := true
@@ -187,7 +187,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 	The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery document.
 	The runtimeclasses resource must support create, get, list, watch, update, patch, delete, and deletecollection.
 	*/
-	framework.ConformanceIt(" should support RuntimeClasses API operations", func(ctx context.Context) {
+	framework.ConformanceIt("should support RuntimeClasses API operations", func(ctx context.Context) {
 		// Setup
 		rcVersion := "v1"
 		rcClient := f.ClientSet.NodeV1().RuntimeClasses()
@@ -339,7 +339,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 	Testname: Kubectl, replication controller
 	Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
 	*/
-	framework.ConformanceIt("should create and stop a replication controller ", func(ctx context.Context) {
+	framework.ConformanceIt("should create and stop a replication controller", func(ctx context.Context) {
 		defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
 
 		ginkgo.By("creating a replication controller")
@@ -352,7 +352,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 	Testname: Kubectl, scale replication controller
 	Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
 	*/
-	framework.ConformanceIt("should scale a replication controller ", func(ctx context.Context) {
+	framework.ConformanceIt("should scale a replication controller", func(ctx context.Context) {
 		defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
 
 		ginkgo.By("creating a replication controller")
@@ -394,7 +394,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 	Testname: Kubectl, guestbook application
 	Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read.
 	*/
-	framework.ConformanceIt("should create and stop a working application ", func(ctx context.Context) {
+	framework.ConformanceIt("should create and stop a working application", func(ctx context.Context) {
 		defer forEachGBFile(func(contents string) {
 			cleanupKubectlInputs(contents, ns)
 		})
@@ -822,7 +822,7 @@ metadata:
 	Testname: Kubectl, check version v1
 	Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed.
 	*/
-	framework.ConformanceIt("should check if v1 is in available api versions ", func(ctx context.Context) {
+	framework.ConformanceIt("should check if v1 is in available api versions", func(ctx context.Context) {
 		ginkgo.By("validating api versions")
 		output := e2ekubectl.RunKubectlOrDie(ns, "api-versions")
 		if !strings.Contains(output, "v1") {
@@ -1308,7 +1308,7 @@ metadata:
 	Testname: Kubectl, cluster info
 	Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running.
 	*/
-	framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info ", func(ctx context.Context) {
+	framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info", func(ctx context.Context) {
 		ginkgo.By("validating cluster-info")
 		output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info")
 		// Can't check exact strings due to terminal control commands (colors)
@@ -1334,7 +1334,7 @@ metadata:
 	Testname: Kubectl, describe pod or rc
 	Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
 	*/
-	framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func(ctx context.Context) {
+	framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods", func(ctx context.Context) {
 		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
 		serviceJSON := readTestFileOrDie(agnhostServiceFilename)
 
@@ -1473,7 +1473,7 @@ metadata:
 	Testname: Kubectl, create service, replication controller
 	Description: Create a Pod running agnhost listening to port 6379. Using kubectl expose the agnhost primary replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that agnhost primary is listening. Using kubectl expose the agnhost primary as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that agnhost primary is listening.
 	*/
-	framework.ConformanceIt("should create services for rc ", func(ctx context.Context) {
+	framework.ConformanceIt("should create services for rc", func(ctx context.Context) {
 		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
 
 		agnhostPort := 6379
@@ -1567,7 +1567,7 @@ metadata:
 	Testname: Kubectl, label update
 	Description: When a Pod is running, update a Label using 'kubectl label' command. The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the container MUST verify that the label can be read back. Use 'kubectl label label-' to remove the label. 'kubectl get pod' with -l option SHOULD not list the deleted label as the label is removed.
 	*/
-	framework.ConformanceIt("should update the label on a resource ", func(ctx context.Context) {
+	framework.ConformanceIt("should update the label on a resource", func(ctx context.Context) {
 		labelName := "testing-label"
 		labelValue := "testing-label-value"
 
@@ -1633,7 +1633,7 @@ metadata:
 	Testname: Kubectl, patch to annotate
 	Description: Start running agnhost and a replication controller. When the pod is running, using 'kubectl patch' command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller.
 	*/
-	framework.ConformanceIt("should add annotations for pods in rc ", func(ctx context.Context) {
+	framework.ConformanceIt("should add annotations for pods in rc", func(ctx context.Context) {
 		controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
 		ginkgo.By("creating Agnhost RC")
 		e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
@@ -1666,7 +1666,7 @@ metadata:
 	Testname: Kubectl, version
 	Description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to.
 	*/
-	framework.ConformanceIt("should check is all data is printed ", func(ctx context.Context) {
+	framework.ConformanceIt("should check is all data is printed", func(ctx context.Context) {
 		versionString := e2ekubectl.RunKubectlOrDie(ns, "version")
 		// we expect following values for: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric
 		requiredItems := []string{"Client Version: ", "Server Version: "}
@@ -1699,7 +1699,7 @@ metadata:
 	Testname: Kubectl, run pod
 	Description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image.
 	*/
-	framework.ConformanceIt("should create a pod from an image when restart is Never ", func(ctx context.Context) {
+	framework.ConformanceIt("should create a pod from an image when restart is Never", func(ctx context.Context) {
 		ginkgo.By("running the image " + httpdImage)
 		e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
 		ginkgo.By("verifying the pod " + podName + " was created")
@@ -1733,7 +1733,7 @@ metadata:
 	Testname: Kubectl, replace
 	Description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image.
 	*/
-	framework.ConformanceIt("should update a single-container pod's image ", func(ctx context.Context) {
+	framework.ConformanceIt("should update a single-container pod's image", func(ctx context.Context) {
 		ginkgo.By("running the image " + httpdImage)
 		e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
 
@@ -1773,7 +1773,7 @@ metadata:
 	Testname: Kubectl, proxy port zero
 	Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string.
 	*/
-	framework.ConformanceIt("should support proxy with --port 0 ", func(ctx context.Context) {
+	framework.ConformanceIt("should support proxy with --port 0", func(ctx context.Context) {
 		ginkgo.By("starting the proxy server")
 		port, cmd, err := startProxyServer(ns)
 		if cmd != nil {
@@ -1798,7 +1798,7 @@ metadata:
 	Testname: Kubectl, proxy socket
 	Description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=<some path>. Call the proxy server by requesting api versions from http://locahost:0/api. The proxy server MUST provide at least one version string
 	*/
-	framework.ConformanceIt("should support --unix-socket=/path ", func(ctx context.Context) {
+	framework.ConformanceIt("should support --unix-socket=/path", func(ctx context.Context) {
 		ginkgo.By("Starting the proxy")
 		tmpdir, err := os.MkdirTemp("", "kubectl-proxy-unix")
 		if err != nil {
@@ -111,7 +111,7 @@ var _ = SIGDescribe("Kubectl logs", func() {
 	'kubectl --since=1s' should output logs that are only 1 second older from now
 	'kubectl --since=24h' should output logs that are only 1 day older from now
 	*/
-	framework.ConformanceIt("should be able to retrieve and filter logs ", func(ctx context.Context) {
+	framework.ConformanceIt("should be able to retrieve and filter logs", func(ctx context.Context) {
 
 		ginkgo.By("Waiting for log generator to start.")
 		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("DNS", func() {
 	Testname: DNS, cluster
 	Description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS.
 	*/
-	framework.ConformanceIt("should provide DNS for the cluster ", func(ctx context.Context) {
+	framework.ConformanceIt("should provide DNS for the cluster", func(ctx context.Context) {
 		// All the names we need to be able to resolve.
 		// TODO: Spin up a separate test service and test that dns works for that service.
 		// NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below
@@ -134,7 +134,7 @@ var _ = common.SIGDescribe("DNS", func() {
 	Testname: DNS, services
 	Description: When a headless service is created, the service MUST be able to resolve all the required service endpoints. When the service is created, any pod in the same namespace must be able to resolve the service by all of the expected DNS names.
 	*/
-	framework.ConformanceIt("should provide DNS for services ", func(ctx context.Context) {
+	framework.ConformanceIt("should provide DNS for services", func(ctx context.Context) {
 		// NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below
 		// Create a test headless service.
 		ginkgo.By("Creating a test headless service")
@@ -264,7 +264,7 @@ var _ = common.SIGDescribe("IngressClass API", func() {
 	- The ingressclasses resource MUST exist in the /apis/networking.k8s.io/v1 discovery document.
 	- The ingressclass resource must support create, get, list, watch, update, patch, delete, and deletecollection.
 	*/
-	framework.ConformanceIt(" should support creating IngressClass API operations", func(ctx context.Context) {
+	framework.ConformanceIt("should support creating IngressClass API operations", func(ctx context.Context) {
 
 		// Setup
 		icClient := f.ClientSet.NetworkingV1().IngressClasses()
@@ -98,7 +98,7 @@ var _ = common.SIGDescribe("Proxy", func() {
 	Testname: Proxy, logs service endpoint
 	Description: Select any node in the cluster to invoke /logs endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable.
 	*/
-	framework.ConformanceIt("should proxy through a service and a pod ", func(ctx context.Context) {
+	framework.ConformanceIt("should proxy through a service and a pod", func(ctx context.Context) {
 		start := time.Now()
 		labels := map[string]string{"proxy-service-target": "true"}
 		service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, &v1.Service{
@@ -772,7 +772,7 @@ var _ = common.SIGDescribe("Services", func() {
 	Testname: Kubernetes Service
 	Description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' service running in the cluster.
 	*/
-	framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) {
+	framework.ConformanceIt("should provide secure master service", func(ctx context.Context) {
 		_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
 		framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes")
 	})
@@ -782,7 +782,7 @@ var _ = common.SIGDescribe("Services", func() {
 	Testname: Service, endpoints
 	Description: Create a service with a endpoint without any Pods, the service MUST run and show empty endpoints. Add a pod to the service and the service MUST validate to show all the endpoints for the ports exposed by the Pod. Add another Pod then the list of all Ports exposed by both the Pods MUST be valid and have corresponding service endpoint. Once the second Pod is deleted then set of endpoint MUST be validated to show only ports from the first container that are exposed. Once both pods are deleted the endpoints from the service MUST be empty.
 	*/
-	framework.ConformanceIt("should serve a basic endpoint from pods ", func(ctx context.Context) {
+	framework.ConformanceIt("should serve a basic endpoint from pods", func(ctx context.Context) {
 		serviceName := "endpoint-test2"
 		ns := f.Namespace.Name
 		jig := e2eservice.NewTestJig(cs, ns, serviceName)
@@ -843,7 +843,7 @@ var _ = common.SIGDescribe("Services", func() {
 	Testname: Service, endpoints with multiple ports
 	Description: Create a service with two ports but no Pods are added to the service yet. The service MUST run and show empty set of endpoints. Add a Pod to the first port, service MUST list one endpoint for the Pod on that port. Add another Pod to the second port, service MUST list both the endpoints. Delete the first Pod and the service MUST list only the endpoint to the second Pod. Delete the second Pod and the service must now have empty set of endpoints.
 	*/
-	framework.ConformanceIt("should serve multiport endpoints from pods ", func(ctx context.Context) {
+	framework.ConformanceIt("should serve multiport endpoints from pods", func(ctx context.Context) {
 		// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
 		serviceName := "multi-endpoint-test"
 		ns := f.Namespace.Name
@@ -56,7 +56,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
 	Testname: Service endpoint latency, thresholds
 	Description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, then the durations are sorted to compute 50th, 90th and 99th percentile. The single server latency MUST not exceed liberally set thresholds of 20s for 50th percentile and 50s for the 90th percentile.
 	*/
-	framework.ConformanceIt("should not be very high ", func(ctx context.Context) {
+	framework.ConformanceIt("should not be very high", func(ctx context.Context) {
 		const (
 			// These are very generous criteria. Ideally we will
 			// get this much lower in the future. See issue
@@ -166,7 +166,7 @@ var _ = SIGDescribe("PreStop", func() {
 	Testname: Pods, prestop hook
 	Description: Create a server pod with a rest endpoint '/write' that changes state.Received field. Create a Pod with a pre-stop handle that posts to the /write endpoint on the server Pod. Verify that the Pod with pre-stop hook is running. Delete the Pod with the pre-stop hook. Before the Pod is deleted, pre-stop handler MUST be called when configured. Verify that the Pod is deleted and a call to prestop hook is verified by checking the status received on the server Pod.
 	*/
-	framework.ConformanceIt("should call prestop when killing a pod ", func(ctx context.Context) {
+	framework.ConformanceIt("should call prestop when killing a pod", func(ctx context.Context) {
 		testPreStop(ctx, f.ClientSet, f.Namespace.Name)
 	})
 
@@ -330,7 +330,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	Testname: Scheduler, resource limits
 	Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
 	*/
-	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func(ctx context.Context) {
+	framework.ConformanceIt("validates resource limits of pods that are allowed to run", func(ctx context.Context) {
 		WaitForStableCluster(cs, workerNodes)
 		nodeMaxAllocatable := int64(0)
 		nodeToAllocatableMap := make(map[string]int64)
@@ -442,7 +442,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	Testname: Scheduler, node selector not matching
 	Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
 	*/
-	framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func(ctx context.Context) {
+	framework.ConformanceIt("validates that NodeSelector is respected if not matching", func(ctx context.Context) {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
@@ -465,7 +465,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	Testname: Scheduler, node selector matching
 	Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
 	*/
-	framework.ConformanceIt("validates that NodeSelector is respected if matching ", func(ctx context.Context) {
+	framework.ConformanceIt("validates that NodeSelector is respected if matching", func(ctx context.Context) {
 		nodeName := GetNodeThatCanRunPod(ctx, f)
 
 		ginkgo.By("Trying to apply a random label on the found node.")
@@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("CSIStorageCapacity", func() {
 	The csistoragecapacities resource MUST exist in the /apis/storage.k8s.io/v1 discovery document.
 	The csistoragecapacities resource must support create, get, list, watch, update, patch, delete, and deletecollection.
 	*/
-	framework.ConformanceIt(" should support CSIStorageCapacities API operations", func(ctx context.Context) {
+	framework.ConformanceIt("should support CSIStorageCapacities API operations", func(ctx context.Context) {
 		// Setup
 		cscVersion := "v1"
 		cscClient := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name)
@@ -43,7 +43,7 @@ const (
 	csiResizeWaitPeriod = 5 * time.Minute
 )
 
-var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
+var _ = utils.SIGDescribe("PersistentVolumes-expansion", func() {
 	f := framework.NewDefaultFramework("persistent-local-volumes-expansion")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("loopback local block volume", func() {
@@ -150,7 +150,7 @@ var (
 		Level: "s0:c0,c1"}
 )
 
-var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
+var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
 	f := framework.NewDefaultFramework("persistent-local-volumes-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
@@ -56,7 +56,7 @@ type NodeSelector struct {
 	labelValue string
 }
 
-var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
+var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("vcp-at-scale")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
@@ -44,7 +44,7 @@ var (
 	image = imageutils.GetE2EImage(imageutils.Pause)
 )
 
-var _ = SIGDescribe("[Feature:Windows] Windows volume mounts ", func() {
+var _ = SIGDescribe("[Feature:Windows] Windows volume mounts", func() {
 	f := framework.NewDefaultFramework("windows-volumes")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	var (
@@ -44,7 +44,7 @@ func prefixedName(namePrefix string, name string) string {
 	return fmt.Sprintf("%s-%s", namePrefix, name)
 }
 
-var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
+var _ = SIGDescribe("[NodeConformance] Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
@@ -747,7 +747,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle ", func() {
 	})
 })
 
-var _ = SIGDescribe("[Serial] Containers Lifecycle ", func() {
+var _ = SIGDescribe("[Serial] Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test-serial")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
@@ -887,7 +887,7 @@ var _ = SIGDescribe("[Serial] Containers Lifecycle ", func() {
 	})
 })
 
-var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle ", func() {
+var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
@@ -44,7 +44,7 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
-var _ = SIGDescribe("[Feature:StandaloneMode] ", func() {
+var _ = SIGDescribe("[Feature:StandaloneMode]", func() {
 	f := framework.NewDefaultFramework("static-pod")
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 	ginkgo.Context("when creating a static pod", func() {