Merge pull request #89454 from gavinfish/import-aliases

Update .import-aliases for e2e test framework
Commit 4e9dd8fd36 by Kubernetes Prow Robot, 2020-03-27 14:35:54 -07:00, committed via GitHub.
75 changed files with 713 additions and 702 deletions


@@ -51,22 +51,34 @@
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1",
"k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1",
+"k8s.io/kubernetes/test/e2e/framework/auth": "e2eauth",
"k8s.io/kubernetes/test/e2e/framework/autoscaling": "e2eautoscaling",
+"k8s.io/kubernetes/test/e2e/framework/config": "e2econfig",
+"k8s.io/kubernetes/test/e2e/framework/deployment": "e2edeployment",
"k8s.io/kubernetes/test/e2e/framework/endpoints": "e2eendpoints",
"k8s.io/kubernetes/test/e2e/framework/events": "e2eevents",
+"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper": "e2eginkgowrapper",
+"k8s.io/kubernetes/test/e2e/framework/gpu": "e2egpu",
+"k8s.io/kubernetes/test/e2e/framework/ingress": "e2eingress",
"k8s.io/kubernetes/test/e2e/framework/job": "e2ejob",
"k8s.io/kubernetes/test/e2e/framework/kubectl": "e2ekubectl",
"k8s.io/kubernetes/test/e2e/framework/kubelet": "e2ekubelet",
"k8s.io/kubernetes/test/e2e/framework/log": "e2elog",
+"k8s.io/kubernetes/test/e2e/framework/metrics": "e2emetrics",
"k8s.io/kubernetes/test/e2e/framework/network": "e2enetwork",
"k8s.io/kubernetes/test/e2e/framework/node": "e2enode",
"k8s.io/kubernetes/test/e2e/framework/perf": "e2eperf",
"k8s.io/kubernetes/test/e2e/framework/pod": "e2epod",
"k8s.io/kubernetes/test/e2e/framework/pv": "e2epv",
"k8s.io/kubernetes/test/e2e/framework/rc": "e2erc",
+"k8s.io/kubernetes/test/e2e/framework/replicaset": "e2ereplicaset",
"k8s.io/kubernetes/test/e2e/framework/resource": "e2eresource",
"k8s.io/kubernetes/test/e2e/framework/security": "e2esecurity",
"k8s.io/kubernetes/test/e2e/framework/service": "e2eservice",
"k8s.io/kubernetes/test/e2e/framework/skipper": "e2eskipper",
-"k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh"
+"k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh",
+"k8s.io/kubernetes/test/e2e/framework/statefulset": "e2estatefulset",
+"k8s.io/kubernetes/test/e2e/framework/testfiles": "e2etestfiles",
+"k8s.io/kubernetes/test/e2e/framework/timer": "e2etimer",
+"k8s.io/kubernetes/test/e2e/framework/volume": "e2evolume"
}
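
For context, a minimal sketch of how the aliases declared above are meant to appear in an e2e test file; the package name and the blank assignments are illustrative only, while the import paths, aliases, and helper names are taken from this PR:

package example // hypothetical file, for illustration

import (
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// Reference one helper from each package so the aliased imports are used;
// these are the same helpers the renamed call sites below rely on.
var (
	_ = e2edeployment.NewDeployment
	_ = e2ereplicaset.WaitForReadyReplicaSet
	_ = e2estatefulset.NewStatefulSet
)

The call-site hunks that follow replace the older e2edeploy, replicaset/replicasetutil, and e2esset spellings with exactly these aliases.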


@@ -42,7 +42,7 @@ import (
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
@@ -274,9 +274,9 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
-err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
-err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace)
// kubectl create -f service.yaml
@@ -333,7 +333,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
-err = e2edeploy.WaitForDeploymentComplete(client, deployment)
+err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml
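
Note that only the file-local alias changes in hunks like the ones above; the imported package, helper names, and signatures stay the same. A hedged before/after sketch, using identifiers from this file:

package example // illustrative only

import (
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" // was: e2edeploy "..."
)

// Same package and signature; only the qualifier at each call site changes,
// e.g. e2edeploy.WaitForDeploymentRevisionAndImage(...) becomes
// e2edeployment.WaitForDeploymentRevisionAndImage(...).
var _ = e2edeployment.WaitForDeploymentRevisionAndImage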


@@ -35,7 +35,7 @@ import (
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@@ -339,9 +339,9 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
ginkgo.By("Wait for the deployment to be ready")
-err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
-err = e2edeploy.WaitForDeploymentComplete(client, deployment)
+err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)
ginkgo.By("Deploying the webhook service")


@@ -42,7 +42,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -840,9 +840,9 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
ginkgo.By("Wait for the deployment to be ready")
-err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
-err = e2edeploy.WaitForDeploymentComplete(client, deployment)
+err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)
ginkgo.By("Deploying the webhook service")


@@ -41,9 +41,9 @@ import (
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-"k8s.io/kubernetes/test/e2e/framework/replicaset"
+e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -233,16 +233,16 @@ func testDeleteDeployment(f *framework.Framework) {
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
framework.Logf("Creating simple deployment %s", deploymentName)
-d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
-err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
framework.ExpectNoError(err)
-err = e2edeploy.WaitForDeploymentComplete(c, deploy)
+err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
@@ -280,17 +280,17 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete webserver pods and instead bring up agnhost pods.
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
-d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 3546343826724305833.
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
-err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
framework.ExpectNoError(err)
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
-err = e2edeploy.WaitForDeploymentComplete(c, deploy)
+err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
// There should be 1 old RS (webserver-controller, which is adopted)
@@ -309,22 +309,22 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up agnhost pods.
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
-d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
-err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
framework.ExpectNoError(err)
framework.Logf("Waiting deployment %q to complete", deploymentName)
-err = e2edeploy.WaitForDeploymentComplete(c, deployment)
+err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
// Update deployment to delete agnhost pods and bring up webserver pods.
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
update.Spec.Template.Spec.Containers[0].Image = WebserverImage
})
@@ -395,7 +395,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
-d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
@@ -427,7 +427,7 @@ func testRolloverDeployment(f *framework.Framework) {
// Wait for replica set to become ready before adopting it.
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
-err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
+err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)
framework.ExpectNoError(err)
// Create a deployment to delete webserver pods and instead bring up redis-slave pods.
@@ -437,7 +437,7 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %q", deploymentName)
-newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
+newDeployment := e2edeployment.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
@@ -468,7 +468,7 @@ func testRolloverDeployment(f *framework.Framework) {
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up agnhost pods.
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := AgnhostImageName, AgnhostImage
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@@ -481,7 +481,7 @@ func testRolloverDeployment(f *framework.Framework) {
// Wait for it to be updated to revision 2
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
-err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
+err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
framework.ExpectNoError(err)
framework.Logf("Make sure deployment %q is complete", deploymentName)
@@ -528,7 +528,7 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a webserver deployment.
deploymentName := "webserver"
thirty := int32(30)
-d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@@ -546,7 +546,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@@ -556,7 +556,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
@@ -567,7 +567,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.6:
// just scaling
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
randomScale(update, i)
})
framework.ExpectNoError(err)
@@ -576,14 +576,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
framework.ExpectNoError(err)
} else {
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@@ -620,7 +620,7 @@ func testIterativeDeployments(f *framework.Framework) {
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment.Spec.Paused {
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
})
}
@@ -630,7 +630,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.ExpectNoError(err)
framework.Logf("Waiting for deployment %q status", deploymentName)
-err = e2edeploy.WaitForDeploymentComplete(c, deployment)
+err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
@@ -646,10 +646,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
-d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
-err = e2edeploy.WaitForDeploymentComplete(c, deploy)
+err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
@@ -673,10 +673,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
-d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
-err = e2edeploy.WaitForDeploymentComplete(c, deploy)
+err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
@@ -703,7 +703,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a webserver deployment.
deploymentName := "webserver-deployment"
-d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@@ -722,7 +722,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
-err = e2edeploy.WaitForDeploymentComplete(c, deployment)
+err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
@@ -731,7 +731,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "webserver:404"
})
framework.ExpectNoError(err)
@@ -747,7 +747,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
-err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
+err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 too.
@@ -796,7 +796,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Replicas = &newReplicas
})
framework.ExpectNoError(err)
@@ -868,7 +868,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
framework.Logf("Creating Deployment %q", name)
podLabels := map[string]string{"name": name}
replicas := int32(3)
-d := e2edeploy.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
// NewDeployment assigned the same value to both d.Spec.Selector and
// d.Spec.Template.Labels, so mutating the one would mutate the other.
// Thus we need to set d.Spec.Template.Labels to a new value if we want
@@ -893,7 +893,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
}
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
-err = e2edeploy.WaitForDeploymentComplete(c, deployment)
+err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
@@ -939,7 +939,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
framework.Logf("Triggering a rolling deployment several times")
for i := 1; i <= 3; i++ {
framework.Logf("Updating label deployment %q pod spec (iteration #%d)", name, i)
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i)
setAffinities(update, true)
})


@@ -43,7 +43,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
@@ -379,13 +379,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all stateful set in ns %v", ns)
-e2esset.DeleteAllStatefulSets(c, ns)
+e2estatefulset.DeleteAllStatefulSets(c, ns)
})
ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
-ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
+ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{})
framework.ExpectNoError(err)
@@ -396,19 +396,19 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
common.RestartNodes(f.ClientSet, nodes)
ginkgo.By("waiting for pods to be running again")
-e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
+e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
-ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
+ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{})
framework.ExpectNoError(err)
-e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
-pod := e2esset.GetPodList(c, ps).Items[0]
+e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
+pod := e2estatefulset.GetPodList(c, ps).Items[0]
node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
@@ -427,7 +427,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
ginkgo.By("waiting for pods to be running again")
-e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
+e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})
})


@@ -32,7 +32,7 @@ import (
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
+e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
@@ -229,7 +229,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
-rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
+rs, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})


@@ -39,7 +39,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -99,7 +99,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
-ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
+ss = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
@@ -112,7 +112,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
-e2esset.DeleteAllStatefulSets(c, ns)
+e2estatefulset.DeleteAllStatefulSets(c, ns)
})
// This can't be Conformance yet because it depends on a default
@@ -121,37 +121,37 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
-e2esset.PauseNewPods(ss)
+e2estatefulset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + ss.Name)
-e2esset.Saturate(c, ss)
+e2estatefulset.Saturate(c, ss)
ginkgo.By("Verifying statefulset mounted data directory is usable")
-framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))
+framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data"))
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
-framework.ExpectNoError(e2esset.CheckHostname(c, ss))
+framework.ExpectNoError(e2estatefulset.CheckHostname(c, ss))
ginkgo.By("Verifying statefulset set proper service name")
-framework.ExpectNoError(e2esset.CheckServiceName(ss, headlessSvcName))
+framework.ExpectNoError(e2estatefulset.CheckServiceName(ss, headlessSvcName))
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
ginkgo.By("Running " + cmd + " in all stateful pods")
-framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))
+framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd))
ginkgo.By("Restarting statefulset " + ss.Name)
-e2esset.Restart(c, ss)
-e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+e2estatefulset.Restart(c, ss)
+e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Verifying statefulset mounted data directory is usable")
-framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))
+framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data"))
cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
ginkgo.By("Running " + cmd + " in all stateful pods")
-framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))
+framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd))
})
// This can't be Conformance yet because it depends on a default
@@ -160,7 +160,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 1
-e2esset.PauseNewPods(ss)
+e2estatefulset.PauseNewPods(ss)
// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
@@ -170,8 +170,8 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Kind = kind
ginkgo.By("Saturating stateful set " + ss.Name)
-e2esset.Saturate(c, ss)
-pods := e2esset.GetPodList(c, ss)
+e2estatefulset.Saturate(c, ss)
+pods := e2estatefulset.GetPodList(c, ss)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas)))
ginkgo.By("Checking that stateful set pods are created with ControllerRef")
@@ -245,18 +245,18 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 2
-e2esset.PauseNewPods(ss)
+e2estatefulset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
-e2esset.WaitForRunning(c, 1, 0, ss)
+e2estatefulset.WaitForRunning(c, 1, 0, ss)
ginkgo.By("Resuming stateful pod at index 0.")
-e2esset.ResumeNextPod(c, ss)
+e2estatefulset.ResumeNextPod(c, ss)
ginkgo.By("Waiting for stateful pod at index 1 to enter running.")
-e2esset.WaitForRunning(c, 2, 1, ss)
+e2estatefulset.WaitForRunning(c, 2, 1, ss)
// Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not*
// create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till
@@ -266,13 +266,13 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss)
ginkgo.By("Confirming stateful pod at index 0 is recreated.")
-e2esset.WaitForRunning(c, 2, 1, ss)
+e2estatefulset.WaitForRunning(c, 2, 1, ss)
ginkgo.By("Resuming stateful pod at index 1.")
-e2esset.ResumeNextPod(c, ss)
+e2estatefulset.ResumeNextPod(c, ss)
ginkgo.By("Confirming all stateful pods in statefulset are created.")
-e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
@@ -291,7 +291,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ss)
})
@@ -302,7 +302,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
@@ -316,12 +316,12 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
-e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
-pods := e2esset.GetPodList(c, ss)
+pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
pods.Items[i].Namespace,
@@ -412,9 +412,9 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Restoring Pods to the correct revision when they are deleted")
deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 2, ss)
-e2esset.WaitForRunningAndReady(c, 3, ss)
+e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name)
-pods = e2esset.GetPodList(c, ss)
+pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
@@ -494,19 +494,19 @@ var _ = SIGDescribe("StatefulSet", func() {
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
-e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
-pods := e2esset.GetPodList(c, ss)
+pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
@@ -519,9 +519,9 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss) deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 1, ss) deleteStatefulPodAtIndex(c, 1, ss)
deleteStatefulPodAtIndex(c, 2, ss) deleteStatefulPodAtIndex(c, 2, ss)
e2esset.WaitForRunningAndReady(c, 3, ss) e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name) ss = getStatefulSet(c, ss.Namespace, ss.Name)
pods = e2esset.GetPodList(c, ss) pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items { for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
@@ -548,9 +548,9 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss) deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 1, ss) deleteStatefulPodAtIndex(c, 1, ss)
deleteStatefulPodAtIndex(c, 2, ss) deleteStatefulPodAtIndex(c, 2, ss)
e2esset.WaitForRunningAndReady(c, 3, ss) e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name) ss = getStatefulSet(c, ss.Namespace, ss.Name)
pods = e2esset.GetPodList(c, ss) pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items { for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
@@ -579,24 +579,24 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
setHTTPProbe(ss) setHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod") ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
breakHTTPProbe(c, ss) breakHTTPProbe(c, ss)
waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0) e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3) e2estatefulset.UpdateReplicas(c, ss, 3)
confirmStatefulPodCount(c, 1, ss, 10*time.Second, true) confirmStatefulPodCount(c, 1, ss, 10*time.Second, true)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
restoreHTTPProbe(c, ss) restoreHTTPProbe(c, ss)
e2esset.WaitForRunningAndReady(c, 3, ss) e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order") ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"} expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
@@ -622,14 +622,14 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
breakHTTPProbe(c, ss) breakHTTPProbe(c, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0) e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
waitForRunningAndNotReady(c, 3, ss) waitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0) e2estatefulset.UpdateReplicas(c, ss, 0)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, true) confirmStatefulPodCount(c, 3, ss, 10*time.Second, true)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
restoreHTTPProbe(c, ss) restoreHTTPProbe(c, ss)
e2esset.Scale(c, ss, 0) e2estatefulset.Scale(c, ss, 0)
ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order") ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"} expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
@@ -658,37 +658,37 @@ var _ = SIGDescribe("StatefulSet", func() {
psLabels := klabels.Set(labels) psLabels := klabels.Set(labels)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
setHTTPProbe(ss) setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
breakHTTPProbe(c, ss) breakHTTPProbe(c, ss)
waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss) waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0) e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3) e2estatefulset.UpdateReplicas(c, ss, 3)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, false) confirmStatefulPodCount(c, 3, ss, 10*time.Second, false)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
restoreHTTPProbe(c, ss) restoreHTTPProbe(c, ss)
e2esset.WaitForRunningAndReady(c, 3, ss) e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ginkgo.By("Scale down will not halt with unhealthy stateful pod") ginkgo.By("Scale down will not halt with unhealthy stateful pod")
breakHTTPProbe(c, ss) breakHTTPProbe(c, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0) e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
waitForRunningAndNotReady(c, 3, ss) waitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0) e2estatefulset.UpdateReplicas(c, ss, 0)
confirmStatefulPodCount(c, 0, ss, 10*time.Second, false) confirmStatefulPodCount(c, 0, ss, 10*time.Second, false)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
restoreHTTPProbe(c, ss) restoreHTTPProbe(c, ss)
e2esset.Scale(c, ss, 0) e2estatefulset.Scale(c, ss, 0)
e2esset.WaitForStatusReplicas(c, ss, 0) e2estatefulset.WaitForStatusReplicas(c, ss, 0)
}) })
/* /*
@@ -724,7 +724,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
ss := e2esset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels) ss := e2estatefulset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0] statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
ss.Spec.Template.Spec.NodeName = node.Name ss.Spec.Template.Spec.NodeName = node.Name
@@ -791,11 +791,11 @@ var _ = SIGDescribe("StatefulSet", func() {
*/ */
framework.ConformanceIt("should have a working scale subresource", func() { framework.ConformanceIt("should have a working scale subresource", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
setHTTPProbe(ss) setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss) ss = waitForStatus(c, ss)
ginkgo.By("getting scale subresource") ginkgo.By("getting scale subresource")
@@ -836,7 +836,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns) framework.DumpDebugInfo(c, ns)
} }
framework.Logf("Deleting all statefulset in ns %v", ns) framework.Logf("Deleting all statefulset in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns) e2estatefulset.DeleteAllStatefulSets(c, ns)
}) })
// Do not mark this as Conformance. // Do not mark this as Conformance.
@@ -907,8 +907,8 @@ func (c *clusterAppTester) run() {
default: default:
if restartCluster { if restartCluster {
ginkgo.By("Restarting stateful set " + ss.Name) ginkgo.By("Restarting stateful set " + ss.Name)
e2esset.Restart(c.client, ss) e2estatefulset.Restart(c.client, ss)
e2esset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss) e2estatefulset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss)
} }
} }
@@ -928,7 +928,7 @@ func (z *zookeeperTester) name() string {
} }
func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet { func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet {
z.ss = e2esset.CreateStatefulSet(z.client, zookeeperManifestPath, ns) z.ss = e2estatefulset.CreateStatefulSet(z.client, zookeeperManifestPath, ns)
return z.ss return z.ss
} }
@@ -966,7 +966,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
} }
func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet { func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns) m.ss = e2estatefulset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name) framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
for _, cmd := range []string{ for _, cmd := range []string{
@@ -1006,7 +1006,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string {
} }
func (m *redisTester) deploy(ns string) *appsv1.StatefulSet { func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = e2esset.CreateStatefulSet(m.client, redisManifestPath, ns) m.ss = e2estatefulset.CreateStatefulSet(m.client, redisManifestPath, ns)
return m.ss return m.ss
} }
@@ -1037,7 +1037,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
} }
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns) c.ss = e2estatefulset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name) framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{ for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;", "CREATE DATABASE IF NOT EXISTS foo;",
@@ -1088,12 +1088,12 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
setHTTPProbe(ss) setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{}) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss) ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision)) ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := e2esset.GetPodList(c, ss) pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items { for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace, pods.Items[i].Namespace,
@@ -1101,7 +1101,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision)) currentRevision))
} }
e2esset.SortStatefulPods(pods) e2estatefulset.SortStatefulPods(pods)
err = breakPodHTTPProbe(ss, &pods.Items[1]) err = breakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err) framework.ExpectNoError(err)
ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name) ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
@@ -1121,11 +1121,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update") framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update")
ginkgo.By("Updating Pods in reverse ordinal order") ginkgo.By("Updating Pods in reverse ordinal order")
pods = e2esset.GetPodList(c, ss) pods = e2estatefulset.GetPodList(c, ss)
e2esset.SortStatefulPods(pods) e2estatefulset.SortStatefulPods(pods)
err = restorePodHTTPProbe(ss, &pods.Items[1]) err = restorePodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err) framework.ExpectNoError(err)
ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name) ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss) ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace, ss.Namespace,
@@ -1161,10 +1161,10 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back") framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back")
ginkgo.By("Rolling back update in reverse ordinal order") ginkgo.By("Rolling back update in reverse ordinal order")
pods = e2esset.GetPodList(c, ss) pods = e2estatefulset.GetPodList(c, ss)
e2esset.SortStatefulPods(pods) e2estatefulset.SortStatefulPods(pods)
restorePodHTTPProbe(ss, &pods.Items[1]) restorePodHTTPProbe(ss, &pods.Items[1])
ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name) ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss) ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace, ss.Namespace,
@@ -1192,7 +1192,7 @@ func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.Statef
start := time.Now() start := time.Now()
deadline := start.Add(timeout) deadline := start.Add(timeout)
for t := time.Now(); t.Before(deadline); t = time.Now() { for t := time.Now(); t.Before(deadline); t = time.Now() {
podList := e2esset.GetPodList(c, ss) podList := e2estatefulset.GetPodList(c, ss)
statefulPodCount := len(podList.Items) statefulPodCount := len(podList.Items)
if statefulPodCount != count { if statefulPodCount != count {
e2epod.LogPodStates(podList.Items) e2epod.LogPodStates(podList.Items)
@@ -1224,7 +1224,7 @@ func breakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
} }
// Ignore 'mv' errors to make this idempotent. // Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path) cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
return e2esset.ExecInStatefulPods(c, ss, cmd) return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
} }
// breakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod. // breakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
@@ -1248,7 +1248,7 @@ func restoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
} }
// Ignore 'mv' errors to make this idempotent. // Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path) cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
return e2esset.ExecInStatefulPods(c, ss, cmd) return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
} }
// restorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod. // restorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.


@@ -23,7 +23,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
) )
// waitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have // waitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
@@ -43,7 +43,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace, set.Namespace,
set.Name) set.Name)
} }
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition) partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -55,7 +55,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace, set.Namespace,
set.Name, set.Name,
) )
e2esset.SortStatefulPods(pods) e2estatefulset.SortStatefulPods(pods)
for i := range pods.Items { for i := range pods.Items {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision { if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s", framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
@@ -85,7 +85,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
// waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation. // waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus // The returned StatefulSet contains such a StatefulSetStatus
func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation { if set2.Status.ObservedGeneration >= set.Generation {
set = set2 set = set2
return true, nil return true, nil
@@ -98,7 +98,7 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State
// waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition. // waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList var pods *v1.PodList
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
for i := range pods.Items { for i := range pods.Items {
@@ -121,7 +121,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Name, set.Name,
set.Spec.UpdateStrategy.Type) set.Spec.UpdateStrategy.Type)
} }
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2 set = set2
pods = pods2 pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) { if len(pods.Items) < int(*set.Spec.Replicas) {
@@ -132,7 +132,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Namespace, set.Namespace,
set.Name, set.Name,
) )
e2esset.SortStatefulPods(pods) e2estatefulset.SortStatefulPods(pods)
for i := range pods.Items { for i := range pods.Items {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision { if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s", framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
@@ -151,5 +151,5 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
// waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready. // waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) { func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
e2esset.WaitForRunning(c, numStatefulPods, 0, ss) e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss)
} }


@@ -36,8 +36,8 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/utils" "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -204,7 +204,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() { ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"} podLabels := map[string]string{"name": "audit-deployment-pod"}
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType) d := e2edeployment.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create audit-deployment") framework.ExpectNoError(err, "failed to create audit-deployment")
@@ -656,7 +656,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
// test authorizer annotations, RBAC is required. // test authorizer annotations, RBAC is required.
ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() { ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() {
if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) { if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
e2eskipper.Skipf("RBAC not enabled.") e2eskipper.Skipf("RBAC not enabled.")
} }


@@ -36,7 +36,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils" "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -347,7 +347,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
}, },
} }
if auth.IsRBACEnabled(f.ClientSet.RbacV1()) { if e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
testCases = append(testCases, annotationTestCases...) testCases = append(testCases, annotationTestCases...)
} }
expectedEvents := []utils.AuditEvent{} expectedEvents := []utils.AuditEvent{}


@@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -56,7 +56,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) { if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) {
e2eskipper.Skipf("PodSecurityPolicy not enabled") e2eskipper.Skipf("PodSecurityPolicy not enabled")
} }
if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) { if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
e2eskipper.Skipf("RBAC not enabled") e2eskipper.Skipf("RBAC not enabled")
} }
ns = f.Namespace.Name ns = f.Namespace.Name
@@ -72,7 +72,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Binding the edit role to the default SA") ginkgo.By("Binding the edit role to the default SA")
err = auth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns, err = e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default"}) rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default"})
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@@ -233,14 +233,14 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu
framework.ExpectNoError(err, "Failed to create PSP role") framework.ExpectNoError(err, "Failed to create PSP role")
// Bind the role to the namespace. // Bind the role to the namespace.
err = auth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{ err = e2eauth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind, Kind: rbacv1.ServiceAccountKind,
Namespace: ns, Namespace: ns,
Name: "default", Name: "default",
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(), framework.ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(ns, "default"), ns, "use", name, serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true)) schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true))


@@ -35,8 +35,8 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/chaosmonkey" "k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config" e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades"
@@ -48,8 +48,8 @@ import (
) )
var ( var (
upgradeTarget = config.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.") upgradeTarget = e2econfig.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
upgradeImage = config.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.") upgradeImage = e2econfig.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
) )
var upgradeTests = []upgrades.Test{ var upgradeTests = []upgrades.Test{
@@ -408,7 +408,7 @@ func finalizeUpgradeTest(start time.Time, tc *junit.TestCase) {
} }
switch r := r.(type) { switch r := r.(type) {
case ginkgowrapper.FailurePanic: case e2eginkgowrapper.FailurePanic:
tc.Failures = []*junit.Failure{ tc.Failures = []*junit.Failure{
{ {
Message: r.Message, Message: r.Message,


@@ -50,7 +50,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
@@ -77,10 +77,10 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() { ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func() { ginkgo.It("should be mountable for NFSv4", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestServerCleanup(f, config) defer e2evolume.TestServerCleanup(f, config)
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: v1.VolumeSource{ Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
@@ -95,16 +95,16 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
} }
// Must match content of test/images/volumes-tester/nfs/index.html // Must match content of test/images/volumes-tester/nfs/index.html
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}) })
}) })
ginkgo.Describe("NFSv3", func() { ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func() { ginkgo.It("should be mountable for NFSv3", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{}) config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestServerCleanup(f, config) defer e2evolume.TestServerCleanup(f, config)
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: v1.VolumeSource{ Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
@@ -118,7 +118,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
}, },
} }
// Must match content of test/images/volume-tester/nfs/index.html // Must match content of test/images/volume-tester/nfs/index.html
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}) })
}) })
@@ -128,15 +128,15 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
ginkgo.Describe("GlusterFS", func() { ginkgo.Describe("GlusterFS", func() {
ginkgo.It("should be mountable", func() { ginkgo.It("should be mountable", func() {
// create gluster server and endpoints // create gluster server and endpoints
config, _, _ := volume.NewGlusterfsServer(c, namespace.Name) config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name)
name := config.Prefix + "-server" name := config.Prefix + "-server"
defer func() { defer func() {
volume.TestServerCleanup(f, config) e2evolume.TestServerCleanup(f, config)
err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "defer: Gluster delete endpoints failed") framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
}() }()
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: v1.VolumeSource{ Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{
@@ -151,7 +151,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
ExpectedContent: "Hello from GlusterFS!", ExpectedContent: "Hello from GlusterFS!",
}, },
} }
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}) })
}) })
}) })


@@ -57,7 +57,7 @@ import (
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -445,10 +445,10 @@ func NewIngressTestJig(c clientset.Interface) *TestJig {
func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) { func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) {
var err error var err error
read := func(file string) string { read := func(file string) string {
return string(testfiles.ReadOrDie(filepath.Join(manifestPath, file))) return string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file)))
} }
exists := func(file string) bool { exists := func(file string) bool {
return testfiles.Exists(filepath.Join(manifestPath, file)) return e2etestfiles.Exists(filepath.Join(manifestPath, file))
} }
j.Logger.Infof("creating replication controller") j.Logger.Infof("creating replication controller")
@@ -499,7 +499,7 @@ func marshalToYaml(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) {
// ingressFromManifest reads a .json/yaml file and returns the ingress in it. // ingressFromManifest reads a .json/yaml file and returns the ingress in it.
func ingressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) { func ingressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) {
var ing networkingv1beta1.Ingress var ing networkingv1beta1.Ingress
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1008,7 +1008,7 @@ func (cont *NginxIngressController) Init() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
read := func(file string) string { read := func(file string) string {
return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file))) return string(e2etestfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
} }
framework.Logf("initializing nginx ingress controller") framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))


@@ -26,7 +26,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245) // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
) )
func nowStamp() string { func nowStamp() string {
@@ -53,7 +53,7 @@ func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
skip := offset + 1 skip := offset + 1
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip) e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
} }
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs // Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
@@ -64,7 +64,7 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0] skip += callerSkip[0]
} }
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip) e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
} }
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`) var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)


@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
) )
func nowStamp() string { func nowStamp() string {
@@ -50,5 +50,5 @@ func Failf(format string, args ...interface{}) {
func FailfWithOffset(offset int, format string, args ...interface{}) { func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
log("FAIL", msg) log("FAIL", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset) e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
} }


@@ -33,7 +33,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245) // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
) )
const ( const (
@@ -128,7 +128,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
} }
if auth.IsRBACEnabled(kubeClient.RbacV1()) { if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
// Create the Role to bind it to the namespace. // Create the Role to bind it to the namespace.
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
@@ -145,10 +145,10 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
} }
}) })
if auth.IsRBACEnabled(kubeClient.RbacV1()) { if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s", ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace)) podSecurityPolicyPrivileged, namespace))
err := auth.BindClusterRoleInNamespace(kubeClient.RbacV1(), err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
podSecurityPolicyPrivileged, podSecurityPolicyPrivileged,
namespace, namespace,
rbacv1.Subject{ rbacv1.Subject{
@@ -157,7 +157,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
Name: "default", Name: "default",
}) })
ExpectNoError(err) ExpectNoError(err)
ExpectNoError(auth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(), ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged, serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true)) schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
} }


@@ -25,7 +25,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config" e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -37,7 +37,7 @@ var loggingSoak struct {
Scale int `default:"1" usage:"number of waves of pods"` Scale int `default:"1" usage:"number of waves of pods"`
TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"` TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"`
} }
var _ = config.AddOptions(&loggingSoak, "instrumentation.logging.soak") var _ = e2econfig.AddOptions(&loggingSoak, "instrumentation.logging.soak")
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() { var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {


@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling" "k8s.io/kubernetes/test/e2e/scheduling"
@@ -93,7 +93,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
Args: []string{"nvidia-smi && sleep infinity"}, Args: []string{"nvidia-smi && sleep infinity"},
Resources: v1.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{ Limits: v1.ResourceList{
gpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI), e2egpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
}, },
}, },
}, },


@@ -23,7 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
@@ -34,13 +34,13 @@ import (
var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
f := framework.NewDefaultFramework("metrics-grabber") f := framework.NewDefaultFramework("metrics-grabber")
var c, ec clientset.Interface var c, ec clientset.Interface
var grabber *metrics.Grabber var grabber *e2emetrics.Grabber
gin.BeforeEach(func() { gin.BeforeEach(func() {
var err error var err error
c = f.ClientSet c = f.ClientSet
ec = f.KubemarkExternalClusterClientSet ec = f.KubemarkExternalClusterClientSet
framework.ExpectNoError(err) framework.ExpectNoError(err)
grabber, err = metrics.NewMetricsGrabber(c, ec, true, true, true, true, true) grabber, err = e2emetrics.NewMetricsGrabber(c, ec, true, true, true, true, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })


@@ -62,12 +62,12 @@ import (
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
commonutils "k8s.io/kubernetes/test/e2e/common" commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling" "k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd" "k8s.io/kubernetes/test/utils/crd"
@@ -208,7 +208,7 @@ func assertCleanup(ns string, selectors ...string) {
} }
func readTestFileOrDie(file string) []byte { func readTestFileOrDie(file string) []byte {
return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file)) return e2etestfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file))
} }
func runKubectlRetryOrDie(ns string, args ...string) string { func runKubectlRetryOrDie(ns string, args ...string) string {
@@ -300,7 +300,7 @@ var _ = SIGDescribe("Kubectl client", func() {
var nautilus string var nautilus string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in")))) nautilus = commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))))
}) })
/* /*
Release : v1.9 Release : v1.9
@@ -348,7 +348,7 @@ var _ = SIGDescribe("Kubectl client", func() {
"agnhost-master-deployment.yaml.in", "agnhost-master-deployment.yaml.in",
"agnhost-slave-deployment.yaml.in", "agnhost-slave-deployment.yaml.in",
} { } {
contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile)))) contents := commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile))))
run(contents) run(contents)
} }
} }
@@ -621,11 +621,11 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.It("should handle in-cluster config", func() { ginkgo.It("should handle in-cluster config", func() {
ginkgo.By("adding rbac permissions") ginkgo.By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
err := auth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name, err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(), err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"), serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)


@@ -24,13 +24,13 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
utilyaml "k8s.io/apimachinery/pkg/util/yaml" utilyaml "k8s.io/apimachinery/pkg/util/yaml"
scheme "k8s.io/client-go/kubernetes/scheme" scheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
) )
// PodFromManifest reads a .json/yaml file and returns the pod in it. // PodFromManifest reads a .json/yaml file and returns the pod in it.
func PodFromManifest(filename string) (*v1.Pod, error) { func PodFromManifest(filename string) (*v1.Pod, error) {
var pod v1.Pod var pod v1.Pod
data, err := testfiles.Read(filename) data, err := e2etestfiles.Read(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -48,7 +48,7 @@ func PodFromManifest(filename string) (*v1.Pod, error) {
// RcFromManifest reads a .json/yaml file and returns the rc in it. // RcFromManifest reads a .json/yaml file and returns the rc in it.
func RcFromManifest(fileName string) (*v1.ReplicationController, error) { func RcFromManifest(fileName string) (*v1.ReplicationController, error) {
var controller v1.ReplicationController var controller v1.ReplicationController
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -66,7 +66,7 @@ func RcFromManifest(fileName string) (*v1.ReplicationController, error) {
// SvcFromManifest reads a .json/yaml file and returns the service in it. // SvcFromManifest reads a .json/yaml file and returns the service in it.
func SvcFromManifest(fileName string) (*v1.Service, error) { func SvcFromManifest(fileName string) (*v1.Service, error) {
var svc v1.Service var svc v1.Service
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -84,7 +84,7 @@ func SvcFromManifest(fileName string) (*v1.Service, error) {
// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns. // StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) { func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
var ss appsv1.StatefulSet var ss appsv1.StatefulSet
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -108,7 +108,7 @@ func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns. // DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) { func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet var ds appsv1.DaemonSet
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -128,7 +128,7 @@ func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
// RoleFromManifest returns a Role from a manifest stored in fileName in the Namespace indicated by ns. // RoleFromManifest returns a Role from a manifest stored in fileName in the Namespace indicated by ns.
func RoleFromManifest(fileName, ns string) (*rbacv1.Role, error) { func RoleFromManifest(fileName, ns string) (*rbacv1.Role, error) {
var role rbacv1.Role var role rbacv1.Role
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
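The decode step is elided by the hunks above; a plausible minimal version of one of these helpers, assuming the usual apimachinery YAML-to-JSON plus scheme-codec path that the imports at the top of the file suggest, would be:

package manifestexample // hypothetical package name

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
	scheme "k8s.io/client-go/kubernetes/scheme"
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// podFromManifest reads a .json/.yaml file through the renamed e2etestfiles
// alias and decodes it into a Pod with the client-go scheme codecs.
func podFromManifest(filename string) (*v1.Pod, error) {
	var pod v1.Pod
	data, err := e2etestfiles.Read(filename)
	if err != nil {
		return nil, err
	}
	json, err := utilyaml.ToJSON(data)
	if err != nil {
		return nil, err
	}
	if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), json, &pod); err != nil {
		return nil, err
	}
	return &pod, nil
}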

View File

@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@ -136,7 +136,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
replicas := int32(len(nodeList.Items)) replicas := int32(len(nodeList.Items))
serverDeploymentSpec := e2edeploy.NewDeployment(serverDeploymentName, serverDeploymentSpec := e2edeployment.NewDeployment(serverDeploymentName,
replicas, replicas,
map[string]string{"test": "dual-stack-server"}, map[string]string{"test": "dual-stack-server"},
"dualstack-test-server", "dualstack-test-server",
@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
}, },
} }
clientDeploymentSpec := e2edeploy.NewDeployment(clientDeploymentName, clientDeploymentSpec := e2edeployment.NewDeployment(clientDeploymentName,
replicas, replicas,
map[string]string{"test": "dual-stack-client"}, map[string]string{"test": "dual-stack-client"},
"dualstack-test-client", "dualstack-test-client",
@ -198,15 +198,15 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{}) clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(cs, serverDeployment) err = e2edeployment.WaitForDeploymentComplete(cs, serverDeployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(cs, clientDeployment) err = e2edeployment.WaitForDeploymentComplete(cs, clientDeployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
serverPods, err := e2edeploy.GetPodsForDeployment(cs, serverDeployment) serverPods, err := e2edeployment.GetPodsForDeployment(cs, serverDeployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
clientPods, err := e2edeploy.GetPodsForDeployment(cs, clientDeployment) clientPods, err := e2edeployment.GetPodsForDeployment(cs, clientDeployment)
framework.ExpectNoError(err) framework.ExpectNoError(err)
assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80") assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80")
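A compact sketch of the e2edeployment flow the dual-stack test exercises above (create, wait for rollout, list pods); the deployment name, labels, and image reference are placeholders.

package dualstackexample // hypothetical package name

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// deployAndWait creates a small Deployment, waits for it to complete, and
// logs how many pods it produced.
func deployAndWait(cs clientset.Interface, ns string) {
	spec := e2edeployment.NewDeployment("example-server", 2,
		map[string]string{"test": "example-server"},
		"example-container", "k8s.gcr.io/pause:3.2", // placeholder image
		appsv1.RollingUpdateDeploymentStrategyType)

	deployment, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), spec, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment))

	pods, err := e2edeployment.GetPodsForDeployment(cs, deployment)
	framework.ExpectNoError(err)
	framework.Logf("deployment %s has %d pods", deployment.Name, len(pods.Items))
}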

View File

@ -36,8 +36,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
"k8s.io/kubernetes/test/e2e/framework/ingress" e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -54,22 +54,22 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
var ( var (
ns string ns string
jig *ingress.TestJig jig *e2eingress.TestJig
conformanceTests []ingress.ConformanceTests conformanceTests []e2eingress.ConformanceTests
) )
f := framework.NewDefaultFramework("ingress") f := framework.NewDefaultFramework("ingress")
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
jig = ingress.NewIngressTestJig(f.ClientSet) jig = e2eingress.NewIngressTestJig(f.ClientSet)
ns = f.Namespace.Name ns = f.Namespace.Name
// this test wants powerful permissions. Since the namespace names are unique, we can leave this // this test wants powerful permissions. Since the namespace names are unique, we can leave this
// lying around so we don't have to race any caches // lying around so we don't have to race any caches
err := auth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name, err := e2eauth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(), err = e2eauth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"), serviceaccount.MakeUsername(f.Namespace.Name, "default"),
"", "create", schema.GroupResource{Resource: "pods"}, true) "", "create", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -116,7 +116,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}) })
ginkgo.It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests { for _, t := range conformanceTests {
ginkgo.By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
@ -131,8 +131,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should support multiple TLS certs", func() { ginkgo.It("should support multiple TLS certs", func() {
ginkgo.By("Creating an ingress with no certs.") ginkgo.By("Creating an ingress with no certs.")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
ingress.IngressStaticIPKey: ns, e2eingress.IngressStaticIPKey: ns,
}, map[string]string{}) }, map[string]string{})
ginkgo.By("Adding multiple certs to the ingress.") ginkgo.By("Adding multiple certs to the ingress.")
@ -167,8 +167,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("multicluster ingress should get instance group annotation", func() { ginkgo.It("multicluster ingress should get instance group annotation", func() {
name := "echomap" name := "echomap"
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "http"), ns, map[string]string{
ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, e2eingress.IngressClassKey: e2eingress.MulticlusterIngressClassValue,
}, map[string]string{}) }, map[string]string{})
ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
@ -186,13 +186,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc. // Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
// Note: All resources except the firewall rule have an annotation. // Note: All resources except the firewall rule have an annotation.
umKey := ingress.StatusPrefix + "/url-map" umKey := e2eingress.StatusPrefix + "/url-map"
fwKey := ingress.StatusPrefix + "/forwarding-rule" fwKey := e2eingress.StatusPrefix + "/forwarding-rule"
tpKey := ingress.StatusPrefix + "/target-proxy" tpKey := e2eingress.StatusPrefix + "/target-proxy"
fwsKey := ingress.StatusPrefix + "/https-forwarding-rule" fwsKey := e2eingress.StatusPrefix + "/https-forwarding-rule"
tpsKey := ingress.StatusPrefix + "/https-target-proxy" tpsKey := e2eingress.StatusPrefix + "/https-target-proxy"
scKey := ingress.StatusPrefix + "/ssl-cert" scKey := e2eingress.StatusPrefix + "/ssl-cert"
beKey := ingress.StatusPrefix + "/backends" beKey := e2eingress.StatusPrefix + "/backends"
wait.Poll(2*time.Second, time.Minute, func() (bool, error) { wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -272,8 +272,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.NEGAnnotation: `{"ingress": true}`, e2eingress.NEGAnnotation: `{"ingress": true}`,
}) })
for _, t := range conformanceTests { for _, t := range conformanceTests {
ginkgo.By(t.EntryLog) ginkgo.By(t.EntryLog)
@ -288,7 +288,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should be able to switch between IG and NEG modes", func() { ginkgo.It("should be able to switch between IG and NEG modes", func() {
var err error var err error
ginkgo.By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
err = gceController.WaitForNegBackendService(jig.GetServicePorts(false)) err = gceController.WaitForNegBackendService(jig.GetServicePorts(false))
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -297,7 +297,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -315,7 +315,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -332,7 +332,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should be able to create a ClusterIP service", func() { ginkgo.It("should be able to create a ClusterIP service", func() {
ginkgo.By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
svcPorts := jig.GetServicePorts(false) svcPorts := jig.GetServicePorts(false)
err := gceController.WaitForNegBackendService(svcPorts) err := gceController.WaitForNegBackendService(svcPorts)
@ -367,7 +367,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
} }
ginkgo.By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
jig.WaitForIngressToStable() jig.WaitForIngressToStable()
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
@ -392,7 +392,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
name := "hostname" name := "hostname"
replicas := 8 replicas := 8
ginkgo.By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
jig.WaitForIngressToStable() jig.WaitForIngressToStable()
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
@ -459,11 +459,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var status ingress.NegStatus var status e2eingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation] v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
if !ok { if !ok {
// Wait for NEG sync loop to find NEGs // Wait for NEG sync loop to find NEGs
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations) framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
return false, nil return false, nil
} }
err = json.Unmarshal([]byte(v), &status) err = json.Unmarshal([]byte(v), &status)
@ -471,7 +471,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.Logf("Error in parsing Expose NEG annotation: %v", err) framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil return false, nil
} }
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v) framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
// Expect 2 NEGs to be created based on the test setup (neg-exposed) // Expect 2 NEGs to be created based on the test setup (neg-exposed)
if len(status.NetworkEndpointGroups) != 2 { if len(status.NetworkEndpointGroups) != 2 {
@ -506,7 +506,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
} }
ginkgo.By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false)) err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -528,7 +528,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
ginkgo.By("Create a basic HTTP ingress using standalone NEG") ginkgo.By("Create a basic HTTP ingress using standalone NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
name := "hostname" name := "hostname"
@ -539,7 +539,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -550,7 +550,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -561,7 +561,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -572,7 +572,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
delete(svc.Annotations, ingress.NEGAnnotation) delete(svc.Annotations, e2eingress.NEGAnnotation)
// Service cannot be ClusterIP if it's using Instance Groups. // Service cannot be ClusterIP if it's using Instance Groups.
svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Type = v1.ServiceTypeNodePort
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{}) _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
@ -589,7 +589,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Platform specific setup // Platform specific setup
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = e2eingress.MulticlusterIngressClassValue
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
ginkgo.By("Initializing gce controller") ginkgo.By("Initializing gce controller")
gceController = &gce.IngressController{ gceController = &gce.IngressController{
@ -626,8 +626,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}) })
ginkgo.It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.IngressStaticIPKey: ipName, e2eingress.IngressStaticIPKey: ipName,
}) })
for _, t := range conformanceTests { for _, t := range conformanceTests {
ginkgo.By(t.EntryLog) ginkgo.By(t.EntryLog)
@ -651,9 +651,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should remove clusters as expected", func() { ginkgo.It("should remove clusters as expected", func() {
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName, e2eingress.IngressStaticIPKey: ipName,
} }
ingFilePath := filepath.Join(ingress.IngressManifestPath, "http") ingFilePath := filepath.Join(e2eingress.IngressManifestPath, "http")
jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{}) jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
name := jig.Ingress.Name name := jig.Ingress.Name
@ -681,7 +681,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() { ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() {
ginkgo.By("Creating a single cluster ingress first") ginkgo.By("Creating a single cluster ingress first")
jig.Class = "" jig.Class = ""
singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2") singleIngFilePath := filepath.Join(e2eingress.GCEIngressManifestPath, "static-ip-2")
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{}) jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
// jig.Ingress will be overwritten when we create MCI, so keep a reference. // jig.Ingress will be overwritten when we create MCI, so keep a reference.
@ -689,11 +689,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Create the multi-cluster ingress next. // Create the multi-cluster ingress next.
ginkgo.By("Creating a multi-cluster ingress next") ginkgo.By("Creating a multi-cluster ingress next")
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = e2eingress.MulticlusterIngressClassValue
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName, e2eingress.IngressStaticIPKey: ipName,
} }
multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http") multiIngFilePath := filepath.Join(e2eingress.IngressManifestPath, "http")
jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{}) jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
mciIngress := jig.Ingress mciIngress := jig.Ingress
@ -703,7 +703,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.Class = "" jig.Class = ""
jig.TryDeleteIngress() jig.TryDeleteIngress()
jig.Ingress = mciIngress jig.Ingress = mciIngress
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = e2eingress.MulticlusterIngressClassValue
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
ginkgo.By("Cleanup: Deleting the multi-cluster ingress") ginkgo.By("Cleanup: Deleting the multi-cluster ingress")
@ -713,13 +713,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Time: borderline 5m, slow by design // Time: borderline 5m, slow by design
ginkgo.Describe("[Slow] Nginx", func() { ginkgo.Describe("[Slow] Nginx", func() {
var nginxController *ingress.NginxIngressController var nginxController *e2eingress.NginxIngressController
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
ginkgo.By("Initializing nginx controller") ginkgo.By("Initializing nginx controller")
jig.Class = "nginx" jig.Class = "nginx"
nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} nginxController = &e2eingress.NginxIngressController{Ns: ns, Client: jig.Client}
// TODO: This test may fail on other platforms. We can simply skip it // TODO: This test may fail on other platforms. We can simply skip it
// but we want to allow easy testing where a user might've hand // but we want to allow easy testing where a user might've hand
@ -753,7 +753,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Poll more frequently to reduce e2e completion time. // Poll more frequently to reduce e2e completion time.
// This test runs in presubmit. // This test runs in presubmit.
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests { for _, t := range conformanceTests {
ginkgo.By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
@ -775,11 +775,11 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
} }
} }
func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { func executePresharedCertTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) {
preSharedCertName := "test-pre-shared-cert" preSharedCertName := "test-pre-shared-cert"
ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com" testHostname := "test.ingress.com"
cert, key, err := ingress.GenerateRSACerts(testHostname, true) cert, key, err := e2eingress.GenerateRSACerts(testHostname, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -811,36 +811,36 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
ginkgo.By("Creating an ingress referencing the pre-shared certificate") ginkgo.By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation. // Create an ingress referencing this cert using pre-shared-cert annotation.
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressPreSharedCertKey: preSharedCertName, e2eingress.IngressPreSharedCertKey: preSharedCertName,
// Disallow HTTP to save resources. This is irrelevant to the // Disallow HTTP to save resources. This is irrelevant to the
// pre-shared cert test. // pre-shared cert test.
ingress.IngressAllowHTTPKey: "false", e2eingress.IngressAllowHTTPKey: "false",
} }
if staticIPName != "" { if staticIPName != "" {
ingAnnotations[ingress.IngressStaticIPKey] = staticIPName ingAnnotations[e2eingress.IngressStaticIPKey] = staticIPName
} }
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
ginkgo.By("Test that ingress works with the pre-shared certificate") ginkgo.By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
} }
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *e2eingress.TestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{ jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
ingress.IngressStaticIPKey: ipName, e2eingress.IngressStaticIPKey: ipName,
ingress.IngressAllowHTTPKey: "false", e2eingress.IngressAllowHTTPKey: "false",
}, map[string]string{}) }, map[string]string{})
ginkgo.By("waiting for Ingress to come up with ip: " + ip) ginkgo.By("waiting for Ingress to come up with ip: " + ip)
httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) httpClient := e2eingress.BuildInsecureClient(e2eingress.IngressReqTimeout)
framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
ginkgo.By("should reject HTTP traffic") ginkgo.By("should reject HTTP traffic")
framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
} }
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) {
ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured") ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
defer func() { defer func() {
@ -856,9 +856,9 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP") framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP")
ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} timeoutClient := &http.Client{Timeout: e2eingress.IngressReqTimeout}
err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := ingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") resp, err := e2eingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil { if err != nil {
framework.Logf("SimpleGET failed: %v", err) framework.Logf("SimpleGET failed: %v", err)
return false, nil return false, nil
@ -872,7 +872,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress") framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress")
} }
func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) { if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
@ -889,10 +889,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
return true, nil return true, nil
} }
var status ingress.NegStatus var status e2eingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation] v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
if !ok { if !ok {
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations) framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
return false, nil return false, nil
} }
@ -901,7 +901,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
framework.Logf("Error in parsing Expose NEG annotation: %v", err) framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil return false, nil
} }
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v) framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
if len(status.NetworkEndpointGroups) != negs { if len(status.NetworkEndpointGroups) != negs {
framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups)) framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))

View File

@ -32,7 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress" e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
) )
@ -64,10 +64,10 @@ var (
// IngressScaleFramework defines the framework for ingress scale testing. // IngressScaleFramework defines the framework for ingress scale testing.
type IngressScaleFramework struct { type IngressScaleFramework struct {
Clientset clientset.Interface Clientset clientset.Interface
Jig *ingress.TestJig Jig *e2eingress.TestJig
GCEController *gce.IngressController GCEController *gce.IngressController
CloudConfig framework.CloudConfig CloudConfig framework.CloudConfig
Logger ingress.TestLogger Logger e2eingress.TestLogger
Namespace string Namespace string
EnableTLS bool EnableTLS bool
@ -97,7 +97,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
Namespace: ns, Namespace: ns,
Clientset: cs, Clientset: cs,
CloudConfig: cloudConfig, CloudConfig: cloudConfig,
Logger: &ingress.E2ELogger{}, Logger: &e2eingress.E2ELogger{},
EnableTLS: true, EnableTLS: true,
NumIngressesTest: []int{ NumIngressesTest: []int{
numIngressesSmall, numIngressesSmall,
@ -111,7 +111,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
// PrepareScaleTest prepares framework for ingress scale testing. // PrepareScaleTest prepares framework for ingress scale testing.
func (f *IngressScaleFramework) PrepareScaleTest() error { func (f *IngressScaleFramework) PrepareScaleTest() error {
f.Logger.Infof("Initializing ingress test suite and gce controller...") f.Logger.Infof("Initializing ingress test suite and gce controller...")
f.Jig = ingress.NewIngressTestJig(f.Clientset) f.Jig = e2eingress.NewIngressTestJig(f.Clientset)
f.Jig.Logger = f.Logger f.Jig.Logger = f.Logger
f.Jig.PollInterval = scaleTestPollInterval f.Jig.PollInterval = scaleTestPollInterval
f.GCEController = &gce.IngressController{ f.GCEController = &gce.IngressController{
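A short sketch of how a caller wires this scale framework to the renamed logger types; the pointer return type is assumed from the usage in the standalone binary further below, and the helper name is a placeholder.

package scaleexample // hypothetical package name

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
	"k8s.io/kubernetes/test/e2e/network/scale"
)

// newScaleTest builds the ingress scale framework with the ginkgo-flavoured
// logger; the standalone binary below swaps in &e2eingress.GLogger{} instead.
func newScaleTest(cs clientset.Interface, ns string, cloudConfig framework.CloudConfig) *scale.IngressScaleFramework {
	f := scale.NewIngressScaleFramework(cs, ns, cloudConfig)
	f.Logger = &e2eingress.E2ELogger{}
	return f
}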

View File

@ -34,7 +34,7 @@ import (
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress" e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/network/scale" "k8s.io/kubernetes/test/e2e/network/scale"
) )
@ -153,7 +153,7 @@ func main() {
// Setting up a localized scale test framework. // Setting up a localized scale test framework.
f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig) f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
f.Logger = &ingress.GLogger{} f.Logger = &e2eingress.GLogger{}
// Customizing scale test. // Customizing scale test.
f.EnableTLS = enableTLS f.EnableTLS = enableTLS
f.OutputFile = outputFile f.OutputFile = outputFile

View File

@ -45,7 +45,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider" cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -931,7 +931,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}() }()
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Error in retrieving pause pod deployment") framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
@ -2992,7 +2992,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state") ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state")
deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1) deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
defer func() { defer func() {
framework.Logf("Deleting deployment") framework.Logf("Deleting deployment")
@ -3360,7 +3360,7 @@ func createAndGetExternalServiceFQDN(cs clientset.Interface, ns, serviceName str
func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment { func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment {
labels := map[string]string{"deployment": "agnhost-pause"} labels := map[string]string{"deployment": "agnhost-pause"}
pauseDeployment := e2edeploy.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType) pauseDeployment := e2edeployment.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
pauseDeployment.Spec.Template.Spec.Containers[0] = v1.Container{ pauseDeployment.Spec.Template.Spec.Containers[0] = v1.Container{
Name: "agnhost-pause", Name: "agnhost-pause",

View File

@ -36,7 +36,7 @@ import (
e2erc "k8s.io/kubernetes/test/e2e/framework/rc" e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -418,7 +418,7 @@ var _ = SIGDescribe("kubelet", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) _, nfsServerPod, nfsIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {

View File

@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2ejob "k8s.io/kubernetes/test/e2e/framework/job" e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -133,7 +133,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
} else { } else {
dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml" dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
} }
gpuResourceName = gpu.NVIDIAGPUResourceName gpuResourceName = e2egpu.NVIDIAGPUResourceName
framework.Logf("Using %v", dsYamlURL) framework.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers. // Creates the DaemonSet that installs Nvidia Drivers.

View File

@ -40,7 +40,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset" e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -686,7 +686,7 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe
func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet { func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
rs := createPauseRS(f, conf) rs := createPauseRS(f, conf)
framework.ExpectNoError(replicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout)) framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout))
return rs return rs
} }
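The same wait call, pulled out as a standalone sketch against the new e2ereplicaset alias; the helper name is a placeholder and the timeout is the framework default used above.

package schedulingexample // hypothetical package name

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
)

// waitForRS blocks until the given ReplicaSet has the target number of
// available replicas or the framework's pod timeout expires.
func waitForRS(f *framework.Framework, rs *appsv1.ReplicaSet, replicas int32) {
	framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(
		f.ClientSet, rs, replicas, framework.PodGetTimeout))
}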

View File

@ -54,7 +54,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@ -83,7 +83,7 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
"", // Default fsType "", // Default fsType
), ),
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
Capabilities: capabilities, Capabilities: capabilities,
@ -363,7 +363,7 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
Name: GCEPDCSIDriverName, Name: GCEPDCSIDriverName,
FeatureTag: "[Serial]", FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
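A minimal DriverInfo sketch showing where the renamed e2evolume.SizeRange sits; only fields that appear in this diff are used, and the driver name is a placeholder.

package driverexample // hypothetical package name

import (
	"k8s.io/apimachinery/pkg/util/sets"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// exampleDriverInfo describes a hypothetical driver that provisions volumes of
// at least 1Mi and only supports the default filesystem type.
var exampleDriverInfo = testsuites.DriverInfo{
	Name:        "example-driver",
	MaxFileSize: testpatterns.FileSizeMedium,
	SupportedSizeRange: e2evolume.SizeRange{
		Min: "1Mi",
	},
	SupportedFsType: sets.NewString(""),
}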

View File

@ -54,12 +54,12 @@ import (
"k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@ -99,7 +99,7 @@ func InitNFSDriver() testsuites.TestDriver {
Name: "nfs", Name: "nfs",
InTreePluginName: "kubernetes.io/nfs", InTreePluginName: "kubernetes.io/nfs",
MaxFileSize: testpatterns.FileSizeLarge, MaxFileSize: testpatterns.FileSizeLarge,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -124,8 +124,8 @@ func (n *nfsDriver) GetDriverInfo() *testsuites.DriverInfo {
func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
nv, ok := volume.(*nfsVolume) nv, ok := e2evolume.(*nfsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.VolumeSource{ return &v1.VolumeSource{
NFS: &v1.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
@ -136,8 +136,8 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
} }
} }
func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
nv, ok := volume.(*nfsVolume) nv, ok := e2evolume.(*nfsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.PersistentVolumeSource{ return &v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
@ -164,11 +164,11 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
// TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner // TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner
// is not enough. We should create new clusterrole for testing. // is not enough. We should create new clusterrole for testing.
err := auth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
serviceaccount.MakeUsername(ns.Name, "default"), serviceaccount.MakeUsername(ns.Name, "default"),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization: %v", err) framework.ExpectNoError(err, "Failed to update authorization: %v", err)
@ -199,7 +199,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
case testpatterns.InlineVolume: case testpatterns.InlineVolume:
fallthrough fallthrough
case testpatterns.PreprovisionedPV: case testpatterns.PreprovisionedPV:
c, serverPod, serverIP := volume.NewNFSServer(cs, ns.Name, []string{}) c, serverPod, serverIP := e2evolume.NewNFSServer(cs, ns.Name, []string{})
config.ServerConfig = &c config.ServerConfig = &c
return &nfsVolume{ return &nfsVolume{
serverIP: serverIP, serverIP: serverIP,
@ -241,7 +241,7 @@ func InitGlusterFSDriver() testsuites.TestDriver {
Name: "gluster", Name: "gluster",
InTreePluginName: "kubernetes.io/glusterfs", InTreePluginName: "kubernetes.io/glusterfs",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -265,8 +265,8 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
} }
func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := volume.(*glusterVolume) gv, ok := e2evolume.(*glusterVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
name := gv.prefix + "-server" name := gv.prefix + "-server"
@ -280,8 +280,8 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t
} }
} }
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := volume.(*glusterVolume) gv, ok := e2evolume.(*glusterVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
name := gv.prefix + "-server" name := gv.prefix + "-server"
@ -308,7 +308,7 @@ func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType
cs := f.ClientSet cs := f.ClientSet
ns := f.Namespace ns := f.Namespace
c, serverPod, _ := volume.NewGlusterfsServer(cs, ns.Name) c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name)
config.ServerConfig = &c config.ServerConfig = &c
return &glusterVolume{ return &glusterVolume{
prefix: config.Prefix, prefix: config.Prefix,
@ -391,8 +391,8 @@ func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
iv, ok := volume.(*iSCSIVolume) iv, ok := e2evolume.(*iSCSIVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
volSource := v1.VolumeSource{ volSource := v1.VolumeSource{
@ -409,8 +409,8 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
return &volSource return &volSource
} }
func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
iv, ok := volume.(*iSCSIVolume) iv, ok := e2evolume.(*iSCSIVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
pvSource := v1.PersistentVolumeSource{ pvSource := v1.PersistentVolumeSource{
@ -452,10 +452,10 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
} }
// newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer. // newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
func newISCSIServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, ip, iqn string) { func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) {
// Generate cluster-wide unique IQN // Generate cluster-wide unique IQN
iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace) iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
config = volume.TestConfig{ config = e2evolume.TestConfig{
Namespace: namespace, Namespace: namespace,
Prefix: "iscsi", Prefix: "iscsi",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer), ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
@ -471,15 +471,15 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config volume.Tes
ServerReadyMessage: "iscsi target started", ServerReadyMessage: "iscsi target started",
ServerHostNetwork: true, ServerHostNetwork: true,
} }
pod, ip = volume.CreateStorageServer(cs, config) pod, ip = e2evolume.CreateStorageServer(cs, config)
// Make sure the client runs on the same node as server so we don't need to open any firewalls. // Make sure the client runs on the same node as server so we don't need to open any firewalls.
config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName} config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
return config, pod, ip, iqn return config, pod, ip, iqn
} }
// newRBDServer is a CephRBD-specific wrapper for CreateStorageServer. // newRBDServer is a CephRBD-specific wrapper for CreateStorageServer.
func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
config = volume.TestConfig{ config = e2evolume.TestConfig{
Namespace: namespace, Namespace: namespace,
Prefix: "rbd", Prefix: "rbd",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer), ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
@ -489,7 +489,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestC
}, },
ServerReadyMessage: "Ceph is ready", ServerReadyMessage: "Ceph is ready",
} }
pod, ip = volume.CreateStorageServer(cs, config) pod, ip = e2evolume.CreateStorageServer(cs, config)
// create secrets for the server // create secrets for the server
secret = &v1.Secret{ secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
@ -543,7 +543,7 @@ func InitRbdDriver() testsuites.TestDriver {
InTreePluginName: "kubernetes.io/rbd", InTreePluginName: "kubernetes.io/rbd",
FeatureTag: "[Feature:Volumes][Serial]", FeatureTag: "[Feature:Volumes][Serial]",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -571,8 +571,8 @@ func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo {
func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
rv, ok := volume.(*rbdVolume) rv, ok := e2evolume.(*rbdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
volSource := v1.VolumeSource{ volSource := v1.VolumeSource{
@ -593,8 +593,8 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
return &volSource return &volSource
} }
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
rv, ok := volume.(*rbdVolume) rv, ok := e2evolume.(*rbdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
f := rv.f f := rv.f
@ -671,7 +671,7 @@ func InitCephFSDriver() testsuites.TestDriver {
InTreePluginName: "kubernetes.io/cephfs", InTreePluginName: "kubernetes.io/cephfs",
FeatureTag: "[Feature:Volumes][Serial]", FeatureTag: "[Feature:Volumes][Serial]",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -694,8 +694,8 @@ func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo {
func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := volume.(*cephVolume) cv, ok := e2evolume.(*cephVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
return &v1.VolumeSource{ return &v1.VolumeSource{
@ -710,8 +710,8 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
} }
} }
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := volume.(*cephVolume) cv, ok := e2evolume.(*cephVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
ns := cv.f.Namespace ns := cv.f.Namespace
@ -793,7 +793,7 @@ func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
// hostPath doesn't support readOnly volume // hostPath doesn't support readOnly volume
if readOnly { if readOnly {
return nil return nil
@ -868,8 +868,8 @@ func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
hv, ok := volume.(*hostPathSymlinkVolume) hv, ok := e2evolume.(*hostPathSymlinkVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")
// hostPathSymlink doesn't support readOnly volume // hostPathSymlink doesn't support readOnly volume
@ -1010,7 +1010,7 @@ func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo {
func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
// emptydir doesn't support readOnly volume // emptydir doesn't support readOnly volume
if readOnly { if readOnly {
return nil return nil
@ -1059,7 +1059,7 @@ func InitCinderDriver() testsuites.TestDriver {
Name: "cinder", Name: "cinder",
InTreePluginName: "kubernetes.io/cinder", InTreePluginName: "kubernetes.io/cinder",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -1089,8 +1089,8 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("openstack") e2eskipper.SkipUnlessProviderIs("openstack")
} }
func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := volume.(*cinderVolume) cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
volSource := v1.VolumeSource{ volSource := v1.VolumeSource{
@ -1105,8 +1105,8 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test
return &volSource return &volSource
} }
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := volume.(*cinderVolume) cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
pvSource := v1.PersistentVolumeSource{ pvSource := v1.PersistentVolumeSource{
@ -1233,7 +1233,7 @@ func InitGcePdDriver() testsuites.TestDriver {
Name: "gcepd", Name: "gcepd",
InTreePluginName: "kubernetes.io/gce-pd", InTreePluginName: "kubernetes.io/gce-pd",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: supportedTypes, SupportedFsType: supportedTypes,
@ -1267,8 +1267,8 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
} }
} }
func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := volume.(*gcePdVolume) gv, ok := e2evolume.(*gcePdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
volSource := v1.VolumeSource{ volSource := v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
@ -1282,8 +1282,8 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
return &volSource return &volSource
} }
func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := volume.(*gcePdVolume) gv, ok := e2evolume.(*gcePdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
pvSource := v1.PersistentVolumeSource{ pvSource := v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
@ -1372,7 +1372,7 @@ func InitVSphereDriver() testsuites.TestDriver {
Name: "vsphere", Name: "vsphere",
InTreePluginName: "kubernetes.io/vsphere-volume", InTreePluginName: "kubernetes.io/vsphere-volume",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -1398,8 +1398,8 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("vsphere") e2eskipper.SkipUnlessProviderIs("vsphere")
} }
func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
vsv, ok := volume.(*vSphereVolume) vsv, ok := e2evolume.(*vSphereVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
// vSphere driver doesn't seem to support readOnly volume // vSphere driver doesn't seem to support readOnly volume
@ -1418,8 +1418,8 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes
return &volSource return &volSource
} }
func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
vsv, ok := volume.(*vSphereVolume) vsv, ok := e2evolume.(*vSphereVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
// vSphere driver doesn't seem to support readOnly volume // vSphere driver doesn't seem to support readOnly volume
@ -1496,7 +1496,7 @@ func InitAzureDiskDriver() testsuites.TestDriver {
Name: "azure-disk", Name: "azure-disk",
InTreePluginName: "kubernetes.io/azure-disk", InTreePluginName: "kubernetes.io/azure-disk",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -1529,8 +1529,8 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
e2eskipper.SkipUnlessProviderIs("azure") e2eskipper.SkipUnlessProviderIs("azure")
} }
func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
av, ok := volume.(*azureDiskVolume) av, ok := e2evolume.(*azureDiskVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@ -1549,8 +1549,8 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume t
return &volSource return &volSource
} }
func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := volume.(*azureDiskVolume) av, ok := e2evolume.(*azureDiskVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@ -1626,7 +1626,7 @@ func InitAwsDriver() testsuites.TestDriver {
Name: "aws", Name: "aws",
InTreePluginName: "kubernetes.io/aws-ebs", InTreePluginName: "kubernetes.io/aws-ebs",
MaxFileSize: testpatterns.FileSizeMedium, MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
SupportedFsType: sets.NewString( SupportedFsType: sets.NewString(
@ -1664,8 +1664,8 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("aws") e2eskipper.SkipUnlessProviderIs("aws")
} }
func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
av, ok := volume.(*awsVolume) av, ok := e2evolume.(*awsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
volSource := v1.VolumeSource{ volSource := v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
@ -1679,8 +1679,8 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
return &volSource return &volSource
} }
func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := volume.(*awsVolume) av, ok := e2evolume.(*awsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
pvSource := v1.PersistentVolumeSource{ pvSource := v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
@ -1921,8 +1921,8 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
} }
} }
func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
lv, ok := volume.(*localVolume) lv, ok := e2evolume.(*localVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume") framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume")
return &v1.PersistentVolumeSource{ return &v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{ Local: &v1.LocalVolumeSource{


@ -31,10 +31,10 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config" e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@ -122,7 +122,7 @@ type driverDefinition struct {
// SupportedSizeRange defines the desired size of dynamically // SupportedSizeRange defines the desired size of dynamically
// provisioned volumes. // provisioned volumes.
SupportedSizeRange volume.SizeRange SupportedSizeRange e2evolume.SizeRange
// ClientNodeName selects a specific node for scheduling test pods. // ClientNodeName selects a specific node for scheduling test pods.
// Can be left empty. Most drivers should not need this and instead // Can be left empty. Most drivers should not need this and instead
@ -146,7 +146,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
} }
func init() { func init() {
config.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once") e2econfig.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once")
} }
// testDriverParameter is used to hook loading of the driver // testDriverParameter is used to hook loading of the driver
@ -203,7 +203,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
"", // Default fsType "", // Default fsType
), ),
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
} }
@ -276,13 +276,13 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
} }
} }
func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
var ( var (
sc *storagev1.StorageClass sc *storagev1.StorageClass
err error err error
) )
f := config.Framework f := e2econfig.Framework
switch { switch {
case d.StorageClass.FromName: case d.StorageClass.FromName:
@ -331,15 +331,15 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
return snapshotClass, nil return snapshotClass, nil
} }
func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured { func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured {
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" { if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name) e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
} }
f := config.Framework f := e2econfig.Framework
snapshotter := d.DriverInfo.Name snapshotter := d.DriverInfo.Name
parameters := map[string]string{} parameters := map[string]string{}
ns := config.Framework.Namespace.Name ns := e2econfig.Framework.Namespace.Name
suffix := "vsc" suffix := "vsc"
switch { switch {
@ -368,24 +368,24 @@ func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *u
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix) return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
} }
func (d *driverDefinition) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 { if len(d.InlineVolumes) == 0 {
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name) e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
} }
volume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)] e2evolume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return volume.Attributes, volume.Shared, volume.ReadOnly return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
} }
func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string { func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string {
return d.DriverInfo.Name return d.DriverInfo.Name
} }
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
config := &testsuites.PerTestConfig{ e2econfig := &testsuites.PerTestConfig{
Driver: d, Driver: d,
Prefix: "external", Prefix: "external",
Framework: f, Framework: f,
ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName}, ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
} }
return config, func() {} return e2econfig, func() {}
} }


@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
) )
@ -34,7 +34,7 @@ func TestDriverParameter(t *testing.T) {
"", // Default fsType "", // Default fsType
), ),
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
} }


@ -32,8 +32,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -51,8 +51,8 @@ const (
// testFlexVolume tests that a client pod using a given flexvolume driver // testFlexVolume tests that a client pod using a given flexvolume driver
// successfully mounts it and runs // successfully mounts it and runs
func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framework) { func testFlexVolume(driver string, config e2evolume.TestConfig, f *framework.Framework) {
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: v1.VolumeSource{ Volume: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{ FlexVolume: &v1.FlexVolumeSource{
@ -64,7 +64,7 @@ func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framew
ExpectedContent: "Hello from flexvolume!", ExpectedContent: "Hello from flexvolume!",
}, },
} }
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
} }
// installFlex installs the driver found at filePath on the node, and restarts // installFlex installs the driver found at filePath on the node, and restarts
@ -92,7 +92,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir) cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host, true /*failOnError*/) sshAndLog(cmd, host, true /*failOnError*/)
data := testfiles.ReadOrDie(filePath) data := e2etestfiles.ReadOrDie(filePath)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data)) cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host, true /*failOnError*/) sshAndLog(cmd, host, true /*failOnError*/)
@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var cs clientset.Interface var cs clientset.Interface
var ns *v1.Namespace var ns *v1.Namespace
var node *v1.Node var node *v1.Node
var config volume.TestConfig var config e2evolume.TestConfig
var suffix string var suffix string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var err error var err error
node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
config = volume.TestConfig{ config = e2evolume.TestConfig{
Namespace: ns.Name, Namespace: ns.Name,
Prefix: "flex", Prefix: "flex",
ClientNodeSelection: e2epod.NodeSelection{Name: node.Name}, ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},


@ -31,7 +31,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectEqual(len(pvs), 1) framework.ExpectEqual(len(pvs), 1)
ginkgo.By("Creating a deployment with the provisioned volume") ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Getting a pod from deployment") ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment) podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
framework.ExpectNoError(err, "While getting pods from deployment") framework.ExpectNoError(err, "While getting pods from deployment")
gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]


@ -33,7 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
// Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted. // Keeping pod on same node reproduces the scenario that volume might already be mounted when resize is attempted.
// We should consider adding a unit test that exercises this better. // We should consider adding a unit test that exercises this better.
ginkgo.By("Creating a deployment with selected PVC") ginkgo.By("Creating a deployment with selected PVC")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err) framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish") framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Getting a pod from deployment") ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment) podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
framework.ExpectNoError(err, "While getting pods from deployment") framework.ExpectNoError(err, "While getting pods from deployment")
gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
@ -172,7 +172,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) { func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
var runningPod v1.Pod var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment) podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get pods for deployment: %v", err) return false, fmt.Errorf("failed to get pods for deployment: %v", err)
} }


@ -36,7 +36,7 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns} volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel) selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod. // Start the NFS server pod.
_, nfsServerPod, nfsServerIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) _, nfsServerPod, nfsServerIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
nfsPVconfig = e2epv.PersistentVolumeConfig{ nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-", NamePrefix: "nfs-",
Labels: volLabel, Labels: volLabel,


@ -44,7 +44,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -1194,12 +1194,12 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{}) ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss) e2estatefulset.WaitForRunningAndReady(config.client, ssReplicas, ss)
return ss return ss
} }
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) { func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) {
pods := e2esset.GetPodList(config.client, ss) pods := e2estatefulset.GetPodList(config.client, ss)
nodes := sets.NewString() nodes := sets.NewString()
for _, pod := range pods.Items { for _, pod := range pods.Items {


@ -33,8 +33,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
) )
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
_, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"}) _, nfsServerPod, serverIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = e2epv.PersistentVolumeConfig{ pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-", NamePrefix: "nfs-",
Labels: volLabel, Labels: volLabel,
@ -315,7 +315,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.Context("pods that use multiple volumes", func() { ginkgo.Context("pods that use multiple volumes", func() {
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
e2esset.DeleteAllStatefulSets(c, ns) e2estatefulset.DeleteAllStatefulSets(c, ns)
}) })
ginkgo.It("should be reschedulable [Slow]", func() { ginkgo.It("should be reschedulable [Slow]", func() {
@ -355,13 +355,13 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe) spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, 1, ss) e2estatefulset.WaitForRunningAndReady(c, 1, ss)
ginkgo.By("Deleting the StatefulSet but not the volumes") ginkgo.By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick // Scale down to 0 first so that the Delete is quick
ss, err = e2esset.Scale(c, ss, 0) ss, err = e2estatefulset.Scale(c, ss, 0)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForStatusReplicas(c, ss, 0) e2estatefulset.WaitForStatusReplicas(c, ss, 0)
err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{}) err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe) spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{}) ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, 1, ss) e2estatefulset.WaitForRunningAndReady(c, 1, ss)
}) })
}) })
}) })


@ -19,19 +19,19 @@ package testpatterns
import ( import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
) )
const ( const (
// MinFileSize represents minimum file size (1 MiB) for testing // MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * volume.MiB MinFileSize = 1 * e2evolume.MiB
// FileSizeSmall represents small file size (1 MiB) for testing // FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * volume.MiB FileSizeSmall = 1 * e2evolume.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing // FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * volume.MiB FileSizeMedium = 100 * e2evolume.MiB
// FileSizeLarge represents large file size (1 GiB) for testing // FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * volume.GiB FileSizeLarge = 1 * e2evolume.GiB
) )
// TestVolType represents a volume type to be tested in a TestSuite // TestVolType represents a volume type to be tested in a TestSuite


@ -26,7 +26,7 @@ limitations under the License.
package testsuites_test package testsuites_test
import ( import (
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/testsuites"
) )
@ -39,7 +39,7 @@ func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo {
Name: "fake", Name: "fake",
FeatureTag: "", FeatureTag: "",
TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV}, TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV},
SupportedSizeRange: volume.SizeRange{Min: "1Mi", Max: "1Gi"}, SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"},
} }
} }


@ -40,10 +40,10 @@ import (
"k8s.io/component-base/metrics/testutil" "k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs" "k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -77,7 +77,7 @@ type TestSuiteInfo struct {
Name string // name of the TestSuite Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange volume.SizeRange // Size range supported by the test suite SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
} }
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@ -181,7 +181,7 @@ type VolumeResource struct {
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with // CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types. // different test pattern volume types.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange volume.SizeRange) *VolumeResource { func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
r := VolumeResource{ r := VolumeResource{
Config: config, Config: config,
Pattern: pattern, Pattern: pattern,
@ -423,12 +423,12 @@ func deleteStorageClass(cs clientset.Interface, className string) error {
// the testsuites package whereas volume.TestConfig is merely // the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect, // an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API. // which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *PerTestConfig) volume.TestConfig { func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
if in.ServerConfig != nil { if in.ServerConfig != nil {
return *in.ServerConfig return *in.ServerConfig
} }
return volume.TestConfig{ return e2evolume.TestConfig{
Namespace: in.Framework.Namespace.Name, Namespace: in.Framework.Namespace.Name,
Prefix: in.Prefix, Prefix: in.Prefix,
ClientNodeSelection: in.ClientNodeSelection, ClientNodeSelection: in.ClientNodeSelection,
@ -439,7 +439,7 @@ func convertTestConfig(in *PerTestConfig) volume.TestConfig {
// intersection of the intervals (if it exists) and return the minimum of the intersection // intersection of the intervals (if it exists) and return the minimum of the intersection
// to be used as the claim size for the test. // to be used as the claim size for the test.
// if value not set, that means there's no minimum or maximum size limitation and we set default size for it. // if value not set, that means there's no minimum or maximum size limitation and we set default size for it.
func getSizeRangesIntersection(first volume.SizeRange, second volume.SizeRange) (string, error) { func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error var err error
@ -575,7 +575,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
nodeLimit := 25 nodeLimit := 25
metricsGrabber, err := metrics.NewMetricsGrabber(c, nil, true, false, true, false, false) metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil { if err != nil {
framework.ExpectNoError(err, "Error creating metrics grabber: %v", err) framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)


@ -19,7 +19,7 @@ package testsuites
import ( import (
"testing" "testing"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
) )
// getSizeRangesIntersection takes two instances of storage size ranges and determines the // getSizeRangesIntersection takes two instances of storage size ranges and determines the
@ -43,8 +43,8 @@ import (
// |---------------------------------------------------------------| // |---------------------------------------------------------------|
func Test_getSizeRangesIntersection(t *testing.T) { func Test_getSizeRangesIntersection(t *testing.T) {
type args struct { type args struct {
first volume.SizeRange first e2evolume.SizeRange
second volume.SizeRange second e2evolume.SizeRange
} }
tests := []struct { tests := []struct {
name string name string
@ -55,10 +55,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #1: first{min=A,max=?} second{min=C,max=?} where C > A ", name: "case #1: first{min=A,max=?} second{min=C,max=?} where C > A ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "10Gi", Min: "10Gi",
}, },
}, },
@ -68,10 +68,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #1: first{min=A,max=?} second{min=C,max=?} where C < A ", name: "case #1: first{min=A,max=?} second{min=C,max=?} where C < A ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "1Gi", Min: "1Gi",
}, },
}, },
@ -81,10 +81,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #2: first{min=A,max=?} second{min=C,max=D} where A > D ", name: "case #2: first{min=A,max=?} second{min=C,max=D} where A > D ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "1Gi", Min: "1Gi",
Max: "4Gi", Max: "4Gi",
}, },
@ -95,11 +95,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #2: first{min=A,max=?} second{min=C,max=D} where D > A > C ", name: "case #2: first{min=A,max=?} second{min=C,max=D} where D > A > C ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "", Max: "",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "3Gi", Min: "3Gi",
Max: "10Gi", Max: "10Gi",
}, },
@ -110,11 +110,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #2: first{min=A,max=?} second{min=C,max=D} where A < C ", name: "case #2: first{min=A,max=?} second{min=C,max=D} where A < C ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "", Max: "",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "6Gi", Min: "6Gi",
Max: "10Gi", Max: "10Gi",
}, },
@ -125,11 +125,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #3: first{min=A,max=?} second{min=?,max=D} where A > D", name: "case #3: first{min=A,max=?} second{min=?,max=D} where A > D",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "", Max: "",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "1Gi", Max: "1Gi",
}, },
}, },
@ -139,11 +139,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #3: first{min=A,max=?} second{min=?,max=D} where A < D", name: "case #3: first{min=A,max=?} second{min=?,max=D} where A < D",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "", Max: "",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
}, },
@ -153,11 +153,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #4: first{min=A,max=?} second{min=?,max=?} ", name: "case #4: first{min=A,max=?} second{min=?,max=?} ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "", Max: "",
}, },
second: volume.SizeRange{}, second: e2evolume.SizeRange{},
}, },
want: "5Gi", want: "5Gi",
wantErr: false, wantErr: false,
@ -166,11 +166,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #5: first{min=A,max=B} second{min=C,max=?} where C < A ", name: "case #5: first{min=A,max=B} second{min=C,max=?} where C < A ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "1Gi", Min: "1Gi",
}, },
}, },
@ -180,11 +180,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #5: first{min=A,max=B} second{min=C,max=?} where B > C > A ", name: "case #5: first{min=A,max=B} second{min=C,max=?} where B > C > A ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "6Gi", Min: "6Gi",
}, },
}, },
@ -194,11 +194,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #5: first{min=A,max=B} second{min=C,max=?} where C > B ", name: "case #5: first{min=A,max=B} second{min=C,max=?} where C > B ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "15Gi", Min: "15Gi",
}, },
}, },
@ -208,11 +208,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < B < C < D", name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < B < C < D",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "6Gi", Max: "6Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "7Gi", Min: "7Gi",
Max: "8Gi", Max: "8Gi",
}, },
@ -223,11 +223,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < C < B < D ", name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < C < B < D ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "8Gi", Min: "8Gi",
Max: "15Gi", Max: "15Gi",
}, },
@ -238,11 +238,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #7: first{min=A,max=B} second{min=?,max=D} where D < A", name: "case #7: first{min=A,max=B} second{min=?,max=D} where D < A",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "3Gi", Max: "3Gi",
}, },
}, },
@ -252,11 +252,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #7: first{min=A,max=B} second{min=?,max=D} where B > D > A", name: "case #7: first{min=A,max=B} second{min=?,max=D} where B > D > A",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "8Gi", Max: "8Gi",
}, },
}, },
@ -266,11 +266,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #7: first{min=A,max=B} second{min=?,max=D} where D > B", name: "case #7: first{min=A,max=B} second{min=?,max=D} where D > B",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "15Gi", Max: "15Gi",
}, },
}, },
@ -280,11 +280,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #8: first{min=A,max=B} second{min=?,max=?}", name: "case #8: first{min=A,max=B} second{min=?,max=?}",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{}, second: e2evolume.SizeRange{},
}, },
want: "5Gi", want: "5Gi",
wantErr: false, wantErr: false,
@ -292,10 +292,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #9: first{min=?,max=B} second{min=C,max=?} where C > B", name: "case #9: first{min=?,max=B} second{min=C,max=?} where C > B",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "5Gi", Max: "5Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "10Gi", Min: "10Gi",
}, },
}, },
@ -305,10 +305,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #9: first{min=?,max=B} second{min=C,max=?} where C < B", name: "case #9: first{min=?,max=B} second{min=C,max=?} where C < B",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
}, },
@ -318,10 +318,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #10: first{min=?,max=B} second{min=C,max=D} where B > D", name: "case #10: first{min=?,max=B} second{min=C,max=D} where B > D",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "1Gi", Min: "1Gi",
Max: "5Gi", Max: "5Gi",
}, },
@ -332,10 +332,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #10: first{min=?,max=B} second{min=C,max=D} where C < B < D", name: "case #10: first{min=?,max=B} second{min=C,max=D} where C < B < D",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "15Gi", Max: "15Gi",
}, },
@ -346,10 +346,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #10: first{min=?,max=B} second{min=C,max=D} where B < C", name: "case #10: first{min=?,max=B} second{min=C,max=D} where B < C",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "15Gi", Min: "15Gi",
Max: "20Gi", Max: "20Gi",
}, },
@ -360,10 +360,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #11: first{min=?,max=B} second{min=?,max=D} where D < B", name: "case #11: first{min=?,max=B} second{min=?,max=D} where D < B",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "5Gi", Max: "5Gi",
}, },
}, },
@ -373,10 +373,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #11: first{min=?,max=B} second{min=?,max=D} where D > B", name: "case #11: first{min=?,max=B} second{min=?,max=D} where D > B",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "15Gi", Max: "15Gi",
}, },
}, },
@ -386,10 +386,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #12: first{min=?,max=B} second{min=?,max=?} ", name: "case #12: first{min=?,max=B} second{min=?,max=?} ",
args: args{ args: args{
first: volume.SizeRange{ first: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
second: volume.SizeRange{}, second: e2evolume.SizeRange{},
}, },
want: minValidSize, want: minValidSize,
wantErr: false, wantErr: false,
@ -397,8 +397,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #13: first{min=?,max=?} second{min=C,max=?} ", name: "case #13: first{min=?,max=?} second{min=C,max=?} ",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
}, },
}, },
@ -408,8 +408,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C < D", name: "case #14: first{min=?,max=?} second{min=C,max=D} where C < D",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "5Gi", Min: "5Gi",
Max: "10Gi", Max: "10Gi",
}, },
@ -420,8 +420,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C > D", name: "case #14: first{min=?,max=?} second{min=C,max=D} where C > D",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "10Gi", Min: "10Gi",
Max: "5Gi", Max: "5Gi",
}, },
@ -432,8 +432,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C = D", name: "case #14: first{min=?,max=?} second{min=C,max=D} where C = D",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{ second: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
Max: "1Mi", Max: "1Mi",
}, },
@ -444,8 +444,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #15: first{min=?,max=?} second{min=?,max=D}", name: "case #15: first{min=?,max=?} second{min=?,max=D}",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{ second: e2evolume.SizeRange{
Max: "10Gi", Max: "10Gi",
}, },
}, },
@ -455,8 +455,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{ {
name: "case #16: first{min=?,max=?} second{min=?,max=?}", name: "case #16: first{min=?,max=?} second{min=?,max=?}",
args: args{ args: args{
first: volume.SizeRange{}, first: e2evolume.SizeRange{},
second: volume.SizeRange{}, second: e2evolume.SizeRange{},
}, },
want: minValidSize, want: minValidSize,
wantErr: false, wantErr: false,
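For reference, the e2evolume references in the test table above assume the framework volume package is imported under an explicit alias, as the import hunks later in this change show. A minimal, self-contained sketch of that shape (the package name and the values are illustrative, not taken from this file):

package main

import (
    "fmt"

    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

func main() {
    // Size ranges expressed through the aliased package, matching the renamed
    // references in the test cases above.
    first := e2evolume.SizeRange{Min: "5Gi", Max: "10Gi"}
    second := e2evolume.SizeRange{Min: "1Gi"}
    fmt.Println(first, second)
}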
View File
@ -32,7 +32,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils" storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -294,8 +294,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: "csi-volume-tester", Name: "csi-volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage), Image: e2evolume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command), Command: e2evolume.GenerateScriptCmd(command),
}, },
}, },
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
View File
@ -32,7 +32,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -54,7 +54,7 @@ func InitMultiVolumeTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV, testpatterns.BlockVolModeDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
View File
@ -38,7 +38,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -78,7 +78,7 @@ func InitProvisioningTestSuite() TestSuite {
testpatterns.BlockVolModeDynamicPV, testpatterns.BlockVolModeDynamicPV,
testpatterns.NtfsDynamicPV, testpatterns.NtfsDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
@ -221,7 +221,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data") ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *createVolumeSource(claim.Name, false /* readOnly */), Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode, Mode: pattern.VolMode,
@ -229,7 +229,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent, ExpectedContent: expectedContent,
}, },
} }
volume.TestVolumeClientSlow(f, testConfig, nil, "", tests) e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
@ -249,7 +249,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data") ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *createVolumeSource(claim.Name, false /* readOnly */), Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode, Mode: pattern.VolMode,
@ -257,7 +257,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent, ExpectedContent: expectedContent,
}, },
} }
volume.TestVolumeClientSlow(f, testConfig, nil, "", tests) e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
@ -297,7 +297,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
myTestCase.Class = nil // Do not create/delete the storage class in TestDynamicProvisioning, it already exists. myTestCase.Class = nil // Do not create/delete the storage class in TestDynamicProvisioning, it already exists.
myTestCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { myTestCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i)) ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *createVolumeSource(claim.Name, false /* readOnly */), Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode, Mode: pattern.VolMode,
@ -305,7 +305,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent, ExpectedContent: expectedContent,
}, },
} }
volume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests) e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests)
} }
myTestCase.TestDynamicProvisioning() myTestCase.TestDynamicProvisioning()
}(i) }(i)
@ -467,7 +467,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
pod = nil // Don't stop twice. pod = nil // Don't stop twice.
// Get a new copy of the PV // Get a new copy of the PV
volume, err := getBoundPV(client, claim) e2evolume, err := getBoundPV(client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
@ -475,7 +475,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// We give the second pod the additional responsibility of checking the volume has // We give the second pod the additional responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any // been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range volume.Spec.MountOptions { for _, option := range e2evolume.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas // Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
} }
@ -486,7 +486,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
} }
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName}) RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return volume return e2evolume
} }
// PVMultiNodeCheck checks that a PV retains data when moved between nodes. // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
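In the PVWriteReadSingleNodeCheck hunk above, the rename also touches a local variable: the bound PV returned by getBoundPV used to be stored in a variable named volume, which shadowed the old package name, and it now carries the same name as the new package alias. A minimal sketch of the resulting shape, assuming getBoundPV returns a *v1.PersistentVolume as its use here suggests:

// Inside PVWriteReadSingleNodeCheck, after the rename:
e2evolume, err := getBoundPV(client, claim) // local PV object; shadows the e2evolume package alias in this scope
framework.ExpectNoError(err)
for _, option := range e2evolume.Spec.MountOptions {
    _ = option // each PV mount option is folded into the reader pod's check command
}
return e2evolume // the bound PV, under its new local name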
@ -650,8 +650,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: "volume-tester", Name: "volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage), Image: e2evolume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command), Command: e2evolume.GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "my-volume", Name: "my-volume",
@ -708,7 +708,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
func prepareSnapshotDataSourceForProvisioning( func prepareSnapshotDataSourceForProvisioning(
f *framework.Framework, f *framework.Framework,
config volume.TestConfig, config e2evolume.TestConfig,
client clientset.Interface, client clientset.Interface,
dynamicClient dynamic.Interface, dynamicClient dynamic.Interface,
initClaim *v1.PersistentVolumeClaim, initClaim *v1.PersistentVolumeClaim,
@ -729,7 +729,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.ExpectNoError(err) framework.ExpectNoError(err)
// write namespace to the /mnt/test (= the volume). // write namespace to the /mnt/test (= the volume).
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */), Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */),
Mode: mode, Mode: mode,
@ -737,7 +737,7 @@ func prepareSnapshotDataSourceForProvisioning(
ExpectedContent: injectContent, ExpectedContent: injectContent,
}, },
} }
volume.InjectContent(f, config, nil, "", tests) e2evolume.InjectContent(f, config, nil, "", tests)
ginkgo.By("[Initialize dataSource]creating a SnapshotClass") ginkgo.By("[Initialize dataSource]creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(SnapshotClassGVR).Create(context.TODO(), snapshotClass, metav1.CreateOptions{}) snapshotClass, err = dynamicClient.Resource(SnapshotClassGVR).Create(context.TODO(), snapshotClass, metav1.CreateOptions{})
@ -784,7 +784,7 @@ func prepareSnapshotDataSourceForProvisioning(
func preparePVCDataSourceForProvisioning( func preparePVCDataSourceForProvisioning(
f *framework.Framework, f *framework.Framework,
config volume.TestConfig, config e2evolume.TestConfig,
client clientset.Interface, client clientset.Interface,
source *v1.PersistentVolumeClaim, source *v1.PersistentVolumeClaim,
class *storagev1.StorageClass, class *storagev1.StorageClass,
@ -802,7 +802,7 @@ func preparePVCDataSourceForProvisioning(
sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{}) sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */), Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */),
Mode: mode, Mode: mode,
@ -810,7 +810,7 @@ func preparePVCDataSourceForProvisioning(
ExpectedContent: injectContent, ExpectedContent: injectContent,
}, },
} }
volume.InjectContent(f, config, nil, "", tests) e2evolume.InjectContent(f, config, nil, "", tests)
dataSourceRef := &v1.TypedLocalObjectReference{ dataSourceRef := &v1.TypedLocalObjectReference{
Kind: "PersistentVolumeClaim", Kind: "PersistentVolumeClaim",
View File
@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -64,7 +64,7 @@ func InitSnapshottableTestSuite() TestSuite {
TestPatterns: []testpatterns.TestPattern{ TestPatterns: []testpatterns.TestPattern{
testpatterns.DynamicSnapshot, testpatterns.DynamicSnapshot,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
View File
@ -37,7 +37,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -70,7 +70,7 @@ func InitSubPathTestSuite() TestSuite {
testpatterns.DefaultFsDynamicPV, testpatterns.DefaultFsDynamicPV,
testpatterns.NtfsDynamicPV, testpatterns.NtfsDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
@ -441,8 +441,8 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
defer cleanup() defer cleanup()
// Change volume container to busybox so we can exec later // Change volume container to busybox so we can exec later
l.pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) l.pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
l.pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") l.pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod) removeUnusedContainers(l.pod)
@ -516,7 +516,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
InitContainers: []v1.Container{ InitContainers: []v1.Container{
{ {
Name: fmt.Sprintf("init-volume-%s", suffix), Name: fmt.Sprintf("init-volume-%s", suffix),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
VolumeMounts: []v1.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
@ -527,7 +527,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath, MountPath: probeVolumePath,
}, },
}, },
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
}, },
{ {
Name: fmt.Sprintf("test-init-subpath-%s", suffix), Name: fmt.Sprintf("test-init-subpath-%s", suffix),
@ -543,7 +543,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath, MountPath: probeVolumePath,
}, },
}, },
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
}, },
{ {
Name: fmt.Sprintf("test-init-volume-%s", suffix), Name: fmt.Sprintf("test-init-volume-%s", suffix),
@ -558,7 +558,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath, MountPath: probeVolumePath,
}, },
}, },
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
}, },
}, },
Containers: []v1.Container{ Containers: []v1.Container{
@ -576,7 +576,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath, MountPath: probeVolumePath,
}, },
}, },
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
}, },
{ {
Name: fmt.Sprintf("test-container-volume-%s", suffix), Name: fmt.Sprintf("test-container-volume-%s", suffix),
@ -591,7 +591,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath, MountPath: probeVolumePath,
}, },
}, },
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext), SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
}, },
}, },
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
@ -608,7 +608,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
}, },
}, },
}, },
SecurityContext: volume.GeneratePodSecurityContext(nil, seLinuxOptions), SecurityContext: e2evolume.GeneratePodSecurityContext(nil, seLinuxOptions),
}, },
} }
} }
@ -651,8 +651,8 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)), Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
Command: volume.GenerateScriptCmd("echo nothing"), Command: e2evolume.GenerateScriptCmd("echo nothing"),
VolumeMounts: []v1.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
@ -673,7 +673,7 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
} }
func setInitCommand(pod *v1.Pod, command string) { func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = volume.GenerateScriptCmd(command) pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(command)
} }
func setWriteCommand(file string, container *v1.Container) { func setWriteCommand(file string, container *v1.Container) {
@ -796,10 +796,10 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000") pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
// Add liveness probe to subpath container // Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{ pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{ Handler: v1.Handler{
@ -905,10 +905,10 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
} }
// Change to busybox // Change to busybox
pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000") pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)) pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000") pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
// If grace period is too short, then there is not enough time for the volume // If grace period is too short, then there is not enough time for the volume
// manager to cleanup the volumes // manager to cleanup the volumes
View File
@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -170,7 +170,7 @@ type DriverInfo struct {
// Maximum single file size supported by this driver // Maximum single file size supported by this driver
MaxFileSize int64 MaxFileSize int64
// The range of disk size supported by this driver // The range of disk size supported by this driver
SupportedSizeRange volume.SizeRange SupportedSizeRange e2evolume.SizeRange
// Map of string for supported fs type // Map of string for supported fs type
SupportedFsType sets.String SupportedFsType sets.String
// Map of string for supported mount option // Map of string for supported mount option
@ -214,7 +214,7 @@ type PerTestConfig struct {
// Some test drivers initialize a storage server. This is // Some test drivers initialize a storage server. This is
// the configuration that then has to be used to run tests. // the configuration that then has to be used to run tests.
// The values above are ignored for such tests. // The values above are ignored for such tests.
ServerConfig *volume.TestConfig ServerConfig *e2evolume.TestConfig
} }
// GetUniqueDriverName returns unique driver name that can be used parallelly in tests // GetUniqueDriverName returns unique driver name that can be used parallelly in tests
View File
@ -34,7 +34,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
) )
@ -63,7 +63,7 @@ func InitVolumeExpandTestSuite() TestSuite {
testpatterns.DefaultFsDynamicPVAllowExpansion, testpatterns.DefaultFsDynamicPVAllowExpansion,
testpatterns.BlockVolModeDynamicPVAllowExpansion, testpatterns.BlockVolModeDynamicPVAllowExpansion,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
View File
@ -39,7 +39,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -71,7 +71,7 @@ func InitVolumeIOTestSuite() TestSuite {
testpatterns.DefaultFsPreprovisionedPV, testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV, testpatterns.DefaultFsDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
@ -180,7 +180,7 @@ func createFileSizes(maxFileSize int64) []int64 {
} }
// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. // Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
var gracePeriod int64 = 1 var gracePeriod int64 = 1
volName := fmt.Sprintf("io-volume-%s", config.Namespace) volName := fmt.Sprintf("io-volume-%s", config.Namespace)
pod := &v1.Pod{ pod := &v1.Pod{
@ -305,7 +305,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
// Note: nil can be passed for the podSecContext parm, in which case it is ignored. // Note: nil can be passed for the podSecContext parm, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` // Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize`
// bytes. // bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace))
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
@ -333,7 +333,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
} }
} else { } else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(volume.PodCleanupTimeout) time.Sleep(e2evolume.PodCleanupTimeout)
} }
}() }()
View File
@ -38,7 +38,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -65,7 +65,7 @@ func InitVolumeModeTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV, testpatterns.BlockVolModeDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
View File
@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -74,7 +74,7 @@ func InitVolumesTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV, testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV, testpatterns.BlockVolModeDynamicPV,
}, },
SupportedSizeRange: volume.SizeRange{ SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi", Min: "1Mi",
}, },
}, },
@ -156,11 +156,11 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
init() init()
defer func() { defer func() {
volume.TestServerCleanup(f, convertTestConfig(l.config)) e2evolume.TestServerCleanup(f, convertTestConfig(l.config))
cleanup() cleanup()
}() }()
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: *l.resource.VolSource, Volume: *l.resource.VolSource,
Mode: pattern.VolMode, Mode: pattern.VolMode,
@ -180,9 +180,9 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// local), plugin skips setting fsGroup if volume is already mounted // local), plugin skips setting fsGroup if volume is already mounted
// and we don't have reliable way to detect volumes are unmounted or // and we don't have reliable way to detect volumes are unmounted or
// not before starting the second pod. // not before starting the second pod.
volume.InjectContent(f, config, fsGroup, pattern.FsType, tests) e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
if driver.GetDriverInfo().Capabilities[CapPersistence] { if driver.GetDriverInfo().Capabilities[CapPersistence] {
volume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests) e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
} else { } else {
ginkgo.By("Skipping persistence check for non-persistent volume") ginkgo.By("Skipping persistence check for non-persistent volume")
} }
@ -228,7 +228,7 @@ func testScriptInPod(
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: fmt.Sprintf("exec-container-%s", suffix), Name: fmt.Sprintf("exec-container-%s", suffix),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)), Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)),
Command: command, Command: command,
VolumeMounts: []v1.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
View File
@ -22,8 +22,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/pkg/errors" "github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
@ -37,7 +35,8 @@ import (
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
) )
// LoadFromManifests loads .yaml or .json manifest files and returns // LoadFromManifests loads .yaml or .json manifest files and returns
@ -80,7 +79,7 @@ func LoadFromManifests(files ...string) ([]interface{}, error) {
func visitManifests(cb func([]byte) error, files ...string) error { func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files { for _, fileName := range files {
data, err := testfiles.Read(fileName) data, err := e2etestfiles.Read(fileName)
if err != nil { if err != nil {
framework.Failf("reading manifest file: %v", err) framework.Failf("reading manifest file: %v", err)
} }
View File
@ -32,7 +32,7 @@ import (
"k8s.io/component-base/metrics/testutil" "k8s.io/component-base/metrics/testutil"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
c clientset.Interface c clientset.Interface
ns string ns string
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
metricsGrabber *metrics.Grabber metricsGrabber *e2emetrics.Grabber
invalidSc *storagev1.StorageClass invalidSc *storagev1.StorageClass
defaultScName string defaultScName string
) )
@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
VolumeMode: &test.VolumeMode, VolumeMode: &test.VolumeMode,
}, ns) }, ns)
metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false) metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil { if err != nil {
framework.Failf("Error creating metrics grabber : %v", err) framework.Failf("Error creating metrics grabber : %v", err)
@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key) kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
// Poll kubelet metrics waiting for the volume to be picked up // Poll kubelet metrics waiting for the volume to be picked up
// by the volume stats collector // by the volume stats collector
var kubeMetrics metrics.KubeletMetrics var kubeMetrics e2emetrics.KubeletMetrics
waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
framework.Logf("Grabbing Kubelet metrics") framework.Logf("Grabbing Kubelet metrics")
// Grab kubelet metrics from the node the pod was scheduled on // Grab kubelet metrics from the node the pod was scheduled on
@ -405,7 +405,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
} }
pvcConfig = e2epv.PersistentVolumeClaimConfig{StorageClassName: &className} pvcConfig = e2epv.PersistentVolumeClaimConfig{StorageClassName: &className}
metrics = []struct { e2emetrics = []struct {
name string name string
dimension string dimension string
}{ }{
@ -431,7 +431,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
controllerMetrics, err := metricsGrabber.GrabFromControllerManager() controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err)
for i, metric := range metrics { for i, metric := range e2emetrics {
expectValues := metricValues[i] expectValues := metricValues[i]
if expectValues == nil { if expectValues == nil {
expectValues = make(map[string]int64) expectValues = make(map[string]int64)
@ -457,7 +457,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Initializes all original metric values. // Initializes all original metric values.
controllerMetrics, err := metricsGrabber.GrabFromControllerManager() controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err)
for _, metric := range metrics { for _, metric := range e2emetrics {
originMetricValues = append(originMetricValues, originMetricValues = append(originMetricValues,
testutil.GetMetricValuesForLabel(testutil.Metrics(controllerMetrics), metric.name, metric.dimension)) testutil.GetMetricValuesForLabel(testutil.Metrics(controllerMetrics), metric.name, metric.dimension))
} }
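In the three hunks above, the renamed identifier is a local table of metric descriptors inside the test body rather than a package reference; wherever that variable is in scope it shadows the e2emetrics import alias. A minimal sketch of the shape (declared with := for self-containedness, and with a hypothetical entry, since the table's contents and declaration are not shown in this diff):

// Local descriptor table; the name e2emetrics shadows the package alias here.
e2emetrics := []struct {
    name      string
    dimension string
}{
    {name: "example_metric_total", dimension: "namespace"}, // hypothetical entry
}
for _, metric := range e2emetrics {
    framework.Logf("checking metric %s by dimension %s", metric.name, metric.dimension)
}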
@ -528,7 +528,7 @@ func newStorageControllerMetrics() *storageControllerMetrics {
} }
} }
func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *metrics.Grabber, pluginName string) *storageControllerMetrics { func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *e2emetrics.Grabber, pluginName string) *storageControllerMetrics {
backoff := wait.Backoff{ backoff := wait.Backoff{
Duration: 10 * time.Second, Duration: 10 * time.Second,
Factor: 1.2, Factor: 1.2,
@ -610,7 +610,7 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName) gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName)
} }
func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics { func getControllerStorageMetrics(ms e2emetrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics {
result := newStorageControllerMetrics() result := newStorageControllerMetrics()
for method, samples := range ms { for method, samples := range ms {
@ -654,7 +654,7 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName
// Finds the sample in the specified metric from `KubeletMetrics` tagged with // Finds the sample in the specified metric from `KubeletMetrics` tagged with
// the specified namespace and pvc name // the specified namespace and pvc name
func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool { func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics e2emetrics.KubeletMetrics) bool {
found := false found := false
errCount := 0 errCount := 0
framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName) framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
@ -683,7 +683,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string
} }
// Wait for the count of a pv controller's metric specified by metricName and dimension bigger than zero. // Wait for the count of a pv controller's metric specified by metricName and dimension bigger than zero.
func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimension string) { func waitForPVControllerSync(metricsGrabber *e2emetrics.Grabber, metricName, dimension string) {
backoff := wait.Backoff{ backoff := wait.Backoff{
Duration: 10 * time.Second, Duration: 10 * time.Second,
Factor: 1.2, Factor: 1.2,
@ -728,7 +728,7 @@ func getStatesMetrics(metricKey string, givenMetrics testutil.Metrics) map[strin
return states return states
} }
func waitForADControllerStatesMetrics(metricsGrabber *metrics.Grabber, metricName string, dimensions []string, stateNames []string) { func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metricName string, dimensions []string, stateNames []string) {
backoff := wait.Backoff{ backoff := wait.Backoff{
Duration: 10 * time.Second, Duration: 10 * time.Second,
Factor: 1.2, Factor: 1.2,
View File
@ -45,7 +45,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
@ -603,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Name: serviceAccountName, Name: serviceAccountName,
} }
err := auth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject) err := e2eauth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject)
framework.ExpectNoError(err) framework.ExpectNoError(err)
roleName := "leader-locking-nfs-provisioner" roleName := "leader-locking-nfs-provisioner"
@ -619,10 +619,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}, metav1.CreateOptions{}) }, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create leader-locking role") framework.ExpectNoError(err, "Failed to create leader-locking role")
err = auth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject) err = e2eauth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1(), err = e2eauth.WaitForAuthorizationUpdate(c.AuthorizationV1(),
serviceaccount.MakeUsername(ns, serviceAccountName), serviceaccount.MakeUsername(ns, serviceAccountName),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization") framework.ExpectNoError(err, "Failed to update authorization")
View File
@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/volume" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
ginkgo.Describe("ConfigMap", func() { ginkgo.Describe("ConfigMap", func() {
ginkgo.It("should be mountable", func() { ginkgo.It("should be mountable", func() {
config := volume.TestConfig{ config := e2evolume.TestConfig{
Namespace: namespace.Name, Namespace: namespace.Name,
Prefix: "configmap", Prefix: "configmap",
} }
@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
}() }()
// Test one ConfigMap mounted several times to test #28502 // Test one ConfigMap mounted several times to test #28502
tests := []volume.Test{ tests := []e2evolume.Test{
{ {
Volume: v1.VolumeSource{ Volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{
@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
ExpectedContent: "this is the second file", ExpectedContent: "this is the second file",
}, },
} }
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests) e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}) })
}) })
}) })
View File
@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
framework.Logf("Deleting all statefulset in namespace: %v", namespace) framework.Logf("Deleting all statefulset in namespace: %v", namespace)
e2esset.DeleteAllStatefulSets(client, namespace) e2estatefulset.DeleteAllStatefulSets(client, namespace)
}) })
ginkgo.It("vsphere statefulset testing", func() { ginkgo.It("vsphere statefulset testing", func() {
@ -82,12 +82,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
ginkgo.By("Creating statefulset") ginkgo.By("Creating statefulset")
statefulset := e2esset.CreateStatefulSet(client, manifestPath, namespace) statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace)
replicas := *(statefulset.Spec.Replicas) replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready // Waiting for pods status to be Ready
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas) e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)
framework.ExpectNoError(e2esset.CheckMount(client, statefulset, mountPath)) framework.ExpectNoError(e2estatefulset.CheckMount(client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2esset.GetPodList(client, statefulset) ssPodsBeforeScaleDown := e2estatefulset.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
@ -105,9 +105,9 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
} }
ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
_, scaledownErr := e2esset.Scale(client, statefulset, replicas-1) _, scaledownErr := e2estatefulset.Scale(client, statefulset, replicas-1)
framework.ExpectNoError(scaledownErr) framework.ExpectNoError(scaledownErr)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas-1) e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas-1)
// After scale down, verify vsphere volumes are detached from deleted pods // After scale down, verify vsphere volumes are detached from deleted pods
ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
@ -126,12 +126,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
} }
ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
_, scaleupErr := e2esset.Scale(client, statefulset, replicas) _, scaleupErr := e2estatefulset.Scale(client, statefulset, replicas)
framework.ExpectNoError(scaleupErr) framework.ExpectNoError(scaleupErr)
e2esset.WaitForStatusReplicas(client, statefulset, replicas) e2estatefulset.WaitForStatusReplicas(client, statefulset, replicas)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas) e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)
ssPodsAfterScaleUp := e2esset.GetPodList(client, statefulset) ssPodsAfterScaleUp := e2estatefulset.GetPodList(client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas") framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
View File
@@ -32,7 +32,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -97,12 +97,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 volumePath := pvs[0].Spec.VsphereVolume.VolumePath
 ginkgo.By("Creating a Deployment")
-deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
+deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
 framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
 defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
 ginkgo.By("Get pod from the deployment")
-podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
+podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
 framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployment with err: %v", err))
 gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
 pod := podList.Items[0]
@@ -179,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deploym
 // getNodeForDeployment returns node name for the Deployment
 func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) {
-podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
+podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
 if err != nil {
 return "", err
 }


@@ -28,7 +28,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 "k8s.io/kubernetes/test/e2e/framework"
-e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 "k8s.io/kubernetes/test/e2e/upgrades"
 "github.com/onsi/ginkgo"
@@ -66,12 +66,12 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 rsClient := c.AppsV1().ReplicaSets(ns)
 ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
-d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
+d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
 deployment, err := deploymentClient.Create(context.TODO(), d, metav1.CreateOptions{})
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
-framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
+framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
 ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
 rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
@@ -87,13 +87,13 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 // Trigger a new rollout so that we have some history.
 ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
-deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
+deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = "updated-name"
 })
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
-framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
+framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
 ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
 rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -155,17 +155,17 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2")
 ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
-framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
+framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
 // Verify the upgraded deployment is active by scaling up the deployment by 1
 ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
-deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
+deploymentWithUpdatedReplicas, err := e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
 *deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
 })
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
-framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas))
+framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas))
 }
 // Teardown cleans up any remaining resources.
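
For reference, a minimal sketch (not part of this commit) of the renamed e2edeployment alias in use; rolloutNginx and its parameters are hypothetical wiring, while NewDeployment, UpdateDeploymentWithRetries, and WaitForDeploymentComplete are called with the same shapes as in the hunks above.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// rolloutNginx creates a one-replica deployment, waits for it to complete,
// then triggers a second rollout and waits for that to complete as well.
func rolloutNginx(c clientset.Interface, ns, name, image string) {
	d := e2edeployment.NewDeployment(name, int32(1), map[string]string{"test": "upgrade"}, "nginx", image, appsv1.RollingUpdateDeploymentStrategyType)
	deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))

	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, name, func(update *appsv1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = "updated-name"
	})
	framework.ExpectNoError(err)
	framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
}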


@@ -26,7 +26,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/kubernetes/test/e2e/framework"
-"k8s.io/kubernetes/test/e2e/framework/replicaset"
+e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
 "k8s.io/kubernetes/test/e2e/upgrades"
 "github.com/onsi/ginkgo"
@@ -63,7 +63,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
-framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
+framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
 r.UID = rs.UID
 }
@@ -87,17 +87,17 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 }
 ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
-framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
+framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
 // Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
 ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
-_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
+_, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
 *rs.Spec.Replicas = scaleNum
 })
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
-framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
+framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
 }
 // Teardown cleans up any remaining resources.


@@ -26,7 +26,7 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/kubernetes/test/e2e/framework"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 "k8s.io/kubernetes/test/e2e/upgrades"
 )
@@ -79,10 +79,10 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
 statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
 podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
 ns := f.Namespace.Name
-t.set = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
+t.set = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
 t.service = createStatefulSetService(ssName, labels)
 *(t.set.Spec.Replicas) = 3
-e2esset.PauseNewPods(t.set)
+e2estatefulset.PauseNewPods(t.set)
 ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
 _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service, metav1.CreateOptions{})
@@ -94,7 +94,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
 framework.ExpectNoError(err)
 ginkgo.By("Saturating stateful set " + t.set.Name)
-e2esset.Saturate(f.ClientSet, t.set)
+e2estatefulset.Saturate(f.ClientSet, t.set)
 t.verify(f)
 t.restart(f)
 t.verify(f)
@@ -108,26 +108,26 @@ func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct
 // Teardown deletes all StatefulSets
 func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
-e2esset.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
+e2estatefulset.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
 }
 func (t *StatefulSetUpgradeTest) verify(f *framework.Framework) {
 ginkgo.By("Verifying statefulset mounted data directory is usable")
-framework.ExpectNoError(e2esset.CheckMount(f.ClientSet, t.set, "/data"))
+framework.ExpectNoError(e2estatefulset.CheckMount(f.ClientSet, t.set, "/data"))
 ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
-framework.ExpectNoError(e2esset.CheckHostname(f.ClientSet, t.set))
+framework.ExpectNoError(e2estatefulset.CheckHostname(f.ClientSet, t.set))
 ginkgo.By("Verifying statefulset set proper service name")
-framework.ExpectNoError(e2esset.CheckServiceName(t.set, t.set.Spec.ServiceName))
+framework.ExpectNoError(e2estatefulset.CheckServiceName(t.set, t.set.Spec.ServiceName))
 cmd := "echo $(hostname) > /data/hostname; sync;"
 ginkgo.By("Running " + cmd + " in all stateful pods")
-framework.ExpectNoError(e2esset.ExecInStatefulPods(f.ClientSet, t.set, cmd))
+framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(f.ClientSet, t.set, cmd))
 }
 func (t *StatefulSetUpgradeTest) restart(f *framework.Framework) {
 ginkgo.By("Restarting statefulset " + t.set.Name)
-e2esset.Restart(f.ClientSet, t.set)
-e2esset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set)
+e2estatefulset.Restart(f.ClientSet, t.set)
+e2estatefulset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set)
 }


@@ -32,8 +32,8 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 const cassandraManifestPath = "test/e2e/testing-manifests/statefulset/cassandra"
@@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 func cassandraKubectlCreate(ns, file string) {
-input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file)))
+input := string(e2etestfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file)))
 framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
@@ -78,7 +78,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 cassandraKubectlCreate(ns, "pdb.yaml")
 ginkgo.By("Creating a Cassandra StatefulSet")
-e2esset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns)
+e2estatefulset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns)
 ginkgo.By("Creating a cassandra-test-server deployment")
 cassandraKubectlCreate(ns, "tester.yaml")


@@ -32,8 +32,8 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
@@ -59,7 +59,7 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 func kubectlCreate(ns, file string) {
-input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file)))
+input := string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file)))
 framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
@@ -73,7 +73,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 kubectlCreate(ns, "pdb.yaml")
 ginkgo.By("Creating an etcd StatefulSet")
-e2esset.CreateStatefulSet(f.ClientSet, manifestPath, ns)
+e2estatefulset.CreateStatefulSet(f.ClientSet, manifestPath, ns)
 ginkgo.By("Creating an etcd--test-server deployment")
 kubectlCreate(ns, "tester.yaml")


@@ -32,8 +32,8 @@ import (
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
-e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
@@ -61,7 +61,7 @@ func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
 }
 func mysqlKubectlCreate(ns, file string) {
-input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file)))
+input := string(e2etestfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file)))
 framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
 }
@@ -88,7 +88,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 mysqlKubectlCreate(ns, "configmap.yaml")
 ginkgo.By("Creating a mysql StatefulSet")
-e2esset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns)
+e2estatefulset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns)
 ginkgo.By("Creating a mysql-test-server deployment")
 mysqlKubectlCreate(ns, "tester.yaml")
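
The Cassandra, etcd, and MySQL upgrade tests above all follow the same pattern after the rename; a minimal sketch (not part of this commit) is shown below. The helper name kubectlCreateFromManifest and its dir/file parameters are hypothetical, while e2etestfiles.ReadOrDie and framework.RunKubectlOrDieInput are called exactly as in those hunks.

package example

import (
	"fmt"
	"path/filepath"

	"k8s.io/kubernetes/test/e2e/framework"
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// kubectlCreateFromManifest loads a manifest from the e2e test data tree and
// creates it in the given namespace via kubectl, mirroring the helpers above.
func kubectlCreateFromManifest(ns, dir, file string) {
	input := string(e2etestfiles.ReadOrDie(filepath.Join(dir, file)))
	framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}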


@@ -25,7 +25,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/serializer"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "regexp"
@@ -80,7 +80,7 @@ func numberOfSampleResources(node *v1.Node) int64 {
 // getSampleDevicePluginPod returns the Device Plugin pod for sample resources in e2e tests.
 func getSampleDevicePluginPod() *v1.Pod {
-ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(sampleDevicePluginDSYAML))
+ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(sampleDevicePluginDSYAML))
 p := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: sampleDevicePluginName,


@@ -34,7 +34,7 @@ import (
 controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
 "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
 "k8s.io/kubernetes/pkg/kubelet/metrics"
-frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -1161,7 +1161,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
 // error
 errorSamples := model.Samples{mkErrorSample(len(tc.expectConfigStatus.err) > 0)}
 // expected metrics
-expect := frameworkmetrics.KubeletMetrics(map[string]model.Samples{
+expect := e2emetrics.KubeletMetrics(map[string]model.Samples{
 assignedConfigKey: assignedSamples,
 activeConfigKey: activeSamples,
 lastKnownGoodConfigKey: lastKnownGoodSamples,


@@ -43,7 +43,7 @@ import (
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/generated"
 "k8s.io/kubernetes/test/e2e_node/services"
 system "k8s.io/system-validators/validators"
@@ -85,7 +85,7 @@ func registerNodeFlags(flags *flag.FlagSet) {
 func init() {
 // Enable bindata file lookup as fallback.
-testfiles.AddFileSource(testfiles.BindataFileSource{
+e2etestfiles.AddFileSource(e2etestfiles.BindataFileSource{
 Asset: generated.Asset,
 AssetNames: generated.AssetNames,
 })


@@ -27,8 +27,8 @@ import (
 "k8s.io/apimachinery/pkg/util/uuid"
 kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 "k8s.io/kubernetes/test/e2e/framework"
-"k8s.io/kubernetes/test/e2e/framework/gpu"
-"k8s.io/kubernetes/test/e2e/framework/metrics"
+e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
+e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -40,7 +40,7 @@ import (
 // After the NVIDIA drivers were installed
 // TODO make this generic and not linked to COS only
 func numberOfNVIDIAGPUs(node *v1.Node) int64 {
-val, ok := node.Status.Capacity[gpu.NVIDIAGPUResourceName]
+val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]
 if !ok {
 return 0
 }
@@ -49,7 +49,7 @@ func numberOfNVIDIAGPUs(node *v1.Node) int64 {
 // NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
 func NVIDIADevicePlugin() *v1.Pod {
-ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML)
+ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML)
 framework.ExpectNoError(err)
 p := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 ginkgo.It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
 ginkgo.By("Creating one GPU pod on a node with at least two GPUs")
 podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
-p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
+p1 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD))
 deviceIDRE := "gpu devices: (nvidia[0-9]+)"
 devID1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
 gomega.Eventually(func() bool {
 return numberOfNVIDIAGPUs(getLocalNode(f)) > 0
 }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
-p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
+p2 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD))
 ginkgo.By("Checking that pods got a different GPU")
 devID2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
@@ -179,7 +179,7 @@ func checkIfNvidiaGPUsExistOnNode() bool {
 }
 func logDevicePluginMetrics() {
-ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
+ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
 framework.ExpectNoError(err)
 for msKey, samples := range ms {
 switch msKey {
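
A minimal sketch (not part of this commit) combining the two aliases introduced in this file; gpuCapacityAndMetrics is a hypothetical helper, while the e2egpu constant and the e2emetrics call match the hunks above.

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// gpuCapacityAndMetrics returns how many NVIDIA GPUs a node advertises and
// logs how many metric families the local kubelet currently exposes.
func gpuCapacityAndMetrics(node *v1.Node) int64 {
	ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
	framework.ExpectNoError(err)
	framework.Logf("kubelet exposed %d metric families", len(ms))

	val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]
	if !ok {
		return 0
	}
	return val.Value()
}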


@@ -30,8 +30,8 @@ import (
 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 commontest "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
-"k8s.io/kubernetes/test/e2e/framework/gpu"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -171,7 +171,7 @@ func PrePullAllImages() error {
 // getGPUDevicePluginImage returns the image of GPU device plugin.
 func getGPUDevicePluginImage() string {
-ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML)
+ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML)
 if err != nil {
 klog.Errorf("Failed to parse the device plugin image: %v", err)
 return ""
@@ -189,7 +189,7 @@ func getGPUDevicePluginImage() string {
 // getSRIOVDevicePluginImage returns the image of SRIOV device plugin.
 func getSRIOVDevicePluginImage() string {
-data, err := testfiles.Read(SRIOVDevicePluginDSYAML)
+data, err := e2etestfiles.Read(SRIOVDevicePluginDSYAML)
 if err != nil {
 klog.Errorf("Failed to read the device plugin manifest: %v", err)
 return ""


@@ -24,8 +24,8 @@ import (
 kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
-"k8s.io/kubernetes/test/e2e/framework/metrics"
-"k8s.io/kubernetes/test/e2e/framework/volume"
+e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 "github.com/prometheus/common/model"
@@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 "": boundedSample(1, 1e6),
 }),
 "node_memory_working_set_bytes": gstruct.MatchAllElements(nodeID, gstruct.Elements{
-"": boundedSample(10*volume.Mb, memoryLimit),
+"": boundedSample(10*e2evolume.Mb, memoryLimit),
 }),
 "container_cpu_usage_seconds_total": gstruct.MatchElements(containerID, gstruct.IgnoreExtras, gstruct.Elements{
@@ -85,8 +85,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 }),
 "container_memory_working_set_bytes": gstruct.MatchAllElements(containerID, gstruct.Elements{
-fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
-fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
+fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb),
+fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb),
 }),
 })
 ginkgo.By("Giving pods a minute to start up and produce metrics")
@@ -110,8 +110,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 })
 })
-func getV1alpha1ResourceMetrics() (metrics.KubeletMetrics, error) {
-return metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
+func getV1alpha1ResourceMetrics() (e2emetrics.KubeletMetrics, error) {
+return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
 }
 func nodeID(element interface{}) string {


@@ -29,7 +29,7 @@ import (
 kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
-"k8s.io/kubernetes/test/e2e/framework/volume"
+e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 systemdutil "github.com/coreos/go-systemd/util"
 "github.com/onsi/ginkgo"
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 node := getLocalNode(f)
 memoryCapacity := node.Status.Capacity["memory"]
 memoryLimit := memoryCapacity.Value()
-fsCapacityBounds := bounded(100*volume.Mb, 10*volume.Tb)
+fsCapacityBounds := bounded(100*e2evolume.Mb, 10*e2evolume.Tb)
 // Expectations for system containers.
 sysContExpectations := func() types.GomegaMatcher {
 return gstruct.MatchAllFields(gstruct.Fields{
@@ -97,10 +97,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 // We don't limit system container memory.
 "AvailableBytes": gomega.BeNil(),
-"UsageBytes": bounded(1*volume.Mb, memoryLimit),
-"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
+"UsageBytes": bounded(1*e2evolume.Mb, memoryLimit),
+"WorkingSetBytes": bounded(1*e2evolume.Mb, memoryLimit),
 // this now returns /sys/fs/cgroup/memory.stat total_rss
-"RSSBytes": bounded(1*volume.Mb, memoryLimit),
+"RSSBytes": bounded(1*e2evolume.Mb, memoryLimit),
 "PageFaults": bounded(1000, 1e9),
 "MajorPageFaults": bounded(0, 100000),
 }),
@@ -114,10 +114,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 podsContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
 "Time": recent(maxStatsAge),
 // Pods are limited by Node Allocatable
-"AvailableBytes": bounded(1*volume.Kb, memoryLimit),
-"UsageBytes": bounded(10*volume.Kb, memoryLimit),
-"WorkingSetBytes": bounded(10*volume.Kb, memoryLimit),
-"RSSBytes": bounded(1*volume.Kb, memoryLimit),
+"AvailableBytes": bounded(1*e2evolume.Kb, memoryLimit),
+"UsageBytes": bounded(10*e2evolume.Kb, memoryLimit),
+"WorkingSetBytes": bounded(10*e2evolume.Kb, memoryLimit),
+"RSSBytes": bounded(1*e2evolume.Kb, memoryLimit),
 "PageFaults": bounded(0, 1000000),
 "MajorPageFaults": bounded(0, 10),
 })
@@ -159,9 +159,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 // We don't limit system container memory.
 "AvailableBytes": gomega.BeNil(),
-"UsageBytes": bounded(100*volume.Kb, memoryLimit),
-"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
-"RSSBytes": bounded(100*volume.Kb, memoryLimit),
+"UsageBytes": bounded(100*e2evolume.Kb, memoryLimit),
+"WorkingSetBytes": bounded(100*e2evolume.Kb, memoryLimit),
+"RSSBytes": bounded(100*e2evolume.Kb, memoryLimit),
 "PageFaults": bounded(1000, 1e9),
 "MajorPageFaults": bounded(0, 100000),
 })
@@ -182,10 +182,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 }),
 "Memory": ptrMatchAllFields(gstruct.Fields{
 "Time": recent(maxStatsAge),
-"AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb),
-"UsageBytes": bounded(10*volume.Kb, 80*volume.Mb),
-"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb),
-"RSSBytes": bounded(1*volume.Kb, 80*volume.Mb),
+"AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
+"UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
+"WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
+"RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
 "PageFaults": bounded(100, 1000000),
 "MajorPageFaults": bounded(0, 10),
 }),
@@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
+"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),
@@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
+"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),
@@ -215,9 +215,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
 "Name": gomega.Equal("eth0"),
-"RxBytes": bounded(10, 10*volume.Mb),
+"RxBytes": bounded(10, 10*e2evolume.Mb),
 "RxErrors": bounded(0, 1000),
-"TxBytes": bounded(10, 10*volume.Mb),
+"TxBytes": bounded(10, 10*e2evolume.Mb),
 "TxErrors": bounded(0, 1000),
 }),
 "Interfaces": gomega.Not(gomega.BeNil()),
@@ -229,10 +229,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 }),
 "Memory": ptrMatchAllFields(gstruct.Fields{
 "Time": recent(maxStatsAge),
-"AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb),
-"UsageBytes": bounded(10*volume.Kb, 80*volume.Mb),
-"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb),
-"RSSBytes": bounded(1*volume.Kb, 80*volume.Mb),
+"AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
+"UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
+"WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
+"RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
 "PageFaults": bounded(0, 1000000),
 "MajorPageFaults": bounded(0, 10),
 }),
@@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(volume.Kb, 1*volume.Mb),
+"UsedBytes": bounded(e2evolume.Kb, 1*e2evolume.Mb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),
@@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
-"UsedBytes": bounded(volume.Kb, 21*volume.Mb),
+"UsedBytes": bounded(e2evolume.Kb, 21*e2evolume.Mb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),
@@ -277,11 +277,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 }),
 "Memory": ptrMatchAllFields(gstruct.Fields{
 "Time": recent(maxStatsAge),
-"AvailableBytes": bounded(100*volume.Mb, memoryLimit),
-"UsageBytes": bounded(10*volume.Mb, memoryLimit),
-"WorkingSetBytes": bounded(10*volume.Mb, memoryLimit),
+"AvailableBytes": bounded(100*e2evolume.Mb, memoryLimit),
+"UsageBytes": bounded(10*e2evolume.Mb, memoryLimit),
+"WorkingSetBytes": bounded(10*e2evolume.Mb, memoryLimit),
 // this now returns /sys/fs/cgroup/memory.stat total_rss
-"RSSBytes": bounded(1*volume.Kb, memoryLimit),
+"RSSBytes": bounded(1*e2evolume.Kb, memoryLimit),
 "PageFaults": bounded(1000, 1e9),
 "MajorPageFaults": bounded(0, 100000),
 }),
@@ -290,9 +290,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "Time": recent(maxStatsAge),
 "InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
 "Name": gomega.Or(gomega.BeEmpty(), gomega.Equal("eth0")),
-"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
+"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*e2evolume.Mb, 100*e2evolume.Gb)),
 "RxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
-"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
+"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*e2evolume.Kb, 10*e2evolume.Gb)),
 "TxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
 }),
 "Interfaces": gomega.Not(gomega.BeNil()),
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
 // we assume we are not running tests on machines < 10tb of disk
-"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
+"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),
@@ -313,7 +313,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 "AvailableBytes": fsCapacityBounds,
 "CapacityBytes": fsCapacityBounds,
 // we assume we are not running tests on machines < 10tb of disk
-"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
+"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb),
 "InodesFree": bounded(1e4, 1e8),
 "Inodes": bounded(1e4, 1e8),
 "InodesUsed": bounded(0, 1e8),


@@ -41,7 +41,7 @@ import (
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-"k8s.io/kubernetes/test/e2e/framework/testfiles"
+e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -246,7 +246,7 @@ func configureTopologyManagerInKubelet(f *framework.Framework, oldCfg *kubeletco
 // getSRIOVDevicePluginPod returns the Device Plugin pod for sriov resources in e2e tests.
 func getSRIOVDevicePluginPod() *v1.Pod {
-ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginDSYAML))
+ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginDSYAML))
 p := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: SRIOVDevicePluginName,
@@ -415,7 +415,7 @@ func isTopologyAffinityError(pod *v1.Pod) bool {
 }
 func getSRIOVDevicePluginConfigMap(cmFile string) *v1.ConfigMap {
-cmData := testfiles.ReadOrDie(SRIOVDevicePluginCMYAML)
+cmData := e2etestfiles.ReadOrDie(SRIOVDevicePluginCMYAML)
 var err error
 // the SRIOVDP configuration is hw-dependent, so we allow per-test-host customization.
@@ -449,7 +449,7 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
 framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
 }
-serviceAccount := readServiceAccountV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginSAYAML))
+serviceAccount := readServiceAccountV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginSAYAML))
 ginkgo.By(fmt.Sprintf("Creating serviceAccount %v/%v", metav1.NamespaceSystem, serviceAccount.Name))
 if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}); err != nil {
 framework.Failf("unable to create test serviceAccount %s: %v", serviceAccount.Name, err)