Merge pull request #89454 from gavinfish/import-aliases

Update .import-aliases for e2e test framework
Kubernetes Prow Robot 2020-03-27 14:35:54 -07:00 committed by GitHub
commit 4e9dd8fd36
75 changed files with 713 additions and 702 deletions


@ -51,22 +51,34 @@
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1",
"k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1",
"k8s.io/kubernetes/test/e2e/framework/auth": "e2eauth",
"k8s.io/kubernetes/test/e2e/framework/autoscaling": "e2eautoscaling",
"k8s.io/kubernetes/test/e2e/framework/config": "e2econfig",
"k8s.io/kubernetes/test/e2e/framework/deployment": "e2edeployment",
"k8s.io/kubernetes/test/e2e/framework/endpoints": "e2eendpoints",
"k8s.io/kubernetes/test/e2e/framework/events": "e2eevents",
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper": "e2eginkgowrapper",
"k8s.io/kubernetes/test/e2e/framework/gpu": "e2egpu",
"k8s.io/kubernetes/test/e2e/framework/ingress": "e2eingress",
"k8s.io/kubernetes/test/e2e/framework/job": "e2ejob",
"k8s.io/kubernetes/test/e2e/framework/kubectl": "e2ekubectl",
"k8s.io/kubernetes/test/e2e/framework/kubelet": "e2ekubelet",
"k8s.io/kubernetes/test/e2e/framework/log": "e2elog",
"k8s.io/kubernetes/test/e2e/framework/metrics": "e2emetrics",
"k8s.io/kubernetes/test/e2e/framework/network": "e2enetwork",
"k8s.io/kubernetes/test/e2e/framework/node": "e2enode",
"k8s.io/kubernetes/test/e2e/framework/perf": "e2eperf",
"k8s.io/kubernetes/test/e2e/framework/pod": "e2epod",
"k8s.io/kubernetes/test/e2e/framework/pv": "e2epv",
"k8s.io/kubernetes/test/e2e/framework/rc": "e2erc",
"k8s.io/kubernetes/test/e2e/framework/replicaset": "e2ereplicaset",
"k8s.io/kubernetes/test/e2e/framework/resource": "e2eresource",
"k8s.io/kubernetes/test/e2e/framework/security": "e2esecurity",
"k8s.io/kubernetes/test/e2e/framework/service": "e2eservice",
"k8s.io/kubernetes/test/e2e/framework/skipper": "e2eskipper",
"k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh"
"k8s.io/kubernetes/test/e2e/framework/ssh": "e2essh",
"k8s.io/kubernetes/test/e2e/framework/statefulset": "e2estatefulset",
"k8s.io/kubernetes/test/e2e/framework/testfiles": "e2etestfiles",
"k8s.io/kubernetes/test/e2e/framework/timer": "e2etimer",
"k8s.io/kubernetes/test/e2e/framework/volume": "e2evolume"
}
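
For context, the entries above map full import paths to the aliases the e2e framework expects. Below is a minimal sketch (not part of this commit) of how one of the new aliases reads in a test file; the helper function is hypothetical, but the e2edeployment.NewDeployment call and its parameters mirror the call sites updated in the hunks that follow.

package example

import (
	appsv1 "k8s.io/api/apps/v1"

	// Alias taken from the .import-aliases entry above.
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// newWebserverDeployment is a hypothetical helper; the NewDeployment signature
// matches the updated call sites in this commit and returns an *appsv1.Deployment.
func newWebserverDeployment(name string, replicas int32, labels map[string]string, image string) *appsv1.Deployment {
	return e2edeployment.NewDeployment(name, replicas, labels, "webserver", image, appsv1.RollingUpdateDeploymentStrategyType)
}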


@ -42,7 +42,7 @@ import (
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
@ -274,9 +274,9 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace)
// kubectl create -f service.yaml
@ -333,7 +333,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml


@ -35,7 +35,7 @@ import (
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@ -339,9 +339,9 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)
ginkgo.By("Deploying the webhook service")


@ -42,7 +42,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -840,9 +840,9 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = e2edeploy.WaitForDeploymentComplete(client, deployment)
err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)
ginkgo.By("Deploying the webhook service")


@ -41,9 +41,9 @@ import (
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -233,16 +233,16 @@ func testDeleteDeployment(f *framework.Framework) {
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
framework.Logf("Creating simple deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
@ -280,17 +280,17 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete webserver pods and instead bring up agnhost pods.
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 3546343826724305833.
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
framework.ExpectNoError(err)
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
// There should be 1 old RS (webserver-controller, which is adopted)
@ -309,22 +309,22 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up agnhost pods.
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
framework.ExpectNoError(err)
framework.Logf("Waiting deployment %q to complete", deploymentName)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
// Update deployment to delete agnhost pods and bring up webserver pods.
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
update.Spec.Template.Spec.Containers[0].Image = WebserverImage
})
@ -395,7 +395,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
@ -427,7 +427,7 @@ func testRolloverDeployment(f *framework.Framework) {
// Wait for replica set to become ready before adopting it.
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
err = e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName)
framework.ExpectNoError(err)
// Create a deployment to delete webserver pods and instead bring up redis-slave pods.
@ -437,7 +437,7 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %q", deploymentName)
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment := e2edeployment.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
@ -468,7 +468,7 @@ func testRolloverDeployment(f *framework.Framework) {
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up agnhost pods.
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := AgnhostImageName, AgnhostImage
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@ -481,7 +481,7 @@ func testRolloverDeployment(f *framework.Framework) {
// Wait for it to be updated to revision 2
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
framework.ExpectNoError(err)
framework.Logf("Make sure deployment %q is complete", deploymentName)
@ -528,7 +528,7 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a webserver deployment.
deploymentName := "webserver"
thirty := int32(30)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@ -546,7 +546,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@ -556,7 +556,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
@ -567,7 +567,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.6:
// just scaling
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
randomScale(update, i)
})
framework.ExpectNoError(err)
@ -576,14 +576,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
framework.ExpectNoError(err)
} else {
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@ -620,7 +620,7 @@ func testIterativeDeployments(f *framework.Framework) {
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment.Spec.Paused {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Paused = false
})
}
@ -630,7 +630,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.ExpectNoError(err)
framework.Logf("Waiting for deployment %q status", deploymentName)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
@ -646,10 +646,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
@ -673,10 +673,10 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
err = e2edeployment.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
@ -703,7 +703,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a webserver deployment.
deploymentName := "webserver-deployment"
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@ -722,7 +722,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
@ -731,7 +731,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "webserver:404"
})
framework.ExpectNoError(err)
@ -747,7 +747,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 too.
@ -796,7 +796,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Replicas = &newReplicas
})
framework.ExpectNoError(err)
@ -868,7 +868,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
framework.Logf("Creating Deployment %q", name)
podLabels := map[string]string{"name": name}
replicas := int32(3)
d := e2edeploy.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
// NewDeployment assigned the same value to both d.Spec.Selector and
// d.Spec.Template.Labels, so mutating the one would mutate the other.
// Thus we need to set d.Spec.Template.Labels to a new value if we want
@ -893,7 +893,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
}
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns)
@ -939,7 +939,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
framework.Logf("Triggering a rolling deployment several times")
for i := 1; i <= 3; i++ {
framework.Logf("Updating label deployment %q pod spec (iteration #%d)", name, i)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i)
setAffinities(update, true)
})


@ -43,7 +43,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
@ -379,13 +379,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all stateful set in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
})
ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{})
framework.ExpectNoError(err)
@ -396,19 +396,19 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
common.RestartNodes(f.ClientSet, nodes)
ginkgo.By("waiting for pods to be running again")
e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
ps := e2estatefulset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
pod := e2esset.GetPodList(c, ps).Items[0]
pod := e2estatefulset.GetPodList(c, ps).Items[0]
node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -427,7 +427,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
ginkgo.By("waiting for pods to be running again")
e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
e2estatefulset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})
})


@ -32,7 +32,7 @@ import (
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
@ -229,7 +229,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
rs, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})


@ -39,7 +39,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -99,7 +99,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
ss = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
@ -112,7 +112,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
})
// This can't be Conformance yet because it depends on a default
@ -121,37 +121,37 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
e2esset.PauseNewPods(ss)
e2estatefulset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + ss.Name)
e2esset.Saturate(c, ss)
e2estatefulset.Saturate(c, ss)
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))
framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data"))
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(e2esset.CheckHostname(c, ss))
framework.ExpectNoError(e2estatefulset.CheckHostname(c, ss))
ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(e2esset.CheckServiceName(ss, headlessSvcName))
framework.ExpectNoError(e2estatefulset.CheckServiceName(ss, headlessSvcName))
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))
framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd))
ginkgo.By("Restarting statefulset " + ss.Name)
e2esset.Restart(c, ss)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.Restart(c, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))
framework.ExpectNoError(e2estatefulset.CheckMount(c, ss, "/data"))
cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))
framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(c, ss, cmd))
})
// This can't be Conformance yet because it depends on a default
@ -160,7 +160,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 1
e2esset.PauseNewPods(ss)
e2estatefulset.PauseNewPods(ss)
// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
@ -170,8 +170,8 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Kind = kind
ginkgo.By("Saturating stateful set " + ss.Name)
e2esset.Saturate(c, ss)
pods := e2esset.GetPodList(c, ss)
e2estatefulset.Saturate(c, ss)
pods := e2estatefulset.GetPodList(c, ss)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas)))
ginkgo.By("Checking that stateful set pods are created with ControllerRef")
@ -245,18 +245,18 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
e2epv.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 2
e2esset.PauseNewPods(ss)
e2estatefulset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunning(c, 1, 0, ss)
e2estatefulset.WaitForRunning(c, 1, 0, ss)
ginkgo.By("Resuming stateful pod at index 0.")
e2esset.ResumeNextPod(c, ss)
e2estatefulset.ResumeNextPod(c, ss)
ginkgo.By("Waiting for stateful pod at index 1 to enter running.")
e2esset.WaitForRunning(c, 2, 1, ss)
e2estatefulset.WaitForRunning(c, 2, 1, ss)
// Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not*
// create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till
@ -266,13 +266,13 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss)
ginkgo.By("Confirming stateful pod at index 0 is recreated.")
e2esset.WaitForRunning(c, 2, 1, ss)
e2estatefulset.WaitForRunning(c, 2, 1, ss)
ginkgo.By("Resuming stateful pod at index 1.")
e2esset.ResumeNextPod(c, ss)
e2estatefulset.ResumeNextPod(c, ss)
ginkgo.By("Confirming all stateful pods in statefulset are created.")
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
@ -291,7 +291,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ss)
})
@ -302,7 +302,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
@ -316,12 +316,12 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := e2esset.GetPodList(c, ss)
pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
pods.Items[i].Namespace,
@ -412,9 +412,9 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Restoring Pods to the correct revision when they are deleted")
deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 2, ss)
e2esset.WaitForRunningAndReady(c, 3, ss)
e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name)
pods = e2esset.GetPodList(c, ss)
pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
@ -494,19 +494,19 @@ var _ = SIGDescribe("StatefulSet", func() {
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() {
ginkgo.By("Creating a new StatefulSet")
ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
setHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := e2esset.GetPodList(c, ss)
pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
@ -519,9 +519,9 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 1, ss)
deleteStatefulPodAtIndex(c, 2, ss)
e2esset.WaitForRunningAndReady(c, 3, ss)
e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name)
pods = e2esset.GetPodList(c, ss)
pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
@ -548,9 +548,9 @@ var _ = SIGDescribe("StatefulSet", func() {
deleteStatefulPodAtIndex(c, 0, ss)
deleteStatefulPodAtIndex(c, 1, ss)
deleteStatefulPodAtIndex(c, 2, ss)
e2esset.WaitForRunningAndReady(c, 3, ss)
e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ss = getStatefulSet(c, ss.Namespace, ss.Name)
pods = e2esset.GetPodList(c, ss)
pods = e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace,
@ -579,24 +579,24 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
setHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
breakHTTPProbe(c, ss)
waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3)
e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
e2estatefulset.UpdateReplicas(c, ss, 3)
confirmStatefulPodCount(c, 1, ss, 10*time.Second, true)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
restoreHTTPProbe(c, ss)
e2esset.WaitForRunningAndReady(c, 3, ss)
e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
@ -622,14 +622,14 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
breakHTTPProbe(c, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
waitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0)
e2estatefulset.UpdateReplicas(c, ss, 0)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, true)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
restoreHTTPProbe(c, ss)
e2esset.Scale(c, ss, 0)
e2estatefulset.Scale(c, ss, 0)
ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
@ -658,37 +658,37 @@ var _ = SIGDescribe("StatefulSet", func() {
psLabels := klabels.Set(labels)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
breakHTTPProbe(c, ss)
waitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3)
e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
e2estatefulset.UpdateReplicas(c, ss, 3)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, false)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
restoreHTTPProbe(c, ss)
e2esset.WaitForRunningAndReady(c, 3, ss)
e2estatefulset.WaitForRunningAndReady(c, 3, ss)
ginkgo.By("Scale down will not halt with unhealthy stateful pod")
breakHTTPProbe(c, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2estatefulset.WaitForStatusReadyReplicas(c, ss, 0)
waitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0)
e2estatefulset.UpdateReplicas(c, ss, 0)
confirmStatefulPodCount(c, 0, ss, 10*time.Second, false)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
restoreHTTPProbe(c, ss)
e2esset.Scale(c, ss, 0)
e2esset.WaitForStatusReplicas(c, ss, 0)
e2estatefulset.Scale(c, ss, 0)
e2estatefulset.WaitForStatusReplicas(c, ss, 0)
})
/*
@ -724,7 +724,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
ss := e2esset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
ss := e2estatefulset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
ss.Spec.Template.Spec.NodeName = node.Name
@ -791,11 +791,11 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should have a working scale subresource", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
ginkgo.By("getting scale subresource")
@ -836,7 +836,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
})
// Do not mark this as Conformance.
@ -907,8 +907,8 @@ func (c *clusterAppTester) run() {
default:
if restartCluster {
ginkgo.By("Restarting stateful set " + ss.Name)
e2esset.Restart(c.client, ss)
e2esset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss)
e2estatefulset.Restart(c.client, ss)
e2estatefulset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss)
}
}
@ -928,7 +928,7 @@ func (z *zookeeperTester) name() string {
}
func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet {
z.ss = e2esset.CreateStatefulSet(z.client, zookeeperManifestPath, ns)
z.ss = e2estatefulset.CreateStatefulSet(z.client, zookeeperManifestPath, ns)
return z.ss
}
@ -966,7 +966,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
}
func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns)
m.ss = e2estatefulset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
for _, cmd := range []string{
@ -1006,7 +1006,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string {
}
func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = e2esset.CreateStatefulSet(m.client, redisManifestPath, ns)
m.ss = e2estatefulset.CreateStatefulSet(m.client, redisManifestPath, ns)
return m.ss
}
@ -1037,7 +1037,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
}
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns)
c.ss = e2estatefulset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;",
@ -1088,12 +1088,12 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := e2esset.GetPodList(c, ss)
pods := e2estatefulset.GetPodList(c, ss)
for i := range pods.Items {
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
@ -1101,7 +1101,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
e2esset.SortStatefulPods(pods)
e2estatefulset.SortStatefulPods(pods)
err = breakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
@ -1121,11 +1121,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during rolling update")
ginkgo.By("Updating Pods in reverse ordinal order")
pods = e2esset.GetPodList(c, ss)
e2esset.SortStatefulPods(pods)
pods = e2estatefulset.GetPodList(c, ss)
e2estatefulset.SortStatefulPods(pods)
err = restorePodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
@ -1161,10 +1161,10 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
framework.ExpectNotEqual(currentRevision, updateRevision, "Current revision should not equal update revision during roll back")
ginkgo.By("Rolling back update in reverse ordinal order")
pods = e2esset.GetPodList(c, ss)
e2esset.SortStatefulPods(pods)
pods = e2estatefulset.GetPodList(c, ss)
e2estatefulset.SortStatefulPods(pods)
restorePodHTTPProbe(ss, &pods.Items[1])
ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
ss, pods = waitForRollingUpdate(c, ss)
framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
@ -1192,7 +1192,7 @@ func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.Statef
start := time.Now()
deadline := start.Add(timeout)
for t := time.Now(); t.Before(deadline); t = time.Now() {
podList := e2esset.GetPodList(c, ss)
podList := e2estatefulset.GetPodList(c, ss)
statefulPodCount := len(podList.Items)
if statefulPodCount != count {
e2epod.LogPodStates(podList.Items)
@ -1224,7 +1224,7 @@ func breakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
return e2esset.ExecInStatefulPods(c, ss, cmd)
return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
}
// breakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
@ -1248,7 +1248,7 @@ func restoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
return e2esset.ExecInStatefulPods(c, ss, cmd)
return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
}
// restorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.


@ -23,7 +23,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)
// waitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
@ -43,7 +43,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace,
set.Name)
}
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@ -55,7 +55,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
set.Namespace,
set.Name,
)
e2esset.SortStatefulPods(pods)
e2estatefulset.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
@ -85,7 +85,7 @@ func waitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
// waitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus
func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
if set2.Status.ObservedGeneration >= set.Generation {
set = set2
return true, nil
@ -98,7 +98,7 @@ func waitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.State
// waitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func waitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
for i := range pods.Items {
@ -121,7 +121,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Name,
set.Spec.UpdateStrategy.Type)
}
e2esset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
e2estatefulset.WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
if len(pods.Items) < int(*set.Spec.Replicas) {
@ -132,7 +132,7 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
set.Namespace,
set.Name,
)
e2esset.SortStatefulPods(pods)
e2estatefulset.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
framework.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
@ -151,5 +151,5 @@ func waitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*apps
// waitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func waitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
e2esset.WaitForRunning(c, numStatefulPods, 0, ss)
e2estatefulset.WaitForRunning(c, numStatefulPods, 0, ss)
}


@ -36,8 +36,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -204,7 +204,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType)
d := e2edeployment.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create audit-deployment")
@ -656,7 +656,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
// test authorizer annotations, RBAC is required.
ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() {
if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) {
if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
e2eskipper.Skipf("RBAC not enabled.")
}


@ -36,7 +36,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -347,7 +347,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
},
}
if auth.IsRBACEnabled(f.ClientSet.RbacV1()) {
if e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
testCases = append(testCases, annotationTestCases...)
}
expectedEvents := []utils.AuditEvent{}


@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -56,7 +56,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
if !framework.IsPodSecurityPolicyEnabled(f.ClientSet) {
e2eskipper.Skipf("PodSecurityPolicy not enabled")
}
if !auth.IsRBACEnabled(f.ClientSet.RbacV1()) {
if !e2eauth.IsRBACEnabled(f.ClientSet.RbacV1()) {
e2eskipper.Skipf("RBAC not enabled")
}
ns = f.Namespace.Name
@ -72,7 +72,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
framework.ExpectNoError(err)
ginkgo.By("Binding the edit role to the default SA")
err = auth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns,
err = e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "edit", ns,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns, Name: "default"})
framework.ExpectNoError(err)
})
@ -233,14 +233,14 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu
framework.ExpectNoError(err, "Failed to create PSP role")
// Bind the role to the namespace.
err = auth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{
err = e2eauth.BindRoleInNamespace(f.ClientSet.RbacV1(), name, ns, rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
Namespace: ns,
Name: "default",
})
framework.ExpectNoError(err)
framework.ExpectNoError(auth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
framework.ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
schema.GroupResource{Group: "policy", Resource: "podsecuritypolicies"}, true))


@ -35,8 +35,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/upgrades"
@ -48,8 +48,8 @@ import (
)
var (
upgradeTarget = config.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
upgradeImage = config.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
upgradeTarget = e2econfig.Flags.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
upgradeImage = e2econfig.Flags.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
)
var upgradeTests = []upgrades.Test{
@ -408,7 +408,7 @@ func finalizeUpgradeTest(start time.Time, tc *junit.TestCase) {
}
switch r := r.(type) {
case ginkgowrapper.FailurePanic:
case e2eginkgowrapper.FailurePanic:
tc.Failures = []*junit.Failure{
{
Message: r.Message,


@ -50,7 +50,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"github.com/onsi/ginkgo"
)
@ -77,10 +77,10 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
////////////////////////////////////////////////////////////////////////
ginkgo.Describe("NFSv4", func() {
ginkgo.It("should be mountable for NFSv4", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestServerCleanup(f, config)
config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
@ -95,16 +95,16 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
}
// Must match content of test/images/volumes-tester/nfs/index.html
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
ginkgo.Describe("NFSv3", func() {
ginkgo.It("should be mountable for NFSv3", func() {
config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
defer volume.TestServerCleanup(f, config)
config, _, serverIP := e2evolume.NewNFSServer(c, namespace.Name, []string{})
defer e2evolume.TestServerCleanup(f, config)
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
@ -118,7 +118,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
},
}
// Must match content of test/images/volume-tester/nfs/index.html
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
@ -128,15 +128,15 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
ginkgo.Describe("GlusterFS", func() {
ginkgo.It("should be mountable", func() {
// create gluster server and endpoints
config, _, _ := volume.NewGlusterfsServer(c, namespace.Name)
config, _, _ := e2evolume.NewGlusterfsServer(c, namespace.Name)
name := config.Prefix + "-server"
defer func() {
volume.TestServerCleanup(f, config)
e2evolume.TestServerCleanup(f, config)
err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
}()
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
@ -151,7 +151,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
ExpectedContent: "Hello from GlusterFS!",
},
}
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
})
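
A condensed sketch of the NFS flow above under the new e2evolume alias: stand up the framework's NFS server pod, then verify a client pod can read from the export. The export path and expected content below are placeholders, not values from this diff.

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// mountAndCheckNFS provisions the e2e NFS server and checks that a client pod
// sees the expected file content when the export is mounted read-only.
func mountAndCheckNFS(f *framework.Framework, c clientset.Interface, ns string) {
	config, _, serverIP := e2evolume.NewNFSServer(c, ns, []string{})
	defer e2evolume.TestServerCleanup(f, config)

	tests := []e2evolume.Test{
		{
			Volume: v1.VolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/", // placeholder export path
					ReadOnly: true,
				},
			},
			ExpectedContent: "Hello from NFS!", // placeholder content
		},
	}
	e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}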


@ -57,7 +57,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -445,10 +445,10 @@ func NewIngressTestJig(c clientset.Interface) *TestJig {
func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[string]string, svcAnnotations map[string]string) {
var err error
read := func(file string) string {
return string(testfiles.ReadOrDie(filepath.Join(manifestPath, file)))
return string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file)))
}
exists := func(file string) bool {
return testfiles.Exists(filepath.Join(manifestPath, file))
return e2etestfiles.Exists(filepath.Join(manifestPath, file))
}
j.Logger.Infof("creating replication controller")
@ -499,7 +499,7 @@ func marshalToYaml(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) {
// ingressFromManifest reads a .json/yaml file and returns the ingress in it.
func ingressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) {
var ing networkingv1beta1.Ingress
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}
@ -1008,7 +1008,7 @@ func (cont *NginxIngressController) Init() {
framework.ExpectNoError(err)
read := func(file string) string {
return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
return string(e2etestfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
}
framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))
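
The read/exists closures above reduce to the following sketch of the e2etestfiles calls used in these hunks (Exists and ReadOrDie); the loadFixture name and the idea of an optional fixture are illustrative.

package example

import (
	"path/filepath"

	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// loadFixture returns the contents of an optional fixture under manifestPath.
// ReadOrDie aborts the test when the file is missing, so Exists guards the
// call for files that may legitimately be absent.
func loadFixture(manifestPath, name string) (string, bool) {
	full := filepath.Join(manifestPath, name)
	if !e2etestfiles.Exists(full) {
		return "", false
	}
	return string(e2etestfiles.ReadOrDie(full)), true
}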


@ -26,7 +26,7 @@ import (
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -53,7 +53,7 @@ func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := offset + 1
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
@ -64,7 +64,7 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)


@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -50,5 +50,5 @@ func Failf(format string, args ...interface{}) {
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("FAIL", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}


@ -33,7 +33,7 @@ import (
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)
const (
@ -128,7 +128,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
}
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
// Create the Role to bind it to the namespace.
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
@ -145,10 +145,10 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
}
})
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
err := auth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1.Subject{
@ -157,7 +157,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
Name: "default",
})
ExpectNoError(err)
ExpectNoError(auth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}


@ -25,7 +25,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -37,7 +37,7 @@ var loggingSoak struct {
Scale int `default:"1" usage:"number of waves of pods"`
TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"`
}
var _ = config.AddOptions(&loggingSoak, "instrumentation.logging.soak")
var _ = e2econfig.AddOptions(&loggingSoak, "instrumentation.logging.soak")
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {
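
To illustrate the e2econfig pattern shown just above: suite options are declared as a tagged struct and registered under a prefix, and individual flags can be attached to the shared flag set. The mysuite names below are invented for the sketch.

package example

import (
	"time"

	e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
)

// Suite options: struct tags supply the default value and usage string for
// each generated flag; AddOptions registers them under the given prefix.
var mySuiteOptions struct {
	Scale int           `default:"1" usage:"number of waves of pods"`
	Wait  time.Duration `default:"5s" usage:"delay between waves"`
}

var _ = e2econfig.AddOptions(&mySuiteOptions, "mysuite")

// Standalone flags can also be registered directly on the shared flag set.
var mySuiteTarget = e2econfig.Flags.String("mysuite-target", "ci/latest", "version to test against")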


@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling"
@ -93,7 +93,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
Args: []string{"nvidia-smi && sleep infinity"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
gpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
e2egpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
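
A minimal sketch of requesting a GPU through the renamed e2egpu alias, following the resource-limit shape in the hunk above; the container name, image, and command are placeholders.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
)

// gpuContainer returns a container spec that asks the scheduler for one
// NVIDIA GPU via the extended resource name exported by e2egpu.
func gpuContainer(image string, cmd []string) v1.Container {
	return v1.Container{
		Name:    "gpu-worker", // placeholder
		Image:   image,
		Command: cmd,
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				e2egpu.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
			},
		},
	}
}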


@ -23,7 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
@ -34,13 +34,13 @@ import (
var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
f := framework.NewDefaultFramework("metrics-grabber")
var c, ec clientset.Interface
var grabber *metrics.Grabber
var grabber *e2emetrics.Grabber
gin.BeforeEach(func() {
var err error
c = f.ClientSet
ec = f.KubemarkExternalClusterClientSet
framework.ExpectNoError(err)
grabber, err = metrics.NewMetricsGrabber(c, ec, true, true, true, true, true)
grabber, err = e2emetrics.NewMetricsGrabber(c, ec, true, true, true, true, true)
framework.ExpectNoError(err)
})
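
The grabber setup above boils down to this sketch; the boolean arguments select which components are scraped and are simply mirrored from the call in the hunk rather than documented individually here.

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// newGrabber builds a metrics grabber against the cluster (c) and, when
// running kubemark, the external cluster client set (ec); the booleans toggle
// the individual component scrapers, matching the call shown above.
func newGrabber(c, ec clientset.Interface) (*e2emetrics.Grabber, error) {
	return e2emetrics.NewMetricsGrabber(c, ec, true, true, true, true, true)
}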


@ -62,12 +62,12 @@ import (
"k8s.io/kubernetes/pkg/controller"
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd"
@ -208,7 +208,7 @@ func assertCleanup(ns string, selectors ...string) {
}
func readTestFileOrDie(file string) []byte {
return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file))
return e2etestfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file))
}
func runKubectlRetryOrDie(ns string, args ...string) string {
@ -300,7 +300,7 @@ var _ = SIGDescribe("Kubectl client", func() {
var nautilus string
ginkgo.BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))))
nautilus = commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))))
})
/*
Release : v1.9
@ -348,7 +348,7 @@ var _ = SIGDescribe("Kubectl client", func() {
"agnhost-master-deployment.yaml.in",
"agnhost-slave-deployment.yaml.in",
} {
contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile))))
contents := commonutils.SubstituteImageName(string(e2etestfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile))))
run(contents)
}
}
@ -621,11 +621,11 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.It("should handle in-cluster config", func() {
ginkgo.By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
err := auth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)


@ -24,13 +24,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
scheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
// PodFromManifest reads a .json/yaml file and returns the pod in it.
func PodFromManifest(filename string) (*v1.Pod, error) {
var pod v1.Pod
data, err := testfiles.Read(filename)
data, err := e2etestfiles.Read(filename)
if err != nil {
return nil, err
}
@ -48,7 +48,7 @@ func PodFromManifest(filename string) (*v1.Pod, error) {
// RcFromManifest reads a .json/yaml file and returns the rc in it.
func RcFromManifest(fileName string) (*v1.ReplicationController, error) {
var controller v1.ReplicationController
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}
@ -66,7 +66,7 @@ func RcFromManifest(fileName string) (*v1.ReplicationController, error) {
// SvcFromManifest reads a .json/yaml file and returns the service in it.
func SvcFromManifest(fileName string) (*v1.Service, error) {
var svc v1.Service
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}
@ -84,7 +84,7 @@ func SvcFromManifest(fileName string) (*v1.Service, error) {
// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
var ss appsv1.StatefulSet
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}
@ -108,7 +108,7 @@ func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}
@ -128,7 +128,7 @@ func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
// RoleFromManifest returns a Role from a manifest stored in fileName in the Namespace indicated by ns.
func RoleFromManifest(fileName, ns string) (*rbacv1.Role, error) {
var role rbacv1.Role
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
return nil, err
}


@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@ -136,7 +136,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
replicas := int32(len(nodeList.Items))
serverDeploymentSpec := e2edeploy.NewDeployment(serverDeploymentName,
serverDeploymentSpec := e2edeployment.NewDeployment(serverDeploymentName,
replicas,
map[string]string{"test": "dual-stack-server"},
"dualstack-test-server",
@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
},
}
clientDeploymentSpec := e2edeploy.NewDeployment(clientDeploymentName,
clientDeploymentSpec := e2edeployment.NewDeployment(clientDeploymentName,
replicas,
map[string]string{"test": "dual-stack-client"},
"dualstack-test-client",
@ -198,15 +198,15 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(cs, serverDeployment)
err = e2edeployment.WaitForDeploymentComplete(cs, serverDeployment)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(cs, clientDeployment)
err = e2edeployment.WaitForDeploymentComplete(cs, clientDeployment)
framework.ExpectNoError(err)
serverPods, err := e2edeploy.GetPodsForDeployment(cs, serverDeployment)
serverPods, err := e2edeployment.GetPodsForDeployment(cs, serverDeployment)
framework.ExpectNoError(err)
clientPods, err := e2edeploy.GetPodsForDeployment(cs, clientDeployment)
clientPods, err := e2edeployment.GetPodsForDeployment(cs, clientDeployment)
framework.ExpectNoError(err)
assertNetworkConnectivity(f, *serverPods, *clientPods, "dualstack-test-client", "80")
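
Pulling the deployment calls above into one place, a sketch of the e2edeployment helpers under the new alias: create a deployment, wait for it to complete, and fetch its pods. The agnhost-example names are placeholders; the helper signatures come from the hunks above.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// runAgnhostDeployment creates a small agnhost deployment in ns, waits for it
// to become complete, and returns the pods backing it.
func runAgnhostDeployment(cs clientset.Interface, ns string, replicas int32) (*v1.PodList, error) {
	labels := map[string]string{"app": "agnhost-example"} // placeholder label
	d := e2edeployment.NewDeployment("agnhost-example", replicas, labels,
		"agnhost", imageutils.GetE2EImage(imageutils.Agnhost),
		appsv1.RollingUpdateDeploymentStrategyType)

	d, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	if err := e2edeployment.WaitForDeploymentComplete(cs, d); err != nil {
		return nil, err
	}
	return e2edeployment.GetPodsForDeployment(cs, d)
}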


@ -36,8 +36,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
"k8s.io/kubernetes/test/e2e/framework/ingress"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -54,22 +54,22 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
defer ginkgo.GinkgoRecover()
var (
ns string
jig *ingress.TestJig
conformanceTests []ingress.ConformanceTests
jig *e2eingress.TestJig
conformanceTests []e2eingress.ConformanceTests
)
f := framework.NewDefaultFramework("ingress")
ginkgo.BeforeEach(func() {
jig = ingress.NewIngressTestJig(f.ClientSet)
jig = e2eingress.NewIngressTestJig(f.ClientSet)
ns = f.Namespace.Name
// this test wants powerful permissions. Since the namespace names are unique, we can leave this
// lying around so we don't have to race any caches
err := auth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
err := e2eauth.BindClusterRole(jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(),
err = e2eauth.WaitForAuthorizationUpdate(jig.Client.AuthorizationV1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
"", "create", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
@ -116,7 +116,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
ginkgo.By(t.EntryLog)
t.Execute()
@ -131,8 +131,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should support multiple TLS certs", func() {
ginkgo.By("Creating an ingress with no certs.")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
ingress.IngressStaticIPKey: ns,
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
e2eingress.IngressStaticIPKey: ns,
}, map[string]string{})
ginkgo.By("Adding multiple certs to the ingress.")
@ -167,8 +167,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("multicluster ingress should get instance group annotation", func() {
name := "echomap"
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{
ingress.IngressClassKey: ingress.MulticlusterIngressClassValue,
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "http"), ns, map[string]string{
e2eingress.IngressClassKey: e2eingress.MulticlusterIngressClassValue,
}, map[string]string{})
ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
@ -186,13 +186,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
// Note: All resources except the firewall rule have an annotation.
umKey := ingress.StatusPrefix + "/url-map"
fwKey := ingress.StatusPrefix + "/forwarding-rule"
tpKey := ingress.StatusPrefix + "/target-proxy"
fwsKey := ingress.StatusPrefix + "/https-forwarding-rule"
tpsKey := ingress.StatusPrefix + "/https-target-proxy"
scKey := ingress.StatusPrefix + "/ssl-cert"
beKey := ingress.StatusPrefix + "/backends"
umKey := e2eingress.StatusPrefix + "/url-map"
fwKey := e2eingress.StatusPrefix + "/forwarding-rule"
tpKey := e2eingress.StatusPrefix + "/target-proxy"
fwsKey := e2eingress.StatusPrefix + "/https-forwarding-rule"
tpsKey := e2eingress.StatusPrefix + "/https-target-proxy"
scKey := e2eingress.StatusPrefix + "/ssl-cert"
beKey := e2eingress.StatusPrefix + "/backends"
wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -272,8 +272,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.NEGAnnotation: `{"ingress": true}`,
conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{
e2eingress.NEGAnnotation: `{"ingress": true}`,
})
for _, t := range conformanceTests {
ginkgo.By(t.EntryLog)
@ -288,7 +288,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should be able to switch between IG and NEG modes", func() {
var err error
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
err = gceController.WaitForNegBackendService(jig.GetServicePorts(false))
framework.ExpectNoError(err)
@ -297,7 +297,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
@ -315,7 +315,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
@ -332,7 +332,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should be able to create a ClusterIP service", func() {
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
svcPorts := jig.GetServicePorts(false)
err := gceController.WaitForNegBackendService(svcPorts)
@ -367,7 +367,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
jig.WaitForIngressToStable()
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
@ -392,7 +392,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
name := "hostname"
replicas := 8
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
jig.WaitForIngressToStable()
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
@ -459,11 +459,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err)
var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
var status e2eingress.NegStatus
v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
if !ok {
// Wait for NEG sync loop to find NEGs
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
err = json.Unmarshal([]byte(v), &status)
@ -471,7 +471,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
// Expect 2 NEGs to be created based on the test setup (neg-exposed)
if len(status.NetworkEndpointGroups) != 2 {
@ -506,7 +506,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
framework.ExpectNoError(err)
@ -528,7 +528,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
ginkgo.By("Create a basic HTTP ingress using standalone NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
name := "hostname"
@ -539,7 +539,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
@ -550,7 +550,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
@ -561,7 +561,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
@ -572,7 +572,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
delete(svc.Annotations, ingress.NEGAnnotation)
delete(svc.Annotations, e2eingress.NEGAnnotation)
// Service cannot be ClusterIP if it's using Instance Groups.
svc.Spec.Type = v1.ServiceTypeNodePort
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
@ -589,7 +589,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Platform specific setup
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
jig.Class = ingress.MulticlusterIngressClassValue
jig.Class = e2eingress.MulticlusterIngressClassValue
jig.PollInterval = 5 * time.Second
ginkgo.By("Initializing gce controller")
gceController = &gce.IngressController{
@ -626,8 +626,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
})
ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.IngressStaticIPKey: ipName,
conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{
e2eingress.IngressStaticIPKey: ipName,
})
for _, t := range conformanceTests {
ginkgo.By(t.EntryLog)
@ -651,9 +651,9 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should remove clusters as expected", func() {
ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName,
e2eingress.IngressStaticIPKey: ipName,
}
ingFilePath := filepath.Join(ingress.IngressManifestPath, "http")
ingFilePath := filepath.Join(e2eingress.IngressManifestPath, "http")
jig.CreateIngress(ingFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
name := jig.Ingress.Name
@ -681,7 +681,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() {
ginkgo.By("Creating a single cluster ingress first")
jig.Class = ""
singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2")
singleIngFilePath := filepath.Join(e2eingress.GCEIngressManifestPath, "static-ip-2")
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
// jig.Ingress will be overwritten when we create MCI, so keep a reference.
@ -689,11 +689,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Create the multi-cluster ingress next.
ginkgo.By("Creating a multi-cluster ingress next")
jig.Class = ingress.MulticlusterIngressClassValue
jig.Class = e2eingress.MulticlusterIngressClassValue
ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName,
e2eingress.IngressStaticIPKey: ipName,
}
multiIngFilePath := filepath.Join(ingress.IngressManifestPath, "http")
multiIngFilePath := filepath.Join(e2eingress.IngressManifestPath, "http")
jig.CreateIngress(multiIngFilePath, ns, ingAnnotations, map[string]string{})
jig.WaitForIngress(false /*waitForNodePort*/)
mciIngress := jig.Ingress
@ -703,7 +703,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.Class = ""
jig.TryDeleteIngress()
jig.Ingress = mciIngress
jig.Class = ingress.MulticlusterIngressClassValue
jig.Class = e2eingress.MulticlusterIngressClassValue
jig.WaitForIngress(false /*waitForNodePort*/)
ginkgo.By("Cleanup: Deleting the multi-cluster ingress")
@ -713,13 +713,13 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Time: borderline 5m, slow by design
ginkgo.Describe("[Slow] Nginx", func() {
var nginxController *ingress.NginxIngressController
var nginxController *e2eingress.NginxIngressController
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
ginkgo.By("Initializing nginx controller")
jig.Class = "nginx"
nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client}
nginxController = &e2eingress.NginxIngressController{Ns: ns, Client: jig.Client}
// TODO: This test may fail on other platforms. We can simply skip it
// but we want to allow easy testing where a user might've hand
@ -753,7 +753,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Poll more frequently to reduce e2e completion time.
// This test runs in presubmit.
jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
conformanceTests = e2eingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests {
ginkgo.By(t.EntryLog)
t.Execute()
@ -775,11 +775,11 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
}
}
func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
func executePresharedCertTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) {
preSharedCertName := "test-pre-shared-cert"
ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com"
cert, key, err := ingress.GenerateRSACerts(testHostname, true)
cert, key, err := e2eingress.GenerateRSACerts(testHostname, true)
framework.ExpectNoError(err)
gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err)
@ -811,36 +811,36 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
ginkgo.By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation.
ingAnnotations := map[string]string{
ingress.IngressPreSharedCertKey: preSharedCertName,
e2eingress.IngressPreSharedCertKey: preSharedCertName,
// Disallow HTTP to save resources. This is irrelevant to the
// pre-shared cert test.
ingress.IngressAllowHTTPKey: "false",
e2eingress.IngressAllowHTTPKey: "false",
}
if staticIPName != "" {
ingAnnotations[ingress.IngressStaticIPKey] = staticIPName
ingAnnotations[e2eingress.IngressStaticIPKey] = staticIPName
}
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
ginkgo.By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
}
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
ingress.IngressStaticIPKey: ipName,
ingress.IngressAllowHTTPKey: "false",
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *e2eingress.TestJig, ipName, ip string) {
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "static-ip"), f.Namespace.Name, map[string]string{
e2eingress.IngressStaticIPKey: ipName,
e2eingress.IngressAllowHTTPKey: "false",
}, map[string]string{})
ginkgo.By("waiting for Ingress to come up with ip: " + ip)
httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
httpClient := e2eingress.BuildInsecureClient(e2eingress.IngressReqTimeout)
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
ginkgo.By("should reject HTTP traffic")
framework.ExpectNoError(ingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
}
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) {
ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
defer func() {
@ -856,9 +856,9 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP")
ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
timeoutClient := &http.Client{Timeout: e2eingress.IngressReqTimeout}
err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := ingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
resp, err := e2eingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
framework.Logf("SimpleGET failed: %v", err)
return false, nil
@ -872,7 +872,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress")
}
func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
@ -889,10 +889,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
return true, nil
}
var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
var status e2eingress.NegStatus
v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
if !ok {
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
@ -901,7 +901,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
if len(status.NetworkEndpointGroups) != negs {
framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))


@ -32,7 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress"
e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
@ -64,10 +64,10 @@ var (
// IngressScaleFramework defines the framework for ingress scale testing.
type IngressScaleFramework struct {
Clientset clientset.Interface
Jig *ingress.TestJig
Jig *e2eingress.TestJig
GCEController *gce.IngressController
CloudConfig framework.CloudConfig
Logger ingress.TestLogger
Logger e2eingress.TestLogger
Namespace string
EnableTLS bool
@ -97,7 +97,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
Namespace: ns,
Clientset: cs,
CloudConfig: cloudConfig,
Logger: &ingress.E2ELogger{},
Logger: &e2eingress.E2ELogger{},
EnableTLS: true,
NumIngressesTest: []int{
numIngressesSmall,
@ -111,7 +111,7 @@ func NewIngressScaleFramework(cs clientset.Interface, ns string, cloudConfig fra
// PrepareScaleTest prepares framework for ingress scale testing.
func (f *IngressScaleFramework) PrepareScaleTest() error {
f.Logger.Infof("Initializing ingress test suite and gce controller...")
f.Jig = ingress.NewIngressTestJig(f.Clientset)
f.Jig = e2eingress.NewIngressTestJig(f.Clientset)
f.Jig.Logger = f.Logger
f.Jig.PollInterval = scaleTestPollInterval
f.GCEController = &gce.IngressController{


@ -34,7 +34,7 @@ import (
gcecloud "k8s.io/legacy-cloud-providers/gce"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ingress"
e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/network/scale"
)
@ -153,7 +153,7 @@ func main() {
// Setting up a localized scale test framework.
f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
f.Logger = &ingress.GLogger{}
f.Logger = &e2eingress.GLogger{}
// Customizing scale test.
f.EnableTLS = enableTLS
f.OutputFile = outputFile


@ -45,7 +45,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -931,7 +931,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}()
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Error in retrieving pause pod deployment")
@ -2992,7 +2992,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By("Creating pause pod deployment to make sure, pausePods are in desired state")
deployment := createPausePodDeployment(cs, "pause-pod-deployment", namespace, 1)
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
defer func() {
framework.Logf("Deleting deployment")
@ -3360,7 +3360,7 @@ func createAndGetExternalServiceFQDN(cs clientset.Interface, ns, serviceName str
func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas int) *appsv1.Deployment {
labels := map[string]string{"deployment": "agnhost-pause"}
pauseDeployment := e2edeploy.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
pauseDeployment := e2edeployment.NewDeployment(name, int32(replicas), labels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
pauseDeployment.Spec.Template.Spec.Containers[0] = v1.Container{
Name: "agnhost-pause",


@ -36,7 +36,7 @@ import (
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -418,7 +418,7 @@ var _ = SIGDescribe("kubelet", func() {
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
_, nfsServerPod, nfsIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
_, nfsServerPod, nfsIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
})
ginkgo.AfterEach(func() {


@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -133,7 +133,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
} else {
dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
}
gpuResourceName = gpu.NVIDIAGPUResourceName
gpuResourceName = e2egpu.NVIDIAGPUResourceName
framework.Logf("Using %v", dsYamlURL)
// Creates the DaemonSet that installs Nvidia Drivers.


@ -40,7 +40,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
@ -686,7 +686,7 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe
func runPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet {
rs := createPauseRS(f, conf)
framework.ExpectNoError(replicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout))
framework.ExpectNoError(e2ereplicaset.WaitForReplicaSetTargetAvailableReplicasWithTimeout(f.ClientSet, rs, conf.Replicas, framework.PodGetTimeout))
return rs
}


@ -54,7 +54,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -83,7 +83,7 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
SupportedFsType: sets.NewString(
"", // Default fsType
),
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
Capabilities: capabilities,
@ -363,7 +363,7 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
Name: GCEPDCSIDriverName,
FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(


@ -54,12 +54,12 @@ import (
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -99,7 +99,7 @@ func InitNFSDriver() testsuites.TestDriver {
Name: "nfs",
InTreePluginName: "kubernetes.io/nfs",
MaxFileSize: testpatterns.FileSizeLarge,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -124,8 +124,8 @@ func (n *nfsDriver) GetDriverInfo() *testsuites.DriverInfo {
func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
nv, ok := volume.(*nfsVolume)
func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
nv, ok := e2evolume.(*nfsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
@ -136,8 +136,8 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
}
}
func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
nv, ok := volume.(*nfsVolume)
func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
nv, ok := e2evolume.(*nfsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
return &v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
@ -164,11 +164,11 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
// TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner
// is not enough. We should create new clusterrole for testing.
err := auth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
serviceaccount.MakeUsername(ns.Name, "default"),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization: %v", err)
@ -199,7 +199,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
c, serverPod, serverIP := volume.NewNFSServer(cs, ns.Name, []string{})
c, serverPod, serverIP := e2evolume.NewNFSServer(cs, ns.Name, []string{})
config.ServerConfig = &c
return &nfsVolume{
serverIP: serverIP,
@ -241,7 +241,7 @@ func InitGlusterFSDriver() testsuites.TestDriver {
Name: "gluster",
InTreePluginName: "kubernetes.io/glusterfs",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -265,8 +265,8 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
}
func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := volume.(*glusterVolume)
func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := e2evolume.(*glusterVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
name := gv.prefix + "-server"
@ -280,8 +280,8 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume t
}
}
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := volume.(*glusterVolume)
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := e2evolume.(*glusterVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
name := gv.prefix + "-server"
@ -308,7 +308,7 @@ func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType
cs := f.ClientSet
ns := f.Namespace
c, serverPod, _ := volume.NewGlusterfsServer(cs, ns.Name)
c, serverPod, _ := e2evolume.NewGlusterfsServer(cs, ns.Name)
config.ServerConfig = &c
return &glusterVolume{
prefix: config.Prefix,
@ -391,8 +391,8 @@ func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
iv, ok := volume.(*iSCSIVolume)
func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
iv, ok := e2evolume.(*iSCSIVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
volSource := v1.VolumeSource{
@ -409,8 +409,8 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
return &volSource
}
func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
iv, ok := volume.(*iSCSIVolume)
func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
iv, ok := e2evolume.(*iSCSIVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
pvSource := v1.PersistentVolumeSource{
@ -452,10 +452,10 @@ func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
}
// newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
func newISCSIServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, ip, iqn string) {
func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) {
// Generate cluster-wide unique IQN
iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
config = volume.TestConfig{
config = e2evolume.TestConfig{
Namespace: namespace,
Prefix: "iscsi",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
@ -471,15 +471,15 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config volume.Tes
ServerReadyMessage: "iscsi target started",
ServerHostNetwork: true,
}
pod, ip = volume.CreateStorageServer(cs, config)
pod, ip = e2evolume.CreateStorageServer(cs, config)
// Make sure the client runs on the same node as the server so we don't need to open any firewalls.
config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
return config, pod, ip, iqn
}
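For orientation, a driver's CreateVolume typically hands the returned TestConfig to the test via PerTestConfig.ServerConfig, which convertTestConfig then reuses. A minimal sketch of that wiring, assuming it lives in the same file (function and variable names are illustrative, not the in-tree ones):

func createISCSIServerForTest(f *framework.Framework, cfg *testsuites.PerTestConfig) (string, string) {
	// Start the iSCSI server pod and remember its config on the PerTestConfig so
	// convertTestConfig() reuses it, including the ClientNodeSelection that pins
	// client pods to the server's node.
	c, serverPod, serverIP, serverIQN := newISCSIServer(f.ClientSet, f.Namespace.Name)
	cfg.ServerConfig = &c
	framework.Logf("iSCSI server pod %s serving %s on %s", serverPod.Name, serverIQN, serverIP)
	return serverIP, serverIQN
}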
// newRBDServer is a CephRBD-specific wrapper for CreateStorageServer.
func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
config = volume.TestConfig{
func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
config = e2evolume.TestConfig{
Namespace: namespace,
Prefix: "rbd",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
@ -489,7 +489,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestC
},
ServerReadyMessage: "Ceph is ready",
}
pod, ip = volume.CreateStorageServer(cs, config)
pod, ip = e2evolume.CreateStorageServer(cs, config)
// create secrets for the server
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
@ -543,7 +543,7 @@ func InitRbdDriver() testsuites.TestDriver {
InTreePluginName: "kubernetes.io/rbd",
FeatureTag: "[Feature:Volumes][Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -571,8 +571,8 @@ func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo {
func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
rv, ok := volume.(*rbdVolume)
func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
rv, ok := e2evolume.(*rbdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
volSource := v1.VolumeSource{
@ -593,8 +593,8 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
return &volSource
}
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
rv, ok := volume.(*rbdVolume)
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
rv, ok := e2evolume.(*rbdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
f := rv.f
@ -671,7 +671,7 @@ func InitCephFSDriver() testsuites.TestDriver {
InTreePluginName: "kubernetes.io/cephfs",
FeatureTag: "[Feature:Volumes][Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -694,8 +694,8 @@ func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo {
func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := volume.(*cephVolume)
func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := e2evolume.(*cephVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
return &v1.VolumeSource{
@ -710,8 +710,8 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
}
}
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := volume.(*cephVolume)
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := e2evolume.(*cephVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
ns := cv.f.Namespace
@ -793,7 +793,7 @@ func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
// hostPath doesn't support readOnly volume
if readOnly {
return nil
@ -868,8 +868,8 @@ func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
hv, ok := volume.(*hostPathSymlinkVolume)
func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
hv, ok := e2evolume.(*hostPathSymlinkVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")
// hostPathSymlink doesn't support readOnly volume
@ -1010,7 +1010,7 @@ func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo {
func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
// emptydir doesn't support readOnly volume
if readOnly {
return nil
@ -1059,7 +1059,7 @@ func InitCinderDriver() testsuites.TestDriver {
Name: "cinder",
InTreePluginName: "kubernetes.io/cinder",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -1089,8 +1089,8 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("openstack")
}
func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := volume.(*cinderVolume)
func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
volSource := v1.VolumeSource{
@ -1105,8 +1105,8 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume test
return &volSource
}
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := volume.(*cinderVolume)
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
cv, ok := e2evolume.(*cinderVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume")
pvSource := v1.PersistentVolumeSource{
@ -1233,7 +1233,7 @@ func InitGcePdDriver() testsuites.TestDriver {
Name: "gcepd",
InTreePluginName: "kubernetes.io/gce-pd",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: supportedTypes,
@ -1267,8 +1267,8 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
}
func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := volume.(*gcePdVolume)
func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
gv, ok := e2evolume.(*gcePdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
volSource := v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
@ -1282,8 +1282,8 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume tests
return &volSource
}
func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := volume.(*gcePdVolume)
func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gv, ok := e2evolume.(*gcePdVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
pvSource := v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
@ -1372,7 +1372,7 @@ func InitVSphereDriver() testsuites.TestDriver {
Name: "vsphere",
InTreePluginName: "kubernetes.io/vsphere-volume",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -1398,8 +1398,8 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("vsphere")
}
func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
vsv, ok := volume.(*vSphereVolume)
func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
vsv, ok := e2evolume.(*vSphereVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
// vSphere driver doesn't seem to support readOnly volume
@ -1418,8 +1418,8 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume tes
return &volSource
}
func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
vsv, ok := volume.(*vSphereVolume)
func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
vsv, ok := e2evolume.(*vSphereVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
// vSphere driver doesn't seem to support readOnly volume
@ -1496,7 +1496,7 @@ func InitAzureDiskDriver() testsuites.TestDriver {
Name: "azure-disk",
InTreePluginName: "kubernetes.io/azure-disk",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -1529,8 +1529,8 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern)
e2eskipper.SkipUnlessProviderIs("azure")
}
func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
av, ok := volume.(*azureDiskVolume)
func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
av, ok := e2evolume.(*azureDiskVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@ -1549,8 +1549,8 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, volume t
return &volSource
}
func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := volume.(*azureDiskVolume)
func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := e2evolume.(*azureDiskVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
@ -1626,7 +1626,7 @@ func InitAwsDriver() testsuites.TestDriver {
Name: "aws",
InTreePluginName: "kubernetes.io/aws-ebs",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
SupportedFsType: sets.NewString(
@ -1664,8 +1664,8 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
e2eskipper.SkipUnlessProviderIs("aws")
}
func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
av, ok := volume.(*awsVolume)
func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
av, ok := e2evolume.(*awsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
volSource := v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
@ -1679,8 +1679,8 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
return &volSource
}
func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := volume.(*awsVolume)
func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
av, ok := e2evolume.(*awsVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
pvSource := v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
@ -1921,8 +1921,8 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
}
}
func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
lv, ok := volume.(*localVolume)
func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
lv, ok := e2evolume.(*localVolume)
framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume")
return &v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{

View File

@ -31,10 +31,10 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -122,7 +122,7 @@ type driverDefinition struct {
// SupportedSizeRange defines the desired size of dynamically
// provisioned volumes.
SupportedSizeRange volume.SizeRange
SupportedSizeRange e2evolume.SizeRange
// ClientNodeName selects a specific node for scheduling test pods.
// Can be left empty. Most drivers should not need this and instead
@ -146,7 +146,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
}
func init() {
config.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once")
e2econfig.Flags.Var(testDriverParameter{}, "storage.testdriver", "name of a .yaml or .json file that defines a driver for storage testing, can be used more than once")
}
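Since the Var call above implies e2econfig.Flags is a standard *flag.FlagSet, additional suite-specific options can be registered the same way. A purely illustrative sketch (the flag name and variable are made up, not part of the real external driver code):

package external

import (
	e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
)

// storageDriverReadyTimeout is a hypothetical option shown only to illustrate the
// registration pattern; it does not exist upstream.
var storageDriverReadyTimeout string

func init() {
	e2econfig.Flags.StringVar(&storageDriverReadyTimeout, "storage.testdriver-ready-timeout", "5m",
		"illustrative flag: how long to wait for an external storage driver to become ready")
}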
// testDriverParameter is used to hook loading of the driver
@ -203,7 +203,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
"", // Default fsType
),
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
}
@ -276,13 +276,13 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
}
}
func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
var (
sc *storagev1.StorageClass
err error
)
f := config.Framework
f := e2econfig.Framework
switch {
case d.StorageClass.FromName:
@ -331,15 +331,15 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
return snapshotClass, nil
}
func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured {
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
}
f := config.Framework
f := e2econfig.Framework
snapshotter := d.DriverInfo.Name
parameters := map[string]string{}
ns := config.Framework.Namespace.Name
ns := e2econfig.Framework.Namespace.Name
suffix := "vsc"
switch {
@ -368,24 +368,24 @@ func (d *driverDefinition) GetSnapshotClass(config *testsuites.PerTestConfig) *u
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}
func (d *driverDefinition) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 {
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
volume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return volume.Attributes, volume.Shared, volume.ReadOnly
e2evolume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
}
func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string {
func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string {
return d.DriverInfo.Name
}
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
config := &testsuites.PerTestConfig{
e2econfig := &testsuites.PerTestConfig{
Driver: d,
Prefix: "external",
Framework: f,
ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
}
return config, func() {}
return e2econfig, func() {}
}
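For context, callers consume PrepareTest through the TestDriver interface roughly as follows (a sketch of the call site; the helper name is illustrative):

// Sketch: setting up a driver for a single test and tearing it down afterwards.
func runWithDriver(d testsuites.TestDriver, f *framework.Framework) {
	e2econfig, cleanup := d.PrepareTest(f)
	defer cleanup()
	// e2econfig.Framework, e2econfig.ClientNodeSelection, etc. are then used to
	// build the test's volumes and pods.
	_ = e2econfig
}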

View File

@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
)
@ -34,7 +34,7 @@ func TestDriverParameter(t *testing.T) {
"", // Default fsType
),
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
}

View File

@ -32,8 +32,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -51,8 +51,8 @@ const (
// testFlexVolume tests that a client pod using a given flexvolume driver
// successfully mounts it and runs
func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framework) {
tests := []volume.Test{
func testFlexVolume(driver string, config e2evolume.TestConfig, f *framework.Framework) {
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
@ -64,7 +64,7 @@ func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framew
ExpectedContent: "Hello from flexvolume!",
},
}
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}
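A call site for testFlexVolume looks roughly like the following (a sketch: the driver name, driverDir, and suffix handling are assumptions based on the surrounding test, not verbatim upstream code):

ginkgo.It("should be mountable when non-attachable", func() {
	driver := "dummy"                        // illustrative driver name
	driverInstallAs := driver + "-" + suffix // keep parallel installs apart
	installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
	testFlexVolume(driverInstallAs, config, f)
})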
// installFlex installs the driver found at filePath on the node, and restarts
@ -92,7 +92,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
cmd := fmt.Sprintf("sudo mkdir -p %s", flexDir)
sshAndLog(cmd, host, true /*failOnError*/)
data := testfiles.ReadOrDie(filePath)
data := e2etestfiles.ReadOrDie(filePath)
cmd = fmt.Sprintf("sudo tee <<'EOF' %s\n%s\nEOF", flexFile, string(data))
sshAndLog(cmd, host, true /*failOnError*/)
@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var cs clientset.Interface
var ns *v1.Namespace
var node *v1.Node
var config volume.TestConfig
var config e2evolume.TestConfig
var suffix string
ginkgo.BeforeEach(func() {
@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var err error
node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
config = volume.TestConfig{
config = e2evolume.TestConfig{
Namespace: ns.Name,
Prefix: "flex",
ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},

View File

@ -31,7 +31,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectEqual(len(pvs), 1)
ginkgo.By("Creating a deployment with the provisioned volume")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
@ -174,7 +174,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
framework.ExpectNoError(err, "While getting pods from deployment")
gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0]

View File

@ -33,7 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
// Keeping the pod on the same node reproduces the scenario where the volume might already be mounted when a resize is attempted.
// We should consider adding a unit test that exercises this better.
ginkgo.By("Creating a deployment with selected PVC")
deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
deployment, err := e2edeployment.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
framework.ExpectNoError(err, "Failed creating deployment %v", err)
defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Getting a pod from deployment")
podList, err := e2edeploy.GetPodsForDeployment(c, deployment)
podList, err := e2edeployment.GetPodsForDeployment(c, deployment)
framework.ExpectNoError(err, "While getting pods from deployment")
gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0]
@ -172,7 +172,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
if err != nil {
return false, fmt.Errorf("failed to get pods for deployment: %v", err)
}
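The poll body above is cut off by the hunk; the overall pattern is roughly the following (a sketch of the polling idiom, not the exact upstream implementation):

// Sketch: wait until the deployment has recreated a Running pod.
func waitForRecreatedPod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
	var runningPod v1.Pod
	waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
		if err != nil {
			return false, fmt.Errorf("failed to get pods for deployment: %v", err)
		}
		for _, pod := range podList.Items {
			if pod.Status.Phase == v1.PodRunning {
				runningPod = pod
				return true, nil
			}
		}
		return false, nil // keep polling until a replacement pod is Running
	})
	return runningPod, waitErr
}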

View File

@ -36,7 +36,7 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
// Start the NFS server pod.
_, nfsServerPod, nfsServerIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
_, nfsServerPod, nfsServerIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
nfsPVconfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,

View File

@ -44,7 +44,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -1194,12 +1194,12 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss)
e2estatefulset.WaitForRunningAndReady(config.client, ssReplicas, ss)
return ss
}
func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) {
pods := e2esset.GetPodList(config.client, ss)
pods := e2estatefulset.GetPodList(config.client, ss)
nodes := sets.NewString()
for _, pod := range pods.Items {

View File

@ -33,8 +33,8 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
)
ginkgo.BeforeEach(func() {
_, nfsServerPod, serverIP = volume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
_, nfsServerPod, serverIP = e2evolume.NewNFSServer(c, ns, []string{"-G", "777", "/exports"})
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "nfs-",
Labels: volLabel,
@ -315,7 +315,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.Context("pods that use multiple volumes", func() {
ginkgo.AfterEach(func() {
e2esset.DeleteAllStatefulSets(c, ns)
e2estatefulset.DeleteAllStatefulSets(c, ns)
})
ginkgo.It("should be reschedulable [Slow]", func() {
@ -355,13 +355,13 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, 1, ss)
e2estatefulset.WaitForRunningAndReady(c, 1, ss)
ginkgo.By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick
ss, err = e2esset.Scale(c, ss, 0)
ss, err = e2estatefulset.Scale(c, ss, 0)
framework.ExpectNoError(err)
e2esset.WaitForStatusReplicas(c, ss, 0)
e2estatefulset.WaitForStatusReplicas(c, ss, 0)
err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec, metav1.CreateOptions{})
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, 1, ss)
e2estatefulset.WaitForRunningAndReady(c, 1, ss)
})
})
})

View File

@ -19,19 +19,19 @@ package testpatterns
import (
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)
const (
// MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * volume.MiB
MinFileSize = 1 * e2evolume.MiB
// FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * volume.MiB
FileSizeSmall = 1 * e2evolume.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * volume.MiB
FileSizeMedium = 100 * e2evolume.MiB
// FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * volume.GiB
FileSizeLarge = 1 * e2evolume.GiB
)
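For reference, these units are plain byte multipliers, so the constants resolve to concrete sizes (assuming the usual binary definitions MiB = 1<<20 and GiB = 1<<30 in the e2evolume package):

// Worked out in bytes (illustrative):
//   FileSizeSmall  = 1 * MiB   = 1,048,576 bytes
//   FileSizeMedium = 100 * MiB = 104,857,600 bytes
//   FileSizeLarge  = 1 * GiB   = 1,073,741,824 bytes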
// TestVolType represents a volume type to be tested in a TestSuite

View File

@ -26,7 +26,7 @@ limitations under the License.
package testsuites_test
import (
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
)
@ -39,7 +39,7 @@ func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo {
Name: "fake",
FeatureTag: "",
TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV},
SupportedSizeRange: volume.SizeRange{Min: "1Mi", Max: "1Gi"},
SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"},
}
}

View File

@ -40,10 +40,10 @@ import (
"k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -77,7 +77,7 @@ type TestSuiteInfo struct {
Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange volume.SizeRange // Size range supported by the test suite
SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@ -181,7 +181,7 @@ type VolumeResource struct {
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange volume.SizeRange) *VolumeResource {
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
r := VolumeResource{
Config: config,
Pattern: pattern,
@ -423,12 +423,12 @@ func deleteStorageClass(cs clientset.Interface, className string) error {
// the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect,
// which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *PerTestConfig) volume.TestConfig {
func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
if in.ServerConfig != nil {
return *in.ServerConfig
}
return volume.TestConfig{
return e2evolume.TestConfig{
Namespace: in.Framework.Namespace.Name,
Prefix: in.Prefix,
ClientNodeSelection: in.ClientNodeSelection,
@ -439,7 +439,7 @@ func convertTestConfig(in *PerTestConfig) volume.TestConfig {
// intersection of the intervals (if it exists) and return the minimum of the intersection
// to be used as the claim size for the test.
// If a value is not set, there is no minimum or maximum size limitation, and we use a default size.
func getSizeRangesIntersection(first volume.SizeRange, second volume.SizeRange) (string, error) {
func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error
@ -575,7 +575,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
nodeLimit := 25
metricsGrabber, err := metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil {
framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)

View File

@ -19,7 +19,7 @@ package testsuites
import (
"testing"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)
// getSizeRangesIntersection takes two instances of storage size ranges and determines the
@ -43,8 +43,8 @@ import (
// |---------------------------------------------------------------|
func Test_getSizeRangesIntersection(t *testing.T) {
type args struct {
first volume.SizeRange
second volume.SizeRange
first e2evolume.SizeRange
second e2evolume.SizeRange
}
tests := []struct {
name string
@ -55,10 +55,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #1: first{min=A,max=?} second{min=C,max=?} where C > A ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "10Gi",
},
},
@ -68,10 +68,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #1: first{min=A,max=?} second{min=C,max=?} where C < A ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "1Gi",
},
},
@ -81,10 +81,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #2: first{min=A,max=?} second{min=C,max=D} where A > D ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "1Gi",
Max: "4Gi",
},
@ -95,11 +95,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #2: first{min=A,max=?} second{min=C,max=D} where D > A > C ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "3Gi",
Max: "10Gi",
},
@ -110,11 +110,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #2: first{min=A,max=?} second{min=C,max=D} where A < C ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "6Gi",
Max: "10Gi",
},
@ -125,11 +125,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #3: first{min=A,max=?} second{min=?,max=D} where A > D",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "1Gi",
},
},
@ -139,11 +139,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #3: first{min=A,max=?} second{min=?,max=D} where A < D",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "10Gi",
},
},
@ -153,11 +153,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #4: first{min=A,max=?} second{min=?,max=?} ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "",
},
second: volume.SizeRange{},
second: e2evolume.SizeRange{},
},
want: "5Gi",
wantErr: false,
@ -166,11 +166,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #5: first{min=A,max=B} second{min=C,max=?} where C < A ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "1Gi",
},
},
@ -180,11 +180,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #5: first{min=A,max=B} second{min=C,max=?} where B > C > A ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "6Gi",
},
},
@ -194,11 +194,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #5: first{min=A,max=B} second{min=C,max=?} where C > B ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "15Gi",
},
},
@ -208,11 +208,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < B < C < D",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "6Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "7Gi",
Max: "8Gi",
},
@ -223,11 +223,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #6: first{min=A,max=B} second{min=C,max=D} where A < C < B < D ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "8Gi",
Max: "15Gi",
},
@ -238,11 +238,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #7: first{min=A,max=B} second{min=?,max=D} where D < A",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "3Gi",
},
},
@ -252,11 +252,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #7: first{min=A,max=B} second{min=?,max=D} where B > D > A",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "8Gi",
},
},
@ -266,11 +266,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #7: first{min=A,max=B} second{min=?,max=D} where D > B",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "15Gi",
},
},
@ -280,11 +280,11 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #8: first{min=A,max=B} second{min=?,max=?}",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
second: volume.SizeRange{},
second: e2evolume.SizeRange{},
},
want: "5Gi",
wantErr: false,
@ -292,10 +292,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #9: first{min=?,max=B} second{min=C,max=?} where C > B",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "5Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "10Gi",
},
},
@ -305,10 +305,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #9: first{min=?,max=B} second{min=C,max=?} where C < B",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "5Gi",
},
},
@ -318,10 +318,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #10: first{min=?,max=B} second{min=C,max=D} where B > D",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "1Gi",
Max: "5Gi",
},
@ -332,10 +332,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #10: first{min=?,max=B} second{min=C,max=D} where C < B < D",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "5Gi",
Max: "15Gi",
},
@ -346,10 +346,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #10: first{min=?,max=B} second{min=C,max=D} where B < C",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Min: "15Gi",
Max: "20Gi",
},
@ -360,10 +360,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #11: first{min=?,max=B} second{min=?,max=D} where D < B",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "5Gi",
},
},
@ -373,10 +373,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #11: first{min=?,max=B} second{min=?,max=D} where D > B",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{
second: e2evolume.SizeRange{
Max: "15Gi",
},
},
@ -386,10 +386,10 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #12: first{min=?,max=B} second{min=?,max=?} ",
args: args{
first: volume.SizeRange{
first: e2evolume.SizeRange{
Max: "10Gi",
},
second: volume.SizeRange{},
second: e2evolume.SizeRange{},
},
want: minValidSize,
wantErr: false,
@ -397,8 +397,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #13: first{min=?,max=?} second{min=C,max=?} ",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{
Min: "5Gi",
},
},
@ -408,8 +408,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C < D",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{
Min: "5Gi",
Max: "10Gi",
},
@ -420,8 +420,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C > D",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{
Min: "10Gi",
Max: "5Gi",
},
@ -432,8 +432,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #14: first{min=?,max=?} second{min=C,max=D} where C = D",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{
Min: "1Mi",
Max: "1Mi",
},
@ -444,8 +444,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #15: first{min=?,max=?} second{min=?,max=D}",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{
Max: "10Gi",
},
},
@ -455,8 +455,8 @@ func Test_getSizeRangesIntersection(t *testing.T) {
{
name: "case #16: first{min=?,max=?} second{min=?,max=?}",
args: args{
first: volume.SizeRange{},
second: volume.SizeRange{},
first: e2evolume.SizeRange{},
second: e2evolume.SizeRange{},
},
want: minValidSize,
wantErr: false,
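The table-driven execution loop for these cases is not part of the hunk; it follows the standard pattern, roughly (a sketch, not necessarily the exact upstream loop):

for _, tt := range tests {
	t.Run(tt.name, func(t *testing.T) {
		got, err := getSizeRangesIntersection(tt.args.first, tt.args.second)
		if (err != nil) != tt.wantErr {
			t.Errorf("getSizeRangesIntersection() error = %v, wantErr %v", err, tt.wantErr)
			return
		}
		if got != tt.want {
			t.Errorf("getSizeRangesIntersection() = %v, want %v", got, tt.want)
		}
	})
}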

View File

@ -32,7 +32,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -294,8 +294,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
Containers: []v1.Container{
{
Name: "csi-volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command),
Image: e2evolume.GetTestImage(framework.BusyBoxImage),
Command: e2evolume.GenerateScriptCmd(command),
},
},
RestartPolicy: v1.RestartPolicyNever,

View File

@ -32,7 +32,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -54,7 +54,7 @@ func InitMultiVolumeTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},

View File

@ -38,7 +38,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -78,7 +78,7 @@ func InitProvisioningTestSuite() TestSuite {
testpatterns.BlockVolModeDynamicPV,
testpatterns.NtfsDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},
@ -221,7 +221,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode,
@ -229,7 +229,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent,
},
}
volume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
}
l.testCase.TestDynamicProvisioning()
})
@ -249,7 +249,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode,
@ -257,7 +257,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent,
},
}
volume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
}
l.testCase.TestDynamicProvisioning()
})
@ -297,7 +297,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
myTestCase.Class = nil // Do not create/delete the storage class in TestDynamicProvisioning, it already exists.
myTestCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
Mode: pattern.VolMode,
@ -305,7 +305,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
ExpectedContent: expectedContent,
},
}
volume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests)
e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests)
}
myTestCase.TestDynamicProvisioning()
}(i)
@ -467,7 +467,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
pod = nil // Don't stop twice.
// Get a new copy of the PV
volume, err := getBoundPV(client, claim)
e2evolume, err := getBoundPV(client, claim)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
@ -475,7 +475,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// We give the second pod the additional responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range volume.Spec.MountOptions {
for _, option := range e2evolume.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
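To make the loop above concrete: with MountOptions = ["noatime"] (an arbitrary example option), the reader pod's command ends up as a single shell pipeline, roughly:

// before the loop (sketch):
//   grep 'hello world' /mnt/test/data
// after appending the mount-option check:
//   grep 'hello world' /mnt/test/data && ( mount | grep 'on /mnt/test' | awk '{print $6}' |
//       sed 's/^(/,/; s/)$/,/' | grep -q ,noatime, )
// The sed rewrites the mount-options field, e.g. "(rw,noatime,relatime)", to
// ",rw,noatime,relatime," so that ",noatime," can be matched exactly.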
@ -486,7 +486,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
}
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return volume
return e2evolume
}
// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
@ -650,8 +650,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
Containers: []v1.Container{
{
Name: "volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command),
Image: e2evolume.GetTestImage(framework.BusyBoxImage),
Command: e2evolume.GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
@ -708,7 +708,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
func prepareSnapshotDataSourceForProvisioning(
f *framework.Framework,
config volume.TestConfig,
config e2evolume.TestConfig,
client clientset.Interface,
dynamicClient dynamic.Interface,
initClaim *v1.PersistentVolumeClaim,
@ -729,7 +729,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.ExpectNoError(err)
// write namespace to the /mnt/test (= the volume).
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */),
Mode: mode,
@ -737,7 +737,7 @@ func prepareSnapshotDataSourceForProvisioning(
ExpectedContent: injectContent,
},
}
volume.InjectContent(f, config, nil, "", tests)
e2evolume.InjectContent(f, config, nil, "", tests)
ginkgo.By("[Initialize dataSource]creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(SnapshotClassGVR).Create(context.TODO(), snapshotClass, metav1.CreateOptions{})
@ -784,7 +784,7 @@ func prepareSnapshotDataSourceForProvisioning(
func preparePVCDataSourceForProvisioning(
f *framework.Framework,
config volume.TestConfig,
config e2evolume.TestConfig,
client clientset.Interface,
source *v1.PersistentVolumeClaim,
class *storagev1.StorageClass,
@ -802,7 +802,7 @@ func preparePVCDataSourceForProvisioning(
sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{})
framework.ExpectNoError(err)
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */),
Mode: mode,
@ -810,7 +810,7 @@ func preparePVCDataSourceForProvisioning(
ExpectedContent: injectContent,
},
}
volume.InjectContent(f, config, nil, "", tests)
e2evolume.InjectContent(f, config, nil, "", tests)
dataSourceRef := &v1.TypedLocalObjectReference{
Kind: "PersistentVolumeClaim",

View File

@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -64,7 +64,7 @@ func InitSnapshottableTestSuite() TestSuite {
TestPatterns: []testpatterns.TestPattern{
testpatterns.DynamicSnapshot,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},

View File

@ -37,7 +37,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -70,7 +70,7 @@ func InitSubPathTestSuite() TestSuite {
testpatterns.DefaultFsDynamicPV,
testpatterns.NtfsDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},
@ -441,8 +441,8 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Change volume container to busybox so we can exec later
l.pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
l.pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")
l.pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
l.pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod)
@ -516,7 +516,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
InitContainers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", suffix),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@ -527,7 +527,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath,
},
},
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
},
{
Name: fmt.Sprintf("test-init-subpath-%s", suffix),
@ -543,7 +543,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath,
},
},
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
},
{
Name: fmt.Sprintf("test-init-volume-%s", suffix),
@ -558,7 +558,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath,
},
},
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
},
},
Containers: []v1.Container{
@ -576,7 +576,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath,
},
},
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
},
{
Name: fmt.Sprintf("test-container-volume-%s", suffix),
@ -591,7 +591,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
MountPath: probeVolumePath,
},
},
SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
},
},
RestartPolicy: v1.RestartPolicyNever,
@ -608,7 +608,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
},
},
},
SecurityContext: volume.GeneratePodSecurityContext(nil, seLinuxOptions),
SecurityContext: e2evolume.GeneratePodSecurityContext(nil, seLinuxOptions),
},
}
}
@ -651,8 +651,8 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
Containers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
Command: volume.GenerateScriptCmd("echo nothing"),
Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
Command: e2evolume.GenerateScriptCmd("echo nothing"),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@ -673,7 +673,7 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
}
func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = volume.GenerateScriptCmd(command)
pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(command)
}
func setWriteCommand(file string, container *v1.Container) {
@ -796,10 +796,10 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
@ -905,10 +905,10 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
}
// Change to busybox
pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[0].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
pod.Spec.Containers[1].Image = e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
// If grace period is too short, then there is not enough time for the volume
// manager to cleanup the volumes

View File

@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -170,7 +170,7 @@ type DriverInfo struct {
// Maximum single file size supported by this driver
MaxFileSize int64
// The range of disk size supported by this driver
SupportedSizeRange volume.SizeRange
SupportedSizeRange e2evolume.SizeRange
// Map of string for supported fs type
SupportedFsType sets.String
// Map of string for supported mount option
@ -214,7 +214,7 @@ type PerTestConfig struct {
// Some test drivers initialize a storage server. This is
// the configuration that then has to be used to run tests.
// The values above are ignored for such tests.
ServerConfig *volume.TestConfig
ServerConfig *e2evolume.TestConfig
}
// GetUniqueDriverName returns unique driver name that can be used parallelly in tests

View File

@ -34,7 +34,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -63,7 +63,7 @@ func InitVolumeExpandTestSuite() TestSuite {
testpatterns.DefaultFsDynamicPVAllowExpansion,
testpatterns.BlockVolModeDynamicPVAllowExpansion,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},

View File

@ -39,7 +39,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -71,7 +71,7 @@ func InitVolumeIOTestSuite() TestSuite {
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},
@ -180,7 +180,7 @@ func createFileSizes(maxFileSize int64) []int64 {
}
// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
var gracePeriod int64 = 1
volName := fmt.Sprintf("io-volume-%s", config.Namespace)
pod := &v1.Pod{
@ -305,7 +305,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
// Note: nil can be passed for the podSecContext parm, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace))
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
@ -333,7 +333,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(volume.PodCleanupTimeout)
time.Sleep(e2evolume.PodCleanupTimeout)
}
}()

View File

@ -38,7 +38,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -65,7 +65,7 @@ func InitVolumeModeTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},

View File

@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -74,7 +74,7 @@ func InitVolumesTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
SupportedSizeRange: volume.SizeRange{
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
},
@ -156,11 +156,11 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
init()
defer func() {
volume.TestServerCleanup(f, convertTestConfig(l.config))
e2evolume.TestServerCleanup(f, convertTestConfig(l.config))
cleanup()
}()
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: *l.resource.VolSource,
Mode: pattern.VolMode,
@ -180,9 +180,9 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// local), plugin skips setting fsGroup if volume is already mounted
// and we don't have reliable way to detect volumes are unmounted or
// not before starting the second pod.
volume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
if driver.GetDriverInfo().Capabilities[CapPersistence] {
volume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
} else {
ginkgo.By("Skipping persistence check for non-persistent volume")
}
@ -228,7 +228,7 @@ func testScriptInPod(
Containers: []v1.Container{
{
Name: fmt.Sprintf("exec-container-%s", suffix),
Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)),
Image: e2evolume.GetTestImage(imageutils.GetE2EImage(imageutils.Nginx)),
Command: command,
VolumeMounts: []v1.VolumeMount{
{

View File

@ -22,8 +22,6 @@ import (
"encoding/json"
"fmt"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
@ -37,7 +35,8 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// LoadFromManifests loads .yaml or .json manifest files and returns
@ -80,7 +79,7 @@ func LoadFromManifests(files ...string) ([]interface{}, error) {
func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := testfiles.Read(fileName)
data, err := e2etestfiles.Read(fileName)
if err != nil {
framework.Failf("reading manifest file: %v", err)
}

View File

@ -32,7 +32,7 @@ import (
"k8s.io/component-base/metrics/testutil"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
c clientset.Interface
ns string
pvc *v1.PersistentVolumeClaim
metricsGrabber *metrics.Grabber
metricsGrabber *e2emetrics.Grabber
invalidSc *storagev1.StorageClass
defaultScName string
)
@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
VolumeMode: &test.VolumeMode,
}, ns)
metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false)
if err != nil {
framework.Failf("Error creating metrics grabber : %v", err)
@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
// Poll kubelet metrics waiting for the volume to be picked up
// by the volume stats collector
var kubeMetrics metrics.KubeletMetrics
var kubeMetrics e2emetrics.KubeletMetrics
waitErr := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
framework.Logf("Grabbing Kubelet metrics")
// Grab kubelet metrics from the node the pod was scheduled on
@ -405,7 +405,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
}
pvcConfig = e2epv.PersistentVolumeClaimConfig{StorageClassName: &className}
metrics = []struct {
e2emetrics = []struct {
name string
dimension string
}{
@ -431,7 +431,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err)
for i, metric := range metrics {
for i, metric := range e2emetrics {
expectValues := metricValues[i]
if expectValues == nil {
expectValues = make(map[string]int64)
@ -457,7 +457,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Initializes all original metric values.
controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err)
for _, metric := range metrics {
for _, metric := range e2emetrics {
originMetricValues = append(originMetricValues,
testutil.GetMetricValuesForLabel(testutil.Metrics(controllerMetrics), metric.name, metric.dimension))
}
@ -528,7 +528,7 @@ func newStorageControllerMetrics() *storageControllerMetrics {
}
}
func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *metrics.Grabber, pluginName string) *storageControllerMetrics {
func waitForDetachAndGrabMetrics(oldMetrics *storageControllerMetrics, metricsGrabber *e2emetrics.Grabber, pluginName string) *storageControllerMetrics {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
@ -610,7 +610,7 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
gomega.Expect(newStatusCount).To(gomega.BeNumerically(">", oldStatusCount), "New status count %d should be more than old count %d for action %s", newStatusCount, oldStatusCount, metricName)
}
func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics {
func getControllerStorageMetrics(ms e2emetrics.ControllerManagerMetrics, pluginName string) *storageControllerMetrics {
result := newStorageControllerMetrics()
for method, samples := range ms {
@ -654,7 +654,7 @@ func getControllerStorageMetrics(ms metrics.ControllerManagerMetrics, pluginName
// Finds the sample in the specified metric from `KubeletMetrics` tagged with
// the specified namespace and pvc name
func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics metrics.KubeletMetrics) bool {
func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string, kubeletMetrics e2emetrics.KubeletMetrics) bool {
found := false
errCount := 0
framework.Logf("Looking for sample in metric `%s` tagged with namespace `%s`, PVC `%s`", metricKeyName, namespace, pvcName)
@ -683,7 +683,7 @@ func findVolumeStatMetric(metricKeyName string, namespace string, pvcName string
}
// Wait for the count of a pv controller's metric specified by metricName and dimension bigger than zero.
func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimension string) {
func waitForPVControllerSync(metricsGrabber *e2emetrics.Grabber, metricName, dimension string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,
@ -728,7 +728,7 @@ func getStatesMetrics(metricKey string, givenMetrics testutil.Metrics) map[strin
return states
}
func waitForADControllerStatesMetrics(metricsGrabber *metrics.Grabber, metricName string, dimensions []string, stateNames []string) {
func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metricName string, dimensions []string, stateNames []string) {
backoff := wait.Backoff{
Duration: 10 * time.Second,
Factor: 1.2,

View File

@ -45,7 +45,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@ -603,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Name: serviceAccountName,
}
err := auth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject)
err := e2eauth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject)
framework.ExpectNoError(err)
roleName := "leader-locking-nfs-provisioner"
@ -619,10 +619,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create leader-locking role")
err = auth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject)
err = e2eauth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject)
framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1(),
err = e2eauth.WaitForAuthorizationUpdate(c.AuthorizationV1(),
serviceaccount.MakeUsername(ns, serviceAccountName),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization")
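
For reference, a minimal sketch (not part of this diff) of how the e2eauth alias reads at a call site, reusing only BindClusterRole as invoked above; the helper name bindProvisionerRole and the exact subject construction are illustrative assumptions, not code from this change.

import (
	rbacv1 "k8s.io/api/rbac/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)

// bindProvisionerRole is a hypothetical helper: it binds the
// system:persistent-volume-provisioner ClusterRole to a namespaced
// service account, mirroring the e2eauth.BindClusterRole call above.
func bindProvisionerRole(c clientset.Interface, ns, serviceAccountName string) error {
	subject := rbacv1.Subject{
		Kind:      rbacv1.ServiceAccountKind,
		Namespace: ns,
		Name:      serviceAccountName,
	}
	return e2eauth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject)
}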

View File

@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
ginkgo.Describe("ConfigMap", func() {
ginkgo.It("should be mountable", func() {
config := volume.TestConfig{
config := e2evolume.TestConfig{
Namespace: namespace.Name,
Prefix: "configmap",
}
@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
}()
// Test one ConfigMap mounted several times to test #28502
tests := []volume.Test{
tests := []e2evolume.Test{
{
Volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
ExpectedContent: "this is the second file",
},
}
volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
})
})
})
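
A minimal sketch (not part of this diff) of the e2evolume alias in a standalone helper; TestConfig, Test, and TestVolumeClient are used as in the hunk above, while the ConfigMap name "my-configmap", the key "data-1", and the File field value are illustrative assumptions.

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// verifyConfigMapVolume is a hypothetical helper mirroring the test above: it
// mounts one ConfigMap key into a client pod and checks the file content.
func verifyConfigMapVolume(f *framework.Framework, ns string) {
	config := e2evolume.TestConfig{
		Namespace: ns,
		Prefix:    "configmap",
	}
	tests := []e2evolume.Test{
		{
			Volume: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"},
				},
			},
			File:            "data-1",
			ExpectedContent: "this is the first file",
		},
	}
	// nil fsGroup and empty fsType match the invocation in the diff above.
	e2evolume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
}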

View File

@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
})
ginkgo.AfterEach(func() {
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
e2esset.DeleteAllStatefulSets(client, namespace)
e2estatefulset.DeleteAllStatefulSets(client, namespace)
})
ginkgo.It("vsphere statefulset testing", func() {
@ -82,12 +82,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
ginkgo.By("Creating statefulset")
statefulset := e2esset.CreateStatefulSet(client, manifestPath, namespace)
statefulset := e2estatefulset.CreateStatefulSet(client, manifestPath, namespace)
replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas)
framework.ExpectNoError(e2esset.CheckMount(client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2esset.GetPodList(client, statefulset)
e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)
framework.ExpectNoError(e2estatefulset.CheckMount(client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2estatefulset.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsBeforeScaleDown.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")
@ -105,9 +105,9 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
}
ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
_, scaledownErr := e2esset.Scale(client, statefulset, replicas-1)
_, scaledownErr := e2estatefulset.Scale(client, statefulset, replicas-1)
framework.ExpectNoError(scaledownErr)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas-1)
e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas-1)
// After scale down, verify vsphere volumes are detached from deleted pods
ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
@ -126,12 +126,12 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
}
ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
_, scaleupErr := e2esset.Scale(client, statefulset, replicas)
_, scaleupErr := e2estatefulset.Scale(client, statefulset, replicas)
framework.ExpectNoError(scaleupErr)
e2esset.WaitForStatusReplicas(client, statefulset, replicas)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas)
e2estatefulset.WaitForStatusReplicas(client, statefulset, replicas)
e2estatefulset.WaitForStatusReadyReplicas(client, statefulset, replicas)
ssPodsAfterScaleUp := e2esset.GetPodList(client, statefulset)
ssPodsAfterScaleUp := e2estatefulset.GetPodList(client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
framework.ExpectEqual(len(ssPodsAfterScaleUp.Items), int(replicas), "Number of Pods in the statefulset should match with number of replicas")

View File

@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -97,12 +97,12 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
volumePath := pvs[0].Spec.VsphereVolume.VolumePath
ginkgo.By("Creating a Deployment")
deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
ginkgo.By("Get pod from the deployment")
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployment with err: %v", err))
gomega.Expect(podList.Items).NotTo(gomega.BeEmpty())
pod := podList.Items[0]
@ -179,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deploym
// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
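
A minimal sketch (not part of this diff) combining the two e2edeployment calls shown above into one helper; the function name and error message are illustrative assumptions.

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// firstPodNodeOfDeployment is a hypothetical helper: it creates a one-replica
// deployment over the given claims and returns the node of its first pod.
func firstPodNodeOfDeployment(client clientset.Interface, ns string, pvclaims []*v1.PersistentVolumeClaim) (string, error) {
	deployment, err := e2edeployment.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, ns, pvclaims, "")
	if err != nil {
		return "", err
	}
	podList, err := e2edeployment.GetPodsForDeployment(client, deployment)
	if err != nil {
		return "", err
	}
	if len(podList.Items) == 0 {
		return "", fmt.Errorf("no pods found for deployment %q", deployment.Name)
	}
	return podList.Items[0].Spec.NodeName, nil
}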

View File

@ -28,7 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/upgrades"
"github.com/onsi/ginkgo"
@ -66,12 +66,12 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
rsClient := c.AppsV1().ReplicaSets(ns)
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
@ -87,13 +87,13 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Trigger a new rollout so that we have some history.
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
@ -155,17 +155,17 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2")
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
deploymentWithUpdatedReplicas, err := e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas))
framework.ExpectNoError(e2edeployment.WaitForDeploymentComplete(c, deploymentWithUpdatedReplicas))
}
// Teardown cleans up any remaining resources.

View File

@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
"k8s.io/kubernetes/test/e2e/upgrades"
"github.com/onsi/ginkgo"
@ -63,7 +63,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID
}
@ -87,17 +87,17 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
}
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
_, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(replicaset.WaitForReadyReplicaSet(c, ns, rsName))
framework.ExpectNoError(e2ereplicaset.WaitForReadyReplicaSet(c, ns, rsName))
}
// Teardown cleans up any remaining resources.

View File

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/upgrades"
)
@ -79,10 +79,10 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ns := f.Namespace.Name
t.set = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.set = e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.service = createStatefulSetService(ssName, labels)
*(t.set.Spec.Replicas) = 3
e2esset.PauseNewPods(t.set)
e2estatefulset.PauseNewPods(t.set)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service, metav1.CreateOptions{})
@ -94,7 +94,7 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + t.set.Name)
e2esset.Saturate(f.ClientSet, t.set)
e2estatefulset.Saturate(f.ClientSet, t.set)
t.verify(f)
t.restart(f)
t.verify(f)
@ -108,26 +108,26 @@ func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct
// Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
e2esset.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
e2estatefulset.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}
func (t *StatefulSetUpgradeTest) verify(f *framework.Framework) {
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(e2esset.CheckMount(f.ClientSet, t.set, "/data"))
framework.ExpectNoError(e2estatefulset.CheckMount(f.ClientSet, t.set, "/data"))
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(e2esset.CheckHostname(f.ClientSet, t.set))
framework.ExpectNoError(e2estatefulset.CheckHostname(f.ClientSet, t.set))
ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(e2esset.CheckServiceName(t.set, t.set.Spec.ServiceName))
framework.ExpectNoError(e2estatefulset.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;"
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(e2esset.ExecInStatefulPods(f.ClientSet, t.set, cmd))
framework.ExpectNoError(e2estatefulset.ExecInStatefulPods(f.ClientSet, t.set, cmd))
}
func (t *StatefulSetUpgradeTest) restart(f *framework.Framework) {
ginkgo.By("Restarting statefulset " + t.set.Name)
e2esset.Restart(f.ClientSet, t.set)
e2esset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set)
e2estatefulset.Restart(f.ClientSet, t.set)
e2estatefulset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set)
}
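
A minimal sketch (not part of this diff) of a scale-and-wait helper under the e2estatefulset alias, reusing only Scale, WaitForStatusReplicas, and WaitForStatusReadyReplicas as invoked in the vSphere test earlier in this change; the helper name is an illustrative assumption.

import (
	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// scaleAndWait is a hypothetical helper: it scales the StatefulSet and blocks
// until the desired number of replicas report Ready.
func scaleAndWait(client clientset.Interface, ss *appsv1.StatefulSet, replicas int32) error {
	if _, err := e2estatefulset.Scale(client, ss, replicas); err != nil {
		return err
	}
	e2estatefulset.WaitForStatusReplicas(client, ss, replicas)
	e2estatefulset.WaitForStatusReadyReplicas(client, ss, replicas)
	return nil
}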

View File

@ -32,8 +32,8 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
const cassandraManifestPath = "test/e2e/testing-manifests/statefulset/cassandra"
@ -60,7 +60,7 @@ func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func cassandraKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file)))
input := string(e2etestfiles.ReadOrDie(filepath.Join(cassandraManifestPath, file)))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
@ -78,7 +78,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
cassandraKubectlCreate(ns, "pdb.yaml")
ginkgo.By("Creating a Cassandra StatefulSet")
e2esset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns)
e2estatefulset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns)
ginkgo.By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml")

View File

@ -32,8 +32,8 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
@ -59,7 +59,7 @@ func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func kubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(manifestPath, file)))
input := string(e2etestfiles.ReadOrDie(filepath.Join(manifestPath, file)))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
@ -73,7 +73,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
kubectlCreate(ns, "pdb.yaml")
ginkgo.By("Creating an etcd StatefulSet")
e2esset.CreateStatefulSet(f.ClientSet, manifestPath, ns)
e2estatefulset.CreateStatefulSet(f.ClientSet, manifestPath, ns)
ginkgo.By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml")

View File

@ -32,8 +32,8 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
@ -61,7 +61,7 @@ func (MySQLUpgradeTest) Skip(upgCtx UpgradeContext) bool {
}
func mysqlKubectlCreate(ns, file string) {
input := string(testfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file)))
input := string(e2etestfiles.ReadOrDie(filepath.Join(mysqlManifestPath, file)))
framework.RunKubectlOrDieInput(ns, input, "create", "-f", "-", fmt.Sprintf("--namespace=%s", ns))
}
@ -88,7 +88,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
mysqlKubectlCreate(ns, "configmap.yaml")
ginkgo.By("Creating a mysql StatefulSet")
e2esset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns)
e2estatefulset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns)
ginkgo.By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml")

View File

@ -25,7 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"regexp"
@ -80,7 +80,7 @@ func numberOfSampleResources(node *v1.Node) int64 {
// getSampleDevicePluginPod returns the Device Plugin pod for sample resources in e2e tests.
func getSampleDevicePluginPod() *v1.Pod {
ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(sampleDevicePluginDSYAML))
ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(sampleDevicePluginDSYAML))
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: sampleDevicePluginName,

View File

@ -34,7 +34,7 @@ import (
controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
"k8s.io/kubernetes/pkg/kubelet/metrics"
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework"
@ -1161,7 +1161,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
// error
errorSamples := model.Samples{mkErrorSample(len(tc.expectConfigStatus.err) > 0)}
// expected metrics
expect := frameworkmetrics.KubeletMetrics(map[string]model.Samples{
expect := e2emetrics.KubeletMetrics(map[string]model.Samples{
assignedConfigKey: assignedSamples,
activeConfigKey: activeSamples,
lastKnownGoodConfigKey: lastKnownGoodSamples,

View File

@ -43,7 +43,7 @@ import (
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e_node/services"
system "k8s.io/system-validators/validators"
@ -85,7 +85,7 @@ func registerNodeFlags(flags *flag.FlagSet) {
func init() {
// Enable bindata file lookup as fallback.
testfiles.AddFileSource(testfiles.BindataFileSource{
e2etestfiles.AddFileSource(e2etestfiles.BindataFileSource{
Asset: generated.Asset,
AssetNames: generated.AssetNames,
})
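
A minimal sketch (not part of this diff) of reading a manifest through the e2etestfiles alias once a file source has been registered as above, following the Read/Failf pattern used elsewhere in this change; the helper name and any path passed to it are illustrative assumptions.

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// readManifestOrFail is a hypothetical helper: it loads a manifest from the
// registered file sources and fails the test run if the file cannot be read.
func readManifestOrFail(path string) []byte {
	data, err := e2etestfiles.Read(path)
	if err != nil {
		framework.Failf("reading manifest file: %v", err)
	}
	return data
}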

View File

@ -27,8 +27,8 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -40,7 +40,7 @@ import (
// After the NVIDIA drivers were installed
// TODO make this generic and not linked to COS only
func numberOfNVIDIAGPUs(node *v1.Node) int64 {
val, ok := node.Status.Capacity[gpu.NVIDIAGPUResourceName]
val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]
if !ok {
return 0
}
@ -49,7 +49,7 @@ func numberOfNVIDIAGPUs(node *v1.Node) int64 {
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin() *v1.Pod {
ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML)
ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML)
framework.ExpectNoError(err)
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
ginkgo.It("checks that when Kubelet restarts exclusive GPU assignation to pods is kept.", func() {
ginkgo.By("Creating one GPU pod on a node with at least two GPUs")
podRECMD := "devs=$(ls /dev/ | egrep '^nvidia[0-9]+$') && echo gpu devices: $devs"
p1 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
p1 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD))
deviceIDRE := "gpu devices: (nvidia[0-9]+)"
devID1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
gomega.Eventually(func() bool {
return numberOfNVIDIAGPUs(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
p2 := f.PodClient().CreateSync(makeBusyboxPod(gpu.NVIDIAGPUResourceName, podRECMD))
p2 := f.PodClient().CreateSync(makeBusyboxPod(e2egpu.NVIDIAGPUResourceName, podRECMD))
ginkgo.By("Checking that pods got a different GPU")
devID2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
@ -179,7 +179,7 @@ func checkIfNvidiaGPUsExistOnNode() bool {
}
func logDevicePluginMetrics() {
ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
framework.ExpectNoError(err)
for msKey, samples := range ms {
switch msKey {

View File

@ -30,8 +30,8 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -171,7 +171,7 @@ func PrePullAllImages() error {
// getGPUDevicePluginImage returns the image of GPU device plugin.
func getGPUDevicePluginImage() string {
ds, err := framework.DsFromManifest(gpu.GPUDevicePluginDSYAML)
ds, err := framework.DsFromManifest(e2egpu.GPUDevicePluginDSYAML)
if err != nil {
klog.Errorf("Failed to parse the device plugin image: %v", err)
return ""
@ -189,7 +189,7 @@ func getGPUDevicePluginImage() string {
// getSRIOVDevicePluginImage returns the image of SRIOV device plugin.
func getSRIOVDevicePluginImage() string {
data, err := testfiles.Read(SRIOVDevicePluginDSYAML)
data, err := e2etestfiles.Read(SRIOVDevicePluginDSYAML)
if err != nil {
klog.Errorf("Failed to read the device plugin manifest: %v", err)
return ""

View File

@ -24,8 +24,8 @@ import (
kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"github.com/prometheus/common/model"
@ -76,7 +76,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
"": boundedSample(1, 1e6),
}),
"node_memory_working_set_bytes": gstruct.MatchAllElements(nodeID, gstruct.Elements{
"": boundedSample(10*volume.Mb, memoryLimit),
"": boundedSample(10*e2evolume.Mb, memoryLimit),
}),
"container_cpu_usage_seconds_total": gstruct.MatchElements(containerID, gstruct.IgnoreExtras, gstruct.Elements{
@ -85,8 +85,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
}),
"container_memory_working_set_bytes": gstruct.MatchAllElements(containerID, gstruct.Elements{
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*volume.Kb, 80*volume.Mb),
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod0, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb),
fmt.Sprintf("%s::%s::%s", f.Namespace.Name, pod1, "busybox-container"): boundedSample(10*e2evolume.Kb, 80*e2evolume.Mb),
}),
})
ginkgo.By("Giving pods a minute to start up and produce metrics")
@ -110,8 +110,8 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
})
})
func getV1alpha1ResourceMetrics() (metrics.KubeletMetrics, error) {
return metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
func getV1alpha1ResourceMetrics() (e2emetrics.KubeletMetrics, error) {
return e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
}
func nodeID(element interface{}) string {

View File

@ -29,7 +29,7 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
"k8s.io/kubernetes/test/e2e/framework/volume"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
systemdutil "github.com/coreos/go-systemd/util"
"github.com/onsi/ginkgo"
@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
node := getLocalNode(f)
memoryCapacity := node.Status.Capacity["memory"]
memoryLimit := memoryCapacity.Value()
fsCapacityBounds := bounded(100*volume.Mb, 10*volume.Tb)
fsCapacityBounds := bounded(100*e2evolume.Mb, 10*e2evolume.Tb)
// Expectations for system containers.
sysContExpectations := func() types.GomegaMatcher {
return gstruct.MatchAllFields(gstruct.Fields{
@ -97,10 +97,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": gomega.BeNil(),
"UsageBytes": bounded(1*volume.Mb, memoryLimit),
"WorkingSetBytes": bounded(1*volume.Mb, memoryLimit),
"UsageBytes": bounded(1*e2evolume.Mb, memoryLimit),
"WorkingSetBytes": bounded(1*e2evolume.Mb, memoryLimit),
// this now returns /sys/fs/cgroup/memory.stat total_rss
"RSSBytes": bounded(1*volume.Mb, memoryLimit),
"RSSBytes": bounded(1*e2evolume.Mb, memoryLimit),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
}),
@ -114,10 +114,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
podsContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
// Pods are limited by Node Allocatable
"AvailableBytes": bounded(1*volume.Kb, memoryLimit),
"UsageBytes": bounded(10*volume.Kb, memoryLimit),
"WorkingSetBytes": bounded(10*volume.Kb, memoryLimit),
"RSSBytes": bounded(1*volume.Kb, memoryLimit),
"AvailableBytes": bounded(1*e2evolume.Kb, memoryLimit),
"UsageBytes": bounded(10*e2evolume.Kb, memoryLimit),
"WorkingSetBytes": bounded(10*e2evolume.Kb, memoryLimit),
"RSSBytes": bounded(1*e2evolume.Kb, memoryLimit),
"PageFaults": bounded(0, 1000000),
"MajorPageFaults": bounded(0, 10),
})
@ -159,9 +159,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
// We don't limit system container memory.
"AvailableBytes": gomega.BeNil(),
"UsageBytes": bounded(100*volume.Kb, memoryLimit),
"WorkingSetBytes": bounded(100*volume.Kb, memoryLimit),
"RSSBytes": bounded(100*volume.Kb, memoryLimit),
"UsageBytes": bounded(100*e2evolume.Kb, memoryLimit),
"WorkingSetBytes": bounded(100*e2evolume.Kb, memoryLimit),
"RSSBytes": bounded(100*e2evolume.Kb, memoryLimit),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
})
@ -182,10 +182,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb),
"UsageBytes": bounded(10*volume.Kb, 80*volume.Mb),
"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb),
"RSSBytes": bounded(1*volume.Kb, 80*volume.Mb),
"AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
"UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
"WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
"RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
"PageFaults": bounded(100, 1000000),
"MajorPageFaults": bounded(0, 10),
}),
@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
@ -203,7 +203,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 10*volume.Mb),
"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Mb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
@ -215,9 +215,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
"Name": gomega.Equal("eth0"),
"RxBytes": bounded(10, 10*volume.Mb),
"RxBytes": bounded(10, 10*e2evolume.Mb),
"RxErrors": bounded(0, 1000),
"TxBytes": bounded(10, 10*volume.Mb),
"TxBytes": bounded(10, 10*e2evolume.Mb),
"TxErrors": bounded(0, 1000),
}),
"Interfaces": gomega.Not(gomega.BeNil()),
@ -229,10 +229,10 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": bounded(1*volume.Kb, 80*volume.Mb),
"UsageBytes": bounded(10*volume.Kb, 80*volume.Mb),
"WorkingSetBytes": bounded(10*volume.Kb, 80*volume.Mb),
"RSSBytes": bounded(1*volume.Kb, 80*volume.Mb),
"AvailableBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
"UsageBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
"WorkingSetBytes": bounded(10*e2evolume.Kb, 80*e2evolume.Mb),
"RSSBytes": bounded(1*e2evolume.Kb, 80*e2evolume.Mb),
"PageFaults": bounded(0, 1000000),
"MajorPageFaults": bounded(0, 10),
}),
@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 1*volume.Mb),
"UsedBytes": bounded(e2evolume.Kb, 1*e2evolume.Mb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
"UsedBytes": bounded(volume.Kb, 21*volume.Mb),
"UsedBytes": bounded(e2evolume.Kb, 21*e2evolume.Mb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
@ -277,11 +277,11 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}),
"Memory": ptrMatchAllFields(gstruct.Fields{
"Time": recent(maxStatsAge),
"AvailableBytes": bounded(100*volume.Mb, memoryLimit),
"UsageBytes": bounded(10*volume.Mb, memoryLimit),
"WorkingSetBytes": bounded(10*volume.Mb, memoryLimit),
"AvailableBytes": bounded(100*e2evolume.Mb, memoryLimit),
"UsageBytes": bounded(10*e2evolume.Mb, memoryLimit),
"WorkingSetBytes": bounded(10*e2evolume.Mb, memoryLimit),
// this now returns /sys/fs/cgroup/memory.stat total_rss
"RSSBytes": bounded(1*volume.Kb, memoryLimit),
"RSSBytes": bounded(1*e2evolume.Kb, memoryLimit),
"PageFaults": bounded(1000, 1e9),
"MajorPageFaults": bounded(0, 100000),
}),
@ -290,9 +290,9 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"Time": recent(maxStatsAge),
"InterfaceStats": gstruct.MatchAllFields(gstruct.Fields{
"Name": gomega.Or(gomega.BeEmpty(), gomega.Equal("eth0")),
"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*volume.Mb, 100*volume.Gb)),
"RxBytes": gomega.Or(gomega.BeNil(), bounded(1*e2evolume.Mb, 100*e2evolume.Gb)),
"RxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*volume.Kb, 10*volume.Gb)),
"TxBytes": gomega.Or(gomega.BeNil(), bounded(10*e2evolume.Kb, 10*e2evolume.Gb)),
"TxErrors": gomega.Or(gomega.BeNil(), bounded(0, 100000)),
}),
"Interfaces": gomega.Not(gomega.BeNil()),
@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),
@ -313,7 +313,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
"AvailableBytes": fsCapacityBounds,
"CapacityBytes": fsCapacityBounds,
// we assume we are not running tests on machines < 10tb of disk
"UsedBytes": bounded(volume.Kb, 10*volume.Tb),
"UsedBytes": bounded(e2evolume.Kb, 10*e2evolume.Tb),
"InodesFree": bounded(1e4, 1e8),
"Inodes": bounded(1e4, 1e8),
"InodesUsed": bounded(0, 1e8),

View File

@ -41,7 +41,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -246,7 +246,7 @@ func configureTopologyManagerInKubelet(f *framework.Framework, oldCfg *kubeletco
// getSRIOVDevicePluginPod returns the Device Plugin pod for sriov resources in e2e tests.
func getSRIOVDevicePluginPod() *v1.Pod {
ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginDSYAML))
ds := readDaemonSetV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginDSYAML))
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: SRIOVDevicePluginName,
@ -415,7 +415,7 @@ func isTopologyAffinityError(pod *v1.Pod) bool {
}
func getSRIOVDevicePluginConfigMap(cmFile string) *v1.ConfigMap {
cmData := testfiles.ReadOrDie(SRIOVDevicePluginCMYAML)
cmData := e2etestfiles.ReadOrDie(SRIOVDevicePluginCMYAML)
var err error
// the SRIOVDP configuration is hw-dependent, so we allow per-test-host customization.
@ -449,7 +449,7 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
serviceAccount := readServiceAccountV1OrDie(testfiles.ReadOrDie(SRIOVDevicePluginSAYAML))
serviceAccount := readServiceAccountV1OrDie(e2etestfiles.ReadOrDie(SRIOVDevicePluginSAYAML))
ginkgo.By(fmt.Sprintf("Creating serviceAccount %v/%v", metav1.NamespaceSystem, serviceAccount.Name))
if _, err = f.ClientSet.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}); err != nil {
framework.Failf("unable to create test serviceAccount %s: %v", serviceAccount.Name, err)