Add ipv6 support to [sig-apps] StatefulSet e2e test

Use httpd docker images instead of nginx because they listen
by default both in IPv4 and IPv6
This commit is contained in:
Antonio Ojea 2019-06-10 17:59:26 +02:00
parent 8ce45b642e
commit 410df752cd
No known key found for this signature in database
GPG Key ID: E4833AA228D4E824
8 changed files with 64 additions and 58 deletions

View File

@ -94,7 +94,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
f = framework.NewDefaultFramework("daemonsets")
image := NginxImage
image := WebserverImage
dsName := "daemon-set"
var ns string

View File

@ -228,16 +228,16 @@ func testDeleteDeployment(f *framework.Framework) {
c := f.ClientSet
deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": NginxImageName}
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
e2elog.Logf("Creating simple deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
@ -254,11 +254,11 @@ func testDeleteDeployment(f *framework.Framework) {
func testRollingUpdateDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
// Create nginx pods.
// Create webserver pods.
deploymentPodLabels := map[string]string{"name": "sample-pod"}
rsPodLabels := map[string]string{
"name": "sample-pod",
"pod": NginxImageName,
"pod": WebserverImageName,
}
rsName := "test-rolling-update-controller"
@ -266,7 +266,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rsRevision := "3546343826724305832"
annotations := make(map[string]string)
annotations[deploymentutil.RevisionAnnotation] = rsRevision
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage)
rs.Annotations = annotations
e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
@ -275,7 +275,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
// Create a deployment to delete nginx pods and instead bring up redis pods.
// Create a deployment to delete webserver pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"
e2elog.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
@ -291,7 +291,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
// There should be 1 old RS (nginx-controller, which is adopted)
// There should be 1 old RS (webserver-controller, which is adopted)
e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
@ -320,11 +320,11 @@ func testRecreateDeployment(f *framework.Framework) {
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
// Update deployment to delete redis pods and bring up nginx pods.
// Update deployment to delete redis pods and bring up webserver pods.
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
update.Spec.Template.Spec.Containers[0].Image = NginxImage
update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
update.Spec.Template.Spec.Containers[0].Image = WebserverImage
})
framework.ExpectNoError(err)
@ -337,23 +337,23 @@ func testRecreateDeployment(f *framework.Framework) {
func testDeploymentCleanUpPolicy(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
// Create nginx pods.
// Create webserver pods.
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
rsPodLabels := map[string]string{
"name": "cleanup-pod",
"pod": NginxImageName,
"pod": WebserverImageName,
}
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
// Create a deployment to delete nginx pods and instead bring up redis pods.
// Create a deployment to delete webserver pods and instead bring up redis pods.
deploymentName := "test-cleanup-deployment"
e2elog.Logf("Creating deployment %s", deploymentName)
@ -412,12 +412,12 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentPodLabels := map[string]string{"name": podName}
rsPodLabels := map[string]string{
"name": podName,
"pod": NginxImageName,
"pod": WebserverImageName,
}
rsName := "test-rollover-controller"
rsReplicas := int32(1)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
@ -428,7 +428,7 @@ func testRolloverDeployment(f *framework.Framework) {
err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
framework.ExpectNoError(err)
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
// Create a deployment to delete webserver pods and instead bring up redis-slave pods.
// We use a nonexistent image here, so that we make sure it won't finish
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
deploymentReplicas := int32(1)
@ -518,15 +518,15 @@ func testIterativeDeployments(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
podLabels := map[string]string{"name": NginxImageName}
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(6)
zero := int64(0)
two := int32(2)
// Create a nginx deployment.
deploymentName := "nginx"
// Create a webserver deployment.
deploymentName := "webserver"
thirty := int32(30)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
@ -642,9 +642,9 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName := "test-orphan-deployment"
e2elog.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
@ -671,7 +671,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
@ -696,12 +696,12 @@ func testProportionalScalingDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
podLabels := map[string]string{"name": NginxImageName}
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(10)
// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
// Create a webserver deployment.
deploymentName := "webserver-deployment"
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@ -716,7 +716,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Verify that the required pods have come up.
e2elog.Logf("Waiting for all required pods to come up")
err = e2epod.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
@ -730,7 +730,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// will be blocked to simulate a partial rollout.
e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
update.Spec.Template.Spec.Containers[0].Image = "webserver:404"
})
framework.ExpectNoError(err)

View File

@ -200,7 +200,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
framework.ExpectNoError(err)
@ -270,7 +270,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
Containers: []v1.Container{
{
Name: name,
Image: NginxImage,
Image: WebserverImage,
},
},
},
@ -278,7 +278,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
ginkgo.By("When a replication controller with a matching selector is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
framework.ExpectNoError(err)
@ -307,7 +307,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
name := "pod-release"
ginkgo.By("Given a ReplicationController is created")
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
framework.ExpectNoError(err)

View File

@ -202,7 +202,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
framework.ExpectNoError(err)
@ -273,7 +273,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
Containers: []v1.Container{
{
Name: name,
Image: NginxImage,
Image: WebserverImage,
},
},
},
@ -281,7 +281,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
ginkgo.By("When a replicaset with a matching selector is created")
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
framework.ExpectNoError(err)

View File

@ -312,7 +312,7 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
newImage := NewNginxImage
newImage := NewWebserverImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
@ -532,7 +532,7 @@ var _ = SIGDescribe("StatefulSet", func() {
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
newImage := NewNginxImage
newImage := NewWebserverImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
@ -720,8 +720,8 @@ var _ = SIGDescribe("StatefulSet", func() {
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
Name: "webserver",
Image: imageutils.GetE2EImage(imageutils.Httpd),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@ -1111,7 +1111,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
newImage := NewWebserverImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))

View File

@ -23,8 +23,8 @@ import (
// NOTE(claudiub): These constants should NOT be used as Pod Container Images.
const (
NginxImageName = "nginx"
RedisImageName = "redis"
WebserverImageName = "httpd"
RedisImageName = "redis"
)
var (
@ -40,11 +40,11 @@ var (
// KittenImage is the fully qualified URI to the Kitten image
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
// NginxImage is the fully qualified URI to the Nginx image
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
// WebserverImage is the fully qualified URI to the Httpd image
WebserverImage = imageutils.GetE2EImage(imageutils.Httpd)
// NewNginxImage is the fully qualified URI to the NginxNew image
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
// NewWebserverImage is the fully qualified URI to the HttpdNew image
NewWebserverImage = imageutils.GetE2EImage(imageutils.HttpdNew)
// RedisImage is the fully qualified URI to the Redis image
RedisImage = imageutils.GetE2EImage(imageutils.Redis)

View File

@ -503,7 +503,7 @@ var httpProbe = &v1.Probe{
FailureThreshold: 1,
}
// SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// SetHTTPProbe sets the pod template's ReadinessProbe for Webserver StatefulSet containers.
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
// Note that this cannot be used together with PauseNewPods().
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1.StatefulSet) {
@ -517,7 +517,7 @@ func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1.StatefulSet) error {
return fmt.Errorf("Path expected to be not empty: %v", path)
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
return s.ExecInStatefulPods(ss, cmd)
}
@ -528,7 +528,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Po
return fmt.Errorf("Path expected to be not empty: %v", path)
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path)
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
@ -541,7 +541,7 @@ func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1.StatefulSet) error {
return fmt.Errorf("Path expected to be not empty: %v", path)
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
return s.ExecInStatefulPods(ss, cmd)
}
@ -552,7 +552,7 @@ func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.
return fmt.Errorf("Path expected to be not empty: %v", path)
}
// Ignore 'mv' errors to make this idempotent.
cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path)
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
return err
@ -764,7 +764,7 @@ func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
}
}
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// NewStatefulSet creates a new Webserver StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly
// to the Pod. labels are the labels that will be used for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
@ -808,8 +808,8 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.Nginx),
Name: "webserver",
Image: imageutils.GetE2EImage(imageutils.Httpd),
VolumeMounts: mounts,
SecurityContext: &v1.SecurityContext{},
},

View File

@ -141,6 +141,10 @@ const (
GBFrontend
// GBRedisSlave image
GBRedisSlave
// Httpd image
Httpd
// HttpdNew image
HttpdNew
// InClusterClient image
InClusterClient
// Invalid image
@ -226,6 +230,8 @@ func initImageConfigs() map[int]Config {
configs[Etcd] = Config{gcRegistry, "etcd", "3.3.10"}
configs[GBFrontend] = Config{sampleRegistry, "gb-frontend", "v6"}
configs[GBRedisSlave] = Config{sampleRegistry, "gb-redisslave", "v3"}
configs[Httpd] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"}
configs[HttpdNew] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"}
configs[InClusterClient] = Config{e2eRegistry, "inclusterclient", "1.0"}
configs[Invalid] = Config{gcRegistry, "invalid-image", "invalid-tag"}
configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}