mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 11:21:47 +00:00)

Refactor statefulset e2e tests

This commit is contained in:
parent 896c901684
commit 032dde8754
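The change is mechanical throughout the diff: helper methods on the old framework.StatefulSetTester move to package-level functions in the new test/e2e/framework/statefulset package (imported as e2esset) that take the clientset as an explicit first argument. A minimal before/after sketch of a typical call site, built only from calls that appear in the hunks below:

// Old style: helpers hang off a tester object that captures the client.
sst := framework.NewStatefulSetTester(c)
sst.Saturate(ss)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

// New style: stateless package-level functions; the clientset is passed explicitly.
e2esset.Saturate(c, ss)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)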
@@ -69,6 +69,7 @@ go_library(
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
+"//test/e2e/framework/statefulset:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
@@ -40,6 +40,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
testutils "k8s.io/kubernetes/test/utils"

"github.com/onsi/ginkgo"
@@ -371,18 +372,16 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.DumpDebugInfo(c, ns)
}
e2elog.Logf("Deleting all stateful set in ns %v", ns)
-framework.DeleteAllStatefulSets(c, ns)
+e2esset.DeleteAllStatefulSets(c, ns)
})

ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
-ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
+ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
framework.ExpectNoError(err)

-pst := framework.NewStatefulSetTester(c)
-
nn, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
nodes, err := e2enode.CheckReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
@@ -390,18 +389,17 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
common.RestartNodes(f.ClientSet, nodes)

ginkgo.By("waiting for pods to be running again")
-pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
+e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})

ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
-ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
+ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
framework.ExpectNoError(err)

-pst := framework.NewStatefulSetTester(c)
-pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
+e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)

-pod := pst.GetPodList(ps).Items[0]
+pod := e2esset.GetPodList(c, ps).Items[0]
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)

@@ -420,7 +418,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}

ginkgo.By("waiting for pods to be running again")
-pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
+e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
})
})

@@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -76,7 +77,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
-ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
+ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)

ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
@@ -89,7 +90,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns)
}
e2elog.Logf("Deleting all statefulset in ns %v", ns)
-framework.DeleteAllStatefulSets(c, ns)
+e2esset.DeleteAllStatefulSets(c, ns)
})

// This can't be Conformance yet because it depends on a default
@@ -98,38 +99,37 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 3
-sst := framework.NewStatefulSetTester(c)
-sst.PauseNewPods(ss)
+e2esset.PauseNewPods(ss)

_, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)

ginkgo.By("Saturating stateful set " + ss.Name)
-sst.Saturate(ss)
+e2esset.Saturate(c, ss)

ginkgo.By("Verifying statefulset mounted data directory is usable")
-framework.ExpectNoError(sst.CheckMount(ss, "/data"))
+framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))

ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
-framework.ExpectNoError(sst.CheckHostname(ss))
+framework.ExpectNoError(e2esset.CheckHostname(c, ss))

ginkgo.By("Verifying statefulset set proper service name")
-framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))
+framework.ExpectNoError(e2esset.CheckServiceName(ss, headlessSvcName))

cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
ginkgo.By("Running " + cmd + " in all stateful pods")
-framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
+framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))

ginkgo.By("Restarting statefulset " + ss.Name)
-sst.Restart(ss)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+e2esset.Restart(c, ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)

ginkgo.By("Verifying statefulset mounted data directory is usable")
-framework.ExpectNoError(sst.CheckMount(ss, "/data"))
+framework.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))

cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
ginkgo.By("Running " + cmd + " in all stateful pods")
-framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
+framework.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, cmd))
})

// This can't be Conformance yet because it depends on a default
@@ -138,8 +138,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 1
-sst := framework.NewStatefulSetTester(c)
-sst.PauseNewPods(ss)
+e2esset.PauseNewPods(ss)

// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
@@ -149,8 +148,8 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Kind = kind

ginkgo.By("Saturating stateful set " + ss.Name)
-sst.Saturate(ss)
-pods := sst.GetPodList(ss)
+e2esset.Saturate(c, ss)
+pods := e2esset.GetPodList(c, ss)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas)))

ginkgo.By("Checking that stateful set pods are created with ControllerRef")
@@ -167,7 +166,7 @@ var _ = SIGDescribe("StatefulSet", func() {
})

ginkgo.By("Checking that the stateful set readopts the pod")
-gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
+gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", e2esset.StatefulSetTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
@@ -187,7 +186,7 @@ var _ = SIGDescribe("StatefulSet", func() {
})

ginkgo.By("Checking that the stateful set releases the pod")
-gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
+gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", e2esset.StatefulSetTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil {
@@ -204,7 +203,7 @@ var _ = SIGDescribe("StatefulSet", func() {
})

ginkgo.By("Checking that the stateful set readopts the pod")
-gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
+gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", e2esset.StatefulSetTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
@@ -224,35 +223,34 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
framework.SkipIfNoDefaultStorageClass(c)
*(ss.Spec.Replicas) = 2
-sst := framework.NewStatefulSetTester(c)
-sst.PauseNewPods(ss)
+e2esset.PauseNewPods(ss)

_, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)

-sst.WaitForRunning(1, 0, ss)
+e2esset.WaitForRunning(c, 1, 0, ss)

ginkgo.By("Resuming stateful pod at index 0.")
-sst.ResumeNextPod(ss)
+e2esset.ResumeNextPod(c, ss)

ginkgo.By("Waiting for stateful pod at index 1 to enter running.")
-sst.WaitForRunning(2, 1, ss)
+e2esset.WaitForRunning(c, 2, 1, ss)

// Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not*
// create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till
// we set the healthy bit.

ginkgo.By("Deleting healthy stateful pod at index 0.")
-sst.DeleteStatefulPodAtIndex(0, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 0, ss)

ginkgo.By("Confirming stateful pod at index 0 is recreated.")
-sst.WaitForRunning(2, 1, ss)
+e2esset.WaitForRunning(c, 2, 1, ss)

ginkgo.By("Resuming stateful pod at index 1.")
-sst.ResumeNextPod(ss)
+e2esset.ResumeNextPod(c, ss)

ginkgo.By("Confirming all stateful pods in statefulset are created.")
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
})

// This can't be Conformance yet because it depends on a default
@@ -271,7 +269,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ss)
})

@@ -282,9 +280,8 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+e2esset.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
@@ -297,13 +294,13 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
-ss = sst.WaitForStatus(ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
-pods := sst.GetPodList(ss)
+pods := e2esset.GetPodList(c, ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
@@ -317,13 +314,13 @@ var _ = SIGDescribe("StatefulSet", func() {

ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)

ginkgo.By("Creating a new revision")
-ss = sst.WaitForStatus(ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
@@ -355,7 +352,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}()}
}(),
}
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
@@ -368,7 +365,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
})
framework.ExpectNoError(err)
-ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
+ss, pods = e2esset.WaitForPartitionedRollingUpdate(c, ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
@@ -400,11 +397,11 @@ var _ = SIGDescribe("StatefulSet", func() {
}

ginkgo.By("Restoring Pods to the correct revision when they are deleted")
-sst.DeleteStatefulPodAtIndex(0, ss)
-sst.DeleteStatefulPodAtIndex(2, ss)
-sst.WaitForRunningAndReady(3, ss)
-ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
-pods = sst.GetPodList(ss)
+e2esset.DeleteStatefulPodAtIndex(c, 0, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 2, ss)
+e2esset.WaitForRunningAndReady(c, 3, ss)
+ss = e2esset.GetStatefulSet(c, ss.Namespace, ss.Name)
+pods = e2esset.GetPodList(c, ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
@@ -437,7 +434,7 @@ var _ = SIGDescribe("StatefulSet", func() {

ginkgo.By("Performing a phased rolling update")
for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
@@ -449,7 +446,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
})
framework.ExpectNoError(err)
-ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
+ss, pods = e2esset.WaitForPartitionedRollingUpdate(c, ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
@@ -493,21 +490,20 @@ var _ = SIGDescribe("StatefulSet", func() {
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() {
ginkgo.By("Creating a new StatefulSet")
-ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+ss := e2esset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
+e2esset.SetHTTPProbe(ss)
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
-ss = sst.WaitForStatus(ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
-pods := sst.GetPodList(ss)
+pods := e2esset.GetPodList(c, ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
@@ -518,12 +514,12 @@ var _ = SIGDescribe("StatefulSet", func() {
}

ginkgo.By("Restoring Pods to the current revision")
-sst.DeleteStatefulPodAtIndex(0, ss)
-sst.DeleteStatefulPodAtIndex(1, ss)
-sst.DeleteStatefulPodAtIndex(2, ss)
-sst.WaitForRunningAndReady(3, ss)
-ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
-pods = sst.GetPodList(ss)
+e2esset.DeleteStatefulPodAtIndex(c, 0, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 1, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 2, ss)
+e2esset.WaitForRunningAndReady(c, 3, ss)
+ss = e2esset.GetStatefulSet(c, ss.Namespace, ss.Name)
+pods = e2esset.GetPodList(c, ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
@@ -537,24 +533,24 @@ var _ = SIGDescribe("StatefulSet", func() {

ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)

ginkgo.By("Creating a new revision")
-ss = sst.WaitForStatus(ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

ginkgo.By("Recreating Pods at the new revision")
-sst.DeleteStatefulPodAtIndex(0, ss)
-sst.DeleteStatefulPodAtIndex(1, ss)
-sst.DeleteStatefulPodAtIndex(2, ss)
-sst.WaitForRunningAndReady(3, ss)
-ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
-pods = sst.GetPodList(ss)
+e2esset.DeleteStatefulPodAtIndex(c, 0, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 1, ss)
+e2esset.DeleteStatefulPodAtIndex(c, 2, ss)
+e2esset.WaitForRunningAndReady(c, 3, ss)
+ss = e2esset.GetStatefulSet(c, ss.Namespace, ss.Name)
+pods = e2esset.GetPodList(c, ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
@@ -585,29 +581,28 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)

ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
-ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
+e2esset.SetHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)

ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)

ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
-sst.BreakHTTPProbe(ss)
-sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
-sst.WaitForStatusReadyReplicas(ss, 0)
-sst.UpdateReplicas(ss, 3)
-sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true)
+e2esset.BreakHTTPProbe(c, ss)
+e2esset.WaitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
+e2esset.WaitForStatusReadyReplicas(c, ss, 0)
+e2esset.UpdateReplicas(c, ss, 3)
+e2esset.ConfirmStatefulPodCount(c, 1, ss, 10*time.Second, true)

ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
-sst.RestoreHTTPProbe(ss)
-sst.WaitForRunningAndReady(3, ss)
+e2esset.RestoreHTTPProbe(c, ss)
+e2esset.WaitForRunningAndReady(c, 3, ss)

ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), e2esset.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
@@ -628,19 +623,19 @@ var _ = SIGDescribe("StatefulSet", func() {
})
framework.ExpectNoError(err)

-sst.BreakHTTPProbe(ss)
-sst.WaitForStatusReadyReplicas(ss, 0)
-sst.WaitForRunningAndNotReady(3, ss)
-sst.UpdateReplicas(ss, 0)
-sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true)
+e2esset.BreakHTTPProbe(c, ss)
+e2esset.WaitForStatusReadyReplicas(c, ss, 0)
+e2esset.WaitForRunningAndNotReady(c, 3, ss)
+e2esset.UpdateReplicas(c, ss, 0)
+e2esset.ConfirmStatefulPodCount(c, 3, ss, 10*time.Second, true)

ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
-sst.RestoreHTTPProbe(ss)
-sst.Scale(ss, 0)
+e2esset.RestoreHTTPProbe(c, ss)
+e2esset.Scale(c, ss, 0)

ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
-ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
+ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), e2esset.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
@@ -665,38 +660,37 @@ var _ = SIGDescribe("StatefulSet", func() {
psLabels := klabels.Set(labels)

ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
-ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
+ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+e2esset.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)

ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)

ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
-sst.BreakHTTPProbe(ss)
-sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
-sst.WaitForStatusReadyReplicas(ss, 0)
-sst.UpdateReplicas(ss, 3)
-sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)
+e2esset.BreakHTTPProbe(c, ss)
+e2esset.WaitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
+e2esset.WaitForStatusReadyReplicas(c, ss, 0)
+e2esset.UpdateReplicas(c, ss, 3)
+e2esset.ConfirmStatefulPodCount(c, 3, ss, 10*time.Second, false)

ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
-sst.RestoreHTTPProbe(ss)
-sst.WaitForRunningAndReady(3, ss)
+e2esset.RestoreHTTPProbe(c, ss)
+e2esset.WaitForRunningAndReady(c, 3, ss)

ginkgo.By("Scale down will not halt with unhealthy stateful pod")
-sst.BreakHTTPProbe(ss)
-sst.WaitForStatusReadyReplicas(ss, 0)
-sst.WaitForRunningAndNotReady(3, ss)
-sst.UpdateReplicas(ss, 0)
-sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false)
+e2esset.BreakHTTPProbe(c, ss)
+e2esset.WaitForStatusReadyReplicas(c, ss, 0)
+e2esset.WaitForRunningAndNotReady(c, 3, ss)
+e2esset.UpdateReplicas(c, ss, 0)
+e2esset.ConfirmStatefulPodCount(c, 0, ss, 10*time.Second, false)

ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
-sst.RestoreHTTPProbe(ss)
-sst.Scale(ss, 0)
-sst.WaitForStatusReplicas(ss, 0)
+e2esset.RestoreHTTPProbe(c, ss)
+e2esset.Scale(c, ss, 0)
+e2esset.WaitForStatusReplicas(c, ss, 0)
})

/*
@@ -732,7 +726,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)

ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
-ss := framework.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
+ss := e2esset.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
ss.Spec.Template.Spec.NodeName = node.Name
@@ -748,7 +742,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), e2esset.StatefulPodTimeout)
defer cancel()
// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
@@ -782,23 +776,22 @@ var _ = SIGDescribe("StatefulSet", func() {
return err
}
if statefulPod.Status.Phase != v1.PodRunning {
-return fmt.Errorf("Pod %v is not in running phase: %v", statefulPod.Name, statefulPod.Status.Phase)
+return fmt.Errorf("pod %v is not in running phase: %v", statefulPod.Name, statefulPod.Status.Phase)
} else if statefulPod.UID == initialStatefulPodUID {
-return fmt.Errorf("Pod %v wasn't recreated: %v == %v", statefulPod.Name, statefulPod.UID, initialStatefulPodUID)
+return fmt.Errorf("pod %v wasn't recreated: %v == %v", statefulPod.Name, statefulPod.UID, initialStatefulPodUID)
}
return nil
-}, framework.StatefulPodTimeout, 2*time.Second).Should(gomega.BeNil())
+}, e2esset.StatefulPodTimeout, 2*time.Second).Should(gomega.BeNil())
})

ginkgo.It("should have a working scale subresource", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
-ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
+e2esset.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
-ss = sst.WaitForStatus(ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+ss = e2esset.WaitForStatus(c, ss)

ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
@@ -826,12 +819,10 @@ var _ = SIGDescribe("StatefulSet", func() {
})

framework.KubeDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
-var sst *framework.StatefulSetTester
var appTester *clusterAppTester

ginkgo.BeforeEach(func() {
-sst = framework.NewStatefulSetTester(c)
-appTester = &clusterAppTester{tester: sst, ns: ns}
+appTester = &clusterAppTester{client: c, ns: ns}
})

ginkgo.AfterEach(func() {
@@ -839,34 +830,34 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DumpDebugInfo(c, ns)
}
e2elog.Logf("Deleting all statefulset in ns %v", ns)
-framework.DeleteAllStatefulSets(c, ns)
+e2esset.DeleteAllStatefulSets(c, ns)
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working zookeeper cluster", func() {
-appTester.statefulPod = &zookeeperTester{tester: sst}
+appTester.statefulPod = &zookeeperTester{client: c}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working redis cluster", func() {
-appTester.statefulPod = &redisTester{tester: sst}
+appTester.statefulPod = &redisTester{client: c}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working mysql cluster", func() {
-appTester.statefulPod = &mysqlGaleraTester{tester: sst}
+appTester.statefulPod = &mysqlGaleraTester{client: c}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
ginkgo.It("should creating a working CockroachDB cluster", func() {
-appTester.statefulPod = &cockroachDBTester{tester: sst}
+appTester.statefulPod = &cockroachDBTester{client: c}
appTester.run()
})
})
@@ -894,7 +885,7 @@ type statefulPodTester interface {
type clusterAppTester struct {
ns string
statefulPod statefulPodTester
-tester *framework.StatefulSetTester
+client clientset.Interface
}

func (c *clusterAppTester) run() {
@@ -910,8 +901,8 @@ func (c *clusterAppTester) run() {
default:
if restartCluster {
ginkgo.By("Restarting stateful set " + ss.Name)
-c.tester.Restart(ss)
-c.tester.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+e2esset.Restart(c.client, ss)
+e2esset.WaitForRunningAndReady(c.client, *ss.Spec.Replicas, ss)
}
}

@@ -923,7 +914,7 @@ func (c *clusterAppTester) run() {

type zookeeperTester struct {
ss *appsv1.StatefulSet
-tester *framework.StatefulSetTester
+client clientset.Interface
}

func (z *zookeeperTester) name() string {
@@ -931,7 +922,7 @@ func (z *zookeeperTester) name() string {
}

func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet {
-z.ss = z.tester.CreateStatefulSet(zookeeperManifestPath, ns)
+z.ss = e2esset.CreateStatefulSet(z.client, zookeeperManifestPath, ns)
return z.ss
}

@@ -953,7 +944,7 @@ func (z *zookeeperTester) read(statefulPodIndex int, key string) string {

type mysqlGaleraTester struct {
ss *appsv1.StatefulSet
-tester *framework.StatefulSetTester
+client clientset.Interface
}

func (m *mysqlGaleraTester) name() string {
@@ -969,7 +960,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
}

func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
-m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)
+m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns)

e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
for _, cmd := range []string{
@@ -996,7 +987,7 @@ func (m *mysqlGaleraTester) read(statefulPodIndex int, key string) string {

type redisTester struct {
ss *appsv1.StatefulSet
-tester *framework.StatefulSetTester
+client clientset.Interface
}

func (m *redisTester) name() string {
@@ -1009,7 +1000,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string {
}

func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
-m.ss = m.tester.CreateStatefulSet(redisManifestPath, ns)
+m.ss = e2esset.CreateStatefulSet(m.client, redisManifestPath, ns)
return m.ss
}

@@ -1027,7 +1018,7 @@ func (m *redisTester) read(statefulPodIndex int, key string) string {

type cockroachDBTester struct {
ss *appsv1.StatefulSet
-tester *framework.StatefulSetTester
+client clientset.Interface
}

func (c *cockroachDBTester) name() string {
@@ -1040,7 +1031,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
}

func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
-c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
+c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;",
@@ -1088,17 +1079,16 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
-sst := framework.NewStatefulSetTester(c)
-sst.SetHTTPProbe(ss)
+e2esset.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
framework.ExpectNoError(err)
-sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
-ss = sst.WaitForStatus(ss)
+e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
-pods := sst.GetPodList(ss)
+pods := e2esset.GetPodList(c, ss)
for i := range pods.Items {
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
@@ -1107,33 +1097,33 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
currentRevision))
}
-sst.SortStatefulPods(pods)
-err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
+e2esset.SortStatefulPods(pods)
+err = e2esset.BreakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
-ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
+ss, pods = e2esset.WaitForPodNotReady(c, ss, pods.Items[1].Name)
newImage := NewWebserverImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)

ginkgo.By("Creating a new revision")
-ss = sst.WaitForStatus(ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

ginkgo.By("Updating Pods in reverse ordinal order")
-pods = sst.GetPodList(ss)
-sst.SortStatefulPods(pods)
-err = sst.RestorePodHTTPProbe(ss, &pods.Items[1])
+pods = e2esset.GetPodList(c, ss)
+e2esset.SortStatefulPods(pods)
+err = e2esset.RestorePodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
-ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
-ss, pods = sst.WaitForRollingUpdate(ss)
+ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name)
+ss, pods = e2esset.WaitForRollingUpdate(c, ss)
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
@@ -1156,16 +1146,16 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
}

ginkgo.By("Rolling back to a previous revision")
-err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
+err = e2esset.BreakPodHTTPProbe(ss, &pods.Items[1])
framework.ExpectNoError(err)
-ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
+ss, pods = e2esset.WaitForPodNotReady(c, ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
-ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+ss, err = e2esset.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
framework.ExpectNoError(err)
-ss = sst.WaitForStatus(ss)
+ss = e2esset.WaitForStatus(c, ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during roll back")
@@ -1173,11 +1163,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
"Prior revision should equal update revision during roll back")

ginkgo.By("Rolling back update in reverse ordinal order")
-pods = sst.GetPodList(ss)
-sst.SortStatefulPods(pods)
-sst.RestorePodHTTPProbe(ss, &pods.Items[1])
-ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
-ss, pods = sst.WaitForRollingUpdate(ss)
+pods = e2esset.GetPodList(c, ss)
+e2esset.SortStatefulPods(pods)
+e2esset.RestorePodHTTPProbe(ss, &pods.Items[1])
+ss, pods = e2esset.WaitForPodReady(c, ss, pods.Items[1].Name)
+ss, pods = e2esset.WaitForRollingUpdate(c, ss)
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
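With the tester object gone, the per-application testers in this file keep only a clientset and call the package-level helpers directly; condensed from the hunks above, with identifiers exactly as they appear in the diff:

type redisTester struct {
    ss     *appsv1.StatefulSet
    client clientset.Interface
}

func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
    m.ss = e2esset.CreateStatefulSet(m.client, redisManifestPath, ns)
    return m.ss
}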
@@ -27,7 +27,6 @@ go_library(
"resource_usage_gatherer.go",
"service_util.go",
"size.go",
-"statefulset_utils.go",
"test_context.go",
"util.go",
],
@@ -56,7 +55,6 @@ go_library(
"//pkg/util/taints:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
@@ -107,7 +105,6 @@ go_library(
"//test/e2e/framework/resource:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
@@ -159,6 +156,7 @@ filegroup(
"//test/e2e/framework/replicaset:all-srcs",
"//test/e2e/framework/resource:all-srcs",
"//test/e2e/framework/ssh:all-srcs",
+"//test/e2e/framework/statefulset:all-srcs",
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/viperconfig:all-srcs",
test/e2e/framework/statefulset/BUILD (new file, 45 lines)
@@ -0,0 +1,45 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"const.go",
"fixtures.go",
"rest.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/statefulset",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils/image:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
test/e2e/framework/statefulset/const.go (new file, 30 lines)
@@ -0,0 +1,30 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
"time"
)

const (
// StatefulSetPoll is a poll interval for StatefulSet tests
StatefulSetPoll = 10 * time.Second
// StatefulSetTimeout is a timeout interval for StatefulSet operations
StatefulSetTimeout = 10 * time.Minute
// StatefulPodTimeout is a timeout for stateful pods to change state
StatefulPodTimeout = 5 * time.Minute
)
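These constants are exported so that callers outside the package can reuse them; the test hunks above switch from framework.StatefulSetTimeout and framework.StatefulPodTimeout to the e2esset equivalents. As an illustration of how a caller might combine them with the wait package (a declared dependency of the new package), here is a hypothetical helper; the function name and the condition it polls are illustrative and not part of this commit, and it assumes the clientset, metav1, and wait imports already used elsewhere in the package:

// waitForObservedGeneration is a hypothetical example, not code from this commit.
// It polls every StatefulSetPoll until StatefulSetTimeout expires.
func waitForObservedGeneration(c clientset.Interface, ns, name string, gen int64) error {
    return wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
        ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return ss.Status.ObservedGeneration >= gen, nil
    })
}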
278
test/e2e/framework/statefulset/fixtures.go
Normal file
278
test/e2e/framework/statefulset/fixtures.go
Normal file
@ -0,0 +1,278 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package statefulset
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
e2efwk "k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
// NewStatefulSet creates a new Webserver StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
|
||||
// statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly
|
||||
// to the Pod. labels are the labels that will be usd for the StatefulSet selector.
|
||||
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []corev1.VolumeMount, podMounts []corev1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
|
||||
mounts := append(statefulPodMounts, podMounts...)
|
||||
claims := []corev1.PersistentVolumeClaim{}
|
||||
for _, m := range statefulPodMounts {
|
||||
claims = append(claims, NewStatefulSetPVC(m.Name))
|
||||
}
|
||||
|
||||
vols := []corev1.Volume{}
|
||||
for _, m := range podMounts {
|
||||
vols = append(vols, corev1.Volume{
|
||||
Name: m.Name,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
HostPath: &corev1.HostPathVolumeSource{
|
||||
Path: fmt.Sprintf("/tmp/%v", m.Name),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StatefulSet",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "webserver",
|
||||
Image: imageutils.GetE2EImage(imageutils.Httpd),
|
||||
VolumeMounts: mounts,
|
||||
},
|
||||
},
|
||||
Volumes: vols,
|
||||
},
|
||||
},
|
||||
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: appsv1.RollingUpdateStatefulSetStrategyType},
|
||||
VolumeClaimTemplates: claims,
|
||||
ServiceName: governingSvcName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets.
|
||||
func NewStatefulSetPVC(name string) corev1.PersistentVolumeClaim {
|
||||
return corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels.
|
||||
func CreateStatefulSetService(name string, labels map[string]string) *corev1.Service {
|
||||
headlessService := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Selector: labels,
|
||||
},
|
||||
}
|
||||
headlessService.Spec.Ports = []corev1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: corev1.ProtocolTCP},
|
||||
}
|
||||
headlessService.Spec.ClusterIP = "None"
|
||||
return headlessService
|
||||
}
|
||||
|
||||
// SetHTTPProbe sets the pod template's ReadinessProbe for Webserver StatefulSet containers.
|
||||
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
|
||||
// Note that this cannot be used together with PauseNewPods().
|
||||
func SetHTTPProbe(ss *appsv1.StatefulSet) {
|
||||
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
|
||||
}
|
||||
|
||||
// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
|
||||
func BreakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("path expected to be not empty: %v", path)
|
||||
}
|
||||
	// Ignore 'mv' errors to make this idempotent.
	cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
	return ExecInStatefulPods(c, ss, cmd)
}

// BreakPodHTTPProbe breaks the readiness probe of the webserver StatefulSet container in one pod.
func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error {
	path := httpProbe.HTTPGet.Path
	if path == "" {
		return fmt.Errorf("path expected to be not empty: %v", path)
	}
	// Ignore 'mv' errors to make this idempotent.
	cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
	stdout, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
	e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
	return err
}

// RestoreHTTPProbe restores the readiness probe of the webserver StatefulSet containers in ss.
func RestoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
	path := httpProbe.HTTPGet.Path
	if path == "" {
		return fmt.Errorf("path expected to be not empty: %v", path)
	}
	// Ignore 'mv' errors to make this idempotent.
	cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
	return ExecInStatefulPods(c, ss, cmd)
}

// RestorePodHTTPProbe restores the readiness probe of the webserver StatefulSet container in pod.
func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error {
	path := httpProbe.HTTPGet.Path
	if path == "" {
		return fmt.Errorf("path expected to be not empty: %v", path)
	}
	// Ignore 'mv' errors to make this idempotent.
	cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
	stdout, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
	e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
	return err
}

func hasPauseProbe(pod *corev1.Pod) bool {
	probe := pod.Spec.Containers[0].ReadinessProbe
	return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command)
}

var httpProbe = &corev1.Probe{
	Handler: corev1.Handler{
		HTTPGet: &corev1.HTTPGetAction{
			Path: "/index.html",
			Port: intstr.IntOrString{IntVal: 80},
		},
	},
	PeriodSeconds:    1,
	SuccessThreshold: 1,
	FailureThreshold: 1,
}

var pauseProbe = &corev1.Probe{
	Handler: corev1.Handler{
		Exec: &corev1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}},
	},
	PeriodSeconds:    1,
	SuccessThreshold: 1,
	FailureThreshold: 1,
}

type statefulPodsByOrdinal []corev1.Pod

func (sp statefulPodsByOrdinal) Len() int {
	return len(sp)
}

func (sp statefulPodsByOrdinal) Swap(i, j int) {
	sp[i], sp[j] = sp[j], sp[i]
}

func (sp statefulPodsByOrdinal) Less(i, j int) bool {
	return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}

// PauseNewPods adds an always-failing ReadinessProbe to the StatefulSet PodTemplate.
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHTTPProbe().
func PauseNewPods(ss *appsv1.StatefulSet) {
	ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}

// ResumeNextPod allows the next Pod in the StatefulSet to continue by removing the ReadinessProbe
// added by PauseNewPods(), if it's still there.
// It fails the test if it finds any pods that are not in phase Running,
// or if it finds more than one paused Pod existing at the same time.
// This is a no-op if there are no paused pods.
func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
	podList := GetPodList(c, ss)
	resumedPod := ""
	for _, pod := range podList.Items {
		if pod.Status.Phase != corev1.PodRunning {
			e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
		}
		if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
			continue
		}
		if resumedPod != "" {
			e2elog.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
		}
		_, err := e2efwk.RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
		e2efwk.ExpectNoError(err)
		e2elog.Logf("Resumed pod %v", pod.Name)
		resumedPod = pod.Name
	}
}

// SortStatefulPods sorts pods by their ordinals.
func SortStatefulPods(pods *corev1.PodList) {
	sort.Sort(statefulPodsByOrdinal(pods.Items))
}

var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")

func getStatefulPodOrdinal(pod *corev1.Pod) int {
	ordinal := -1
	subMatches := statefulPodRegex.FindStringSubmatch(pod.Name)
	if len(subMatches) < 3 {
		return ordinal
	}
	if i, err := strconv.ParseInt(subMatches[2], 10, 32); err == nil {
		ordinal = int(i)
	}
	return ordinal
}
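
For orientation, a minimal usage sketch of the probe helpers above, under these assumptions: the consuming test imports this package as e2esset and the framework as e2efwk, `c`, `ns` and `labels` already exist in the test, the names passed to NewStatefulSet are placeholders, and SetHTTPProbe is defined alongside these helpers (as the PauseNewPods comment implies).

// Hypothetical sketch: drive one pod NotReady by breaking its HTTP readiness
// probe target, then restore it and wait for Ready again.
ss := e2esset.NewStatefulSet("ss", ns, "test", 3, nil, nil, labels) // placeholder names
e2esset.SetHTTPProbe(ss) // assumed helper; pairs with Break/RestorePodHTTPProbe
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
e2efwk.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)

pod := e2esset.GetPodList(c, ss).Items[0]
e2efwk.ExpectNoError(e2esset.BreakPodHTTPProbe(ss, &pod)) // pod goes NotReady on its next probe
e2esset.WaitForPodNotReady(c, ss, pod.Name)
e2efwk.ExpectNoError(e2esset.RestorePodHTTPProbe(ss, &pod))
e2esset.WaitForPodReady(c, ss, pod.Name)
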
351 test/e2e/framework/statefulset/rest.go Normal file
@ -0,0 +1,351 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
	"fmt"
	"path/filepath"
	"strings"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	e2efwk "k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/manifest"
)

// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.StatefulSet {
	mkpath := func(file string) string {
		return filepath.Join(manifestPath, file)
	}

	e2elog.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
	ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
	e2efwk.ExpectNoError(err)
	e2elog.Logf("Parsing service from %v", mkpath("service.yaml"))
	svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
	e2efwk.ExpectNoError(err)

	e2elog.Logf("creating %s service", ss.Name)
	_, err = c.CoreV1().Services(ns).Create(svc)
	e2efwk.ExpectNoError(err)

	e2elog.Logf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)
	_, err = c.AppsV1().StatefulSets(ns).Create(ss)
	e2efwk.ExpectNoError(err)
	WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
	return ss
}
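
A minimal usage sketch, under the same assumptions as above (e2esset/e2efwk aliases, existing `c` and `ns`); the manifest directory is illustrative.

// Create the service and StatefulSet described by the manifests and wait for
// all replicas to become Running and Ready; clean up volumes when done.
ss := e2esset.CreateStatefulSet(c, "test/e2e/testing-manifests/statefulset/nginx", ns) // illustrative path
defer e2esset.DeleteAllStatefulSets(c, ns)
e2elog.Logf("created statefulset %s/%s", ss.Namespace, ss.Name)
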
// GetPodList gets the current Pods in ss.
func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *corev1.PodList {
	selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
	e2efwk.ExpectNoError(err)
	podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
	e2efwk.ExpectNoError(err)
	return podList
}

// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
func DeleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet) {
	name := getStatefulSetPodNameAtIndex(index, ss)
	noGrace := int64(0)
	if err := c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
		e2elog.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
	}
}

// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
	ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	e2efwk.ExpectNoError(err)

	// Scale down each statefulset, then delete it completely.
	// Deleting a pvc without doing this will leak volumes, #25101.
	errList := []string{}
	for i := range ssList.Items {
		ss := &ssList.Items[i]
		var err error
		if ss, err = Scale(c, ss, 0); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
		WaitForStatusReplicas(c, ss, 0)
		e2elog.Logf("Deleting statefulset %v", ss.Name)
		// Use OrphanDependents=false so it's deleted synchronously.
		// We already made sure the Pods are gone inside Scale().
		if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
	}

	// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
	pvNames := sets.NewString()
	// TODO: Don't assume all pvcs in the ns belong to a statefulset
	pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
		pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
		if err != nil {
			e2elog.Logf("WARNING: Failed to list pvcs, retrying %v", err)
			return false, nil
		}
		for _, pvc := range pvcList.Items {
			pvNames.Insert(pvc.Spec.VolumeName)
			// TODO: Double check that there are no pods referencing the pvc
			e2elog.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
			if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
				return false, nil
			}
		}
		return true, nil
	})
	if pvcPollErr != nil {
		errList = append(errList, "Timeout waiting for pvc deletion.")
	}

	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
		pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
		if err != nil {
			e2elog.Logf("WARNING: Failed to list pvs, retrying %v", err)
			return false, nil
		}
		waitingFor := []string{}
		for _, pv := range pvList.Items {
			if pvNames.Has(pv.Name) {
				waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
			}
		}
		if len(waitingFor) == 0 {
			return true, nil
		}
		e2elog.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
		return false, nil
	})
	if pollErr != nil {
		errList = append(errList, "Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs.")
	}
	if len(errList) != 0 {
		e2efwk.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
	}
}

// UpdateStatefulSetWithRetries updates the StatefulSet template with retries.
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) {
	statefulSets := c.AppsV1().StatefulSets(namespace)
	var updateErr error
	pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(statefulSet)
		if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
			e2elog.Logf("Updating stateful set %s", name)
			return true, nil
		}
		updateErr = err
		return false, nil
	})
	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("couldn't apply the provided update to stateful set %q: %v", name, updateErr)
	}
	return statefulSet, pollErr
}
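
A usage sketch under the same assumptions as above (e2esset/e2efwk aliases, existing `c` and `ss`); the container image value is purely illustrative.

// Bump the container image with conflict-retry; the update function may be
// called several times if the object changes between Get and Update.
ss, err := e2esset.UpdateStatefulSetWithRetries(c, ss.Namespace, ss.Name, func(update *appsv1.StatefulSet) {
	update.Spec.Template.Spec.Containers[0].Image = "httpd:2.4.39-alpine" // illustrative image
})
e2efwk.ExpectNoError(err)
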
// Scale scales ss to count replicas.
func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) {
	name := ss.Name
	ns := ss.Namespace

	e2elog.Logf("Scaling statefulset %s to %d", name, count)
	ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })

	var statefulPodList *corev1.PodList
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
		statefulPodList = GetPodList(c, ss)
		if int32(len(statefulPodList.Items)) == count {
			return true, nil
		}
		return false, nil
	})
	if pollErr != nil {
		unhealthy := []string{}
		for _, statefulPod := range statefulPodList.Items {
			delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod)
			if delTs != nil || phase != corev1.PodRunning || !readiness {
				unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness))
			}
		}
		return ss, fmt.Errorf("failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy)
	}
	return ss, nil
}
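
For example (same assumptions as above); Scale only waits for the pod count, so the status helpers from wait.go are typically used alongside it.

// Scale down to one replica and wait until the controller reports it.
ss, err := e2esset.Scale(c, ss, 1)
e2efwk.ExpectNoError(err)
e2esset.WaitForStatusReplicas(c, ss, 1)
e2esset.WaitForStatusReadyReplicas(c, ss, 1)
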
// UpdateReplicas updates the replicas of ss to count.
func UpdateReplicas(c clientset.Interface, ss *appsv1.StatefulSet, count int32) {
	update(c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
}

// Restart scales ss to 0 and then back to its previous number of replicas.
func Restart(c clientset.Interface, ss *appsv1.StatefulSet) {
	oldReplicas := *(ss.Spec.Replicas)
	ss, err := Scale(c, ss, 0)
	e2efwk.ExpectNoError(err)
	// Wait for controller to report the desired number of Pods.
	// This way we know the controller has observed all Pod deletions
	// before we scale it back up.
	WaitForStatusReplicas(c, ss, 0)
	update(c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
}

// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count,
// waiting up to timeout for ss to scale to count.
func ConfirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
	start := time.Now()
	deadline := start.Add(timeout)
	for t := time.Now(); t.Before(deadline); t = time.Now() {
		podList := GetPodList(c, ss)
		statefulPodCount := len(podList.Items)
		if statefulPodCount != count {
			e2epod.LogPodStates(podList.Items)
			if hard {
				e2elog.Failf("StatefulSet %v scaled unexpectedly from %d to %d replicas", ss.Name, count, len(podList.Items))
			} else {
				e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
			}
			time.Sleep(1 * time.Second)
			continue
		}
		e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
		time.Sleep(1 * time.Second)
	}
}

// GetStatefulSet gets the StatefulSet named name in namespace.
func GetStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet {
	ss, err := c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		e2elog.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
	}
	return ss
}

// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil then verification failed.
func CheckHostname(c clientset.Interface, ss *appsv1.StatefulSet) error {
	cmd := "printf $(hostname)"
	podList := GetPodList(c, ss)
	for _, statefulPod := range podList.Items {
		hostname, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
		if err != nil {
			return err
		}
		if hostname != statefulPod.Name {
			return fmt.Errorf("unexpected hostname (%s) and stateful pod name (%s) not equal", hostname, statefulPod.Name)
		}
	}
	return nil
}

// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
func CheckMount(c clientset.Interface, ss *appsv1.StatefulSet, mountPath string) error {
	for _, cmd := range []string{
		// Print inode, size etc
		fmt.Sprintf("ls -idlh %v", mountPath),
		// Print subdirs
		fmt.Sprintf("find %v", mountPath),
		// Try writing
		fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
	} {
		if err := ExecInStatefulPods(c, ss, cmd); err != nil {
			return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
		}
	}
	return nil
}

// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
	e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)

	if expectedServiceName != ss.Spec.ServiceName {
		return fmt.Errorf("wrong service name governing statefulset. Expected %s got %s",
			expectedServiceName, ss.Spec.ServiceName)
	}

	return nil
}

// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any subsequent Pods.
func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd string) error {
	podList := GetPodList(c, ss)
	for _, statefulPod := range podList.Items {
		stdout, err := e2efwk.RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
		e2elog.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
		if err != nil {
			return err
		}
	}
	return nil
}
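
A small sketch of how ExecInStatefulPods and CheckMount are typically combined (same assumptions as above; the mount path is illustrative).

// Verify every pod can write to its volume, then list the data directory.
e2efwk.ExpectNoError(e2esset.CheckMount(c, ss, "/data"))
e2efwk.ExpectNoError(e2esset.ExecInStatefulPods(c, ss, "ls -1 /data"))
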
type updateStatefulSetFunc func(*appsv1.StatefulSet)

// VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
type VerifyStatefulPodFunc func(*corev1.Pod)

// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
func VerifyPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) {
	name := getStatefulSetPodNameAtIndex(index, ss)
	pod, err := c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
	e2efwk.ExpectNoError(err, fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
	verify(pod)
}

// update updates a StatefulSet, and it is only used within rest.go.
func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
	for i := 0; i < 3; i++ {
		ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			e2elog.Failf("failed to get statefulset %q: %v", name, err)
		}
		update(ss)
		ss, err = c.AppsV1().StatefulSets(ns).Update(ss)
		if err == nil {
			return ss
		}
		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
			e2elog.Failf("failed to update statefulset %q: %v", name, err)
		}
	}
	e2elog.Failf("too many retries draining statefulset %q", name)
	return nil
}

// getStatefulSetPodNameAtIndex gets the formatted pod name for the given index.
func getStatefulSetPodNameAtIndex(index int, ss *appsv1.StatefulSet) string {
	// TODO: we won't use "-index" as the name strategy forever,
	// pull the name out from an identity mapper.
	return fmt.Sprintf("%v-%v", ss.Name, index)
}
285 test/e2e/framework/statefulset/wait.go Normal file
@ -0,0 +1,285 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statefulset

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// WaitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision.
func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) {
	var pods *corev1.PodList
	if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
		e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
			set.Namespace,
			set.Name,
			set.Spec.UpdateStrategy.Type)
	}
	if set.Spec.UpdateStrategy.RollingUpdate == nil || set.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
		e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
			set.Namespace,
			set.Name)
	}
	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
		set = set2
		pods = pods2
		partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
		if len(pods.Items) < int(*set.Spec.Replicas) {
			return false, nil
		}
		if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
			e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
				set.Namespace,
				set.Name,
			)
			SortStatefulPods(pods)
			for i := range pods.Items {
				if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
					e2elog.Logf("Waiting for Pod %s/%s to have revision %s; currently at revision %s",
						pods.Items[i].Namespace,
						pods.Items[i].Name,
						set.Status.UpdateRevision,
						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
				}
			}
			return false, nil
		}
		for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
			if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
				e2elog.Logf("Waiting for Pod %s/%s to have revision %s; currently at revision %s",
					pods.Items[i].Namespace,
					pods.Items[i].Name,
					set.Status.UpdateRevision,
					pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
				return false, nil
			}
		}
		return true, nil
	})
	return set, pods
}

// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
// numPodsReady ordinals to be Ready.
func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			podList := GetPodList(c, ss)
			SortStatefulPods(podList)
			if int32(len(podList.Items)) < numPodsRunning {
				e2elog.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
				return false, nil
			}
			if int32(len(podList.Items)) > numPodsRunning {
				return false, fmt.Errorf("too many pods scheduled, expected %d got %d", numPodsRunning, len(podList.Items))
			}
			for _, p := range podList.Items {
				shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
				isReady := podutil.IsPodReady(&p)
				desiredReadiness := shouldBeReady == isReady
				e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, corev1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
				if p.Status.Phase != corev1.PodRunning || !desiredReadiness {
					return false, nil
				}
			}
			return true, nil
		})
	if pollErr != nil {
		e2elog.Failf("Failed waiting for pods to enter running: %v", pollErr)
	}
}

// WaitForState periodically polls the ss and its pods until the until function returns either true or an error.
func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *corev1.PodList) (bool, error)) {
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			podList := GetPodList(c, ssGet)
			return until(ssGet, podList)
		})
	if pollErr != nil {
		e2elog.Failf("Failed waiting for state update: %v", pollErr)
	}
}
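
WaitForState is the generic building block behind the more specific helpers; a sketch of a custom condition (same assumptions as above).

// Wait until the controller reports at least two ready replicas, regardless
// of what the individual pods look like.
e2esset.WaitForState(c, ss, func(set *appsv1.StatefulSet, pods *corev1.PodList) (bool, error) {
	return set.Status.ReadyReplicas >= 2, nil
})
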
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
// The returned StatefulSet contains such a StatefulSetStatus.
func WaitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *corev1.PodList) (bool, error) {
		if set2.Status.ObservedGeneration >= set.Generation {
			set = set2
			return true, nil
		}
		return false, nil
	})
	return set
}

// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
func WaitForRunningAndReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
	WaitForRunning(c, numStatefulPods, numStatefulPods, ss)
}

// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) {
	var pods *corev1.PodList
	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
		set = set2
		pods = pods2
		for i := range pods.Items {
			if pods.Items[i].Name == podName {
				return podutil.IsPodReady(&pods.Items[i]), nil
			}
		}
		return false, nil
	})
	return set, pods
}

// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) {
	var pods *corev1.PodList
	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
		set = set2
		pods = pods2
		for i := range pods.Items {
			if pods.Items[i].Name == podName {
				return !podutil.IsPodReady(&pods.Items[i]), nil
			}
		}
		return false, nil
	})
	return set, pods
}

// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
// complete. set must have a RollingUpdateStatefulSetStrategyType.
func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) {
	var pods *corev1.PodList
	if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
		e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
			set.Namespace,
			set.Name,
			set.Spec.UpdateStrategy.Type)
	}
	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
		set = set2
		pods = pods2
		if len(pods.Items) < int(*set.Spec.Replicas) {
			return false, nil
		}
		if set.Status.UpdateRevision != set.Status.CurrentRevision {
			e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
				set.Namespace,
				set.Name,
			)
			SortStatefulPods(pods)
			for i := range pods.Items {
				if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
					e2elog.Logf("Waiting for Pod %s/%s to have revision %s; currently at revision %s",
						pods.Items[i].Namespace,
						pods.Items[i].Name,
						set.Status.UpdateRevision,
						pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
				}
			}
			return false, nil
		}
		return true, nil
	})
	return set, pods
}
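
A sketch of the usual update-and-wait flow these helpers support (same assumptions as above; the image is illustrative).

// Trigger a rolling update by changing the pod template, wait for the
// controller to observe it, then wait for every pod to reach the new revision.
ss, err := e2esset.UpdateStatefulSetWithRetries(c, ss.Namespace, ss.Name, func(update *appsv1.StatefulSet) {
	update.Spec.Template.Spec.Containers[0].Image = "httpd:2.4.39-alpine" // illustrative image
})
e2efwk.ExpectNoError(err)
ss = e2esset.WaitForStatus(c, ss)
ss, _ = e2esset.WaitForRollingUpdate(c, ss)
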
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func WaitForRunningAndNotReady(c clientset.Interface, numStatefulPods int32, ss *appsv1.StatefulSet) {
	WaitForRunning(c, numStatefulPods, 0, ss)
}

// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas.
func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
	e2elog.Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)

	ns, name := ss.Namespace, ss.Name
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			ssGet, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			if ssGet.Status.ObservedGeneration < ss.Generation {
				return false, nil
			}
			if ssGet.Status.ReadyReplicas != expectedReplicas {
				e2elog.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
				return false, nil
			}
			return true, nil
		})
	if pollErr != nil {
		e2elog.Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
	}
}

// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas.
func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
	e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)

	ns, name := ss.Namespace, ss.Name
	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
		func() (bool, error) {
			ssGet, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			if ssGet.Status.ObservedGeneration < ss.Generation {
				return false, nil
			}
			if ssGet.Status.Replicas != expectedReplicas {
				e2elog.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
				return false, nil
			}
			return true, nil
		})
	if pollErr != nil {
		e2elog.Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
	}
}

// Saturate waits for all Pods in ss to become Running and Ready.
func Saturate(c clientset.Interface, ss *appsv1.StatefulSet) {
	var i int32
	for i = 0; i < *(ss.Spec.Replicas); i++ {
		e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
		WaitForRunning(c, i+1, i, ss)
		e2elog.Logf("Resuming stateful pod at index %v", i)
		ResumeNextPod(c, ss)
	}
}
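
A sketch of the pause/resume pattern Saturate is built for (same assumptions as above; the names passed to the fixture constructor are placeholders).

// Pause the readiness of new pods, create the set, then admit pods one
// ordinal at a time while asserting each one comes up in order.
ss := e2esset.NewStatefulSet("ss", ns, "test", 3, nil, nil, labels) // placeholder names
e2esset.PauseNewPods(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
e2efwk.ExpectNoError(err)
e2esset.Saturate(c, ss)
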
@ -1,895 +0,0 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
"k8s.io/api/core/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
"k8s.io/kubernetes/test/e2e/manifest"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
// StatefulSetPoll is a poll interval for StatefulSet tests
|
||||
StatefulSetPoll = 10 * time.Second
|
||||
// StatefulSetTimeout is a timeout interval for StatefulSet operations
|
||||
StatefulSetTimeout = 10 * time.Minute
|
||||
// StatefulPodTimeout is a timeout for stateful pods to change state
|
||||
StatefulPodTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels.
|
||||
func CreateStatefulSetService(name string, labels map[string]string) *v1.Service {
|
||||
headlessService := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: labels,
|
||||
},
|
||||
}
|
||||
headlessService.Spec.Ports = []v1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
|
||||
}
|
||||
headlessService.Spec.ClusterIP = "None"
|
||||
return headlessService
|
||||
}
|
||||
|
||||
// StatefulSetTester is a struct that contains utility methods for testing StatefulSet related functionality. It uses a
|
||||
// clientset.Interface to communicate with the API server.
|
||||
type StatefulSetTester struct {
|
||||
c clientset.Interface
|
||||
}
|
||||
|
||||
// NewStatefulSetTester creates a StatefulSetTester that uses c to interact with the API server.
|
||||
func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
|
||||
return &StatefulSetTester{c}
|
||||
}
|
||||
|
||||
// GetStatefulSet gets the StatefulSet named name in namespace.
|
||||
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1.StatefulSet {
|
||||
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
|
||||
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1.StatefulSet {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(manifestPath, file)
|
||||
}
|
||||
|
||||
e2elog.Logf("Parsing statefulset from %v", mkpath("statefulset.yaml"))
|
||||
ss, err := manifest.StatefulSetFromManifest(mkpath("statefulset.yaml"), ns)
|
||||
ExpectNoError(err)
|
||||
e2elog.Logf("Parsing service from %v", mkpath("service.yaml"))
|
||||
svc, err := manifest.SvcFromManifest(mkpath("service.yaml"))
|
||||
ExpectNoError(err)
|
||||
|
||||
e2elog.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
|
||||
_, err = s.c.CoreV1().Services(ns).Create(svc)
|
||||
ExpectNoError(err)
|
||||
|
||||
e2elog.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
|
||||
_, err = s.c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
ExpectNoError(err)
|
||||
s.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
return ss
|
||||
}
|
||||
|
||||
// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
|
||||
func (s *StatefulSetTester) CheckMount(ss *appsv1.StatefulSet, mountPath string) error {
|
||||
for _, cmd := range []string{
|
||||
// Print inode, size etc
|
||||
fmt.Sprintf("ls -idlh %v", mountPath),
|
||||
// Print subdirs
|
||||
fmt.Sprintf("find %v", mountPath),
|
||||
// Try writing
|
||||
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
|
||||
} {
|
||||
if err := s.ExecInStatefulPods(ss, cmd); err != nil {
|
||||
return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecInStatefulPods executes cmd in all Pods in ss. If a error occurs it is returned and cmd is not execute in any subsequent Pods.
|
||||
func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1.StatefulSet, cmd string) error {
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
e2elog.Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil than verification failed.
|
||||
func (s *StatefulSetTester) CheckHostname(ss *appsv1.StatefulSet) error {
|
||||
cmd := "printf $(hostname)"
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
hostname, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hostname != statefulPod.Name {
|
||||
return fmt.Errorf("unexpected hostname (%s) and stateful pod name (%s) not equal", hostname, statefulPod.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Saturate waits for all Pods in ss to become Running and Ready.
|
||||
func (s *StatefulSetTester) Saturate(ss *appsv1.StatefulSet) {
|
||||
var i int32
|
||||
for i = 0; i < *(ss.Spec.Replicas); i++ {
|
||||
e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
|
||||
s.WaitForRunning(i+1, i, ss)
|
||||
e2elog.Logf("Resuming stateful pod at index %v", i)
|
||||
s.ResumeNextPod(ss)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
|
||||
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1.StatefulSet) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
noGrace := int64(0)
|
||||
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
|
||||
e2elog.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
|
||||
type VerifyStatefulPodFunc func(*v1.Pod)
|
||||
|
||||
// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
|
||||
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
|
||||
ExpectNoError(err, fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
|
||||
verify(pod)
|
||||
}
|
||||
|
||||
func getStatefulSetPodNameAtIndex(index int, ss *appsv1.StatefulSet) string {
|
||||
// TODO: we won't use "-index" as the name strategy forever,
|
||||
// pull the name out from an identity mapper.
|
||||
return fmt.Sprintf("%v-%v", ss.Name, index)
|
||||
}
|
||||
|
||||
// Scale scales ss to count replicas.
|
||||
func (s *StatefulSetTester) Scale(ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) {
|
||||
name := ss.Name
|
||||
ns := ss.Namespace
|
||||
|
||||
e2elog.Logf("Scaling statefulset %s to %d", name, count)
|
||||
ss = s.update(ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
|
||||
var statefulPodList *v1.PodList
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
|
||||
statefulPodList = s.GetPodList(ss)
|
||||
if int32(len(statefulPodList.Items)) == count {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
unhealthy := []string{}
|
||||
for _, statefulPod := range statefulPodList.Items {
|
||||
delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod)
|
||||
if delTs != nil || phase != v1.PodRunning || !readiness {
|
||||
unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness))
|
||||
}
|
||||
}
|
||||
return ss, fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy)
|
||||
}
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
// UpdateReplicas updates the replicas of ss to count.
|
||||
func (s *StatefulSetTester) UpdateReplicas(ss *appsv1.StatefulSet, count int32) {
|
||||
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
}
|
||||
|
||||
// Restart scales ss to 0 and then back to its previous number of replicas.
|
||||
func (s *StatefulSetTester) Restart(ss *appsv1.StatefulSet) {
|
||||
oldReplicas := *(ss.Spec.Replicas)
|
||||
ss, err := s.Scale(ss, 0)
|
||||
ExpectNoError(err)
|
||||
// Wait for controller to report the desired number of Pods.
|
||||
// This way we know the controller has observed all Pod deletions
|
||||
// before we scale it back up.
|
||||
s.WaitForStatusReplicas(ss, 0)
|
||||
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
|
||||
}
|
||||
|
||||
func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
|
||||
for i := 0; i < 3; i++ {
|
||||
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Failf("failed to get statefulset %q: %v", name, err)
|
||||
}
|
||||
update(ss)
|
||||
ss, err = s.c.AppsV1().StatefulSets(ns).Update(ss)
|
||||
if err == nil {
|
||||
return ss
|
||||
}
|
||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
||||
e2elog.Failf("failed to update statefulset %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
e2elog.Failf("too many retries draining statefulset %q", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPodList gets the current Pods in ss.
|
||||
func (s *StatefulSetTester) GetPodList(ss *appsv1.StatefulSet) *v1.PodList {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
|
||||
ExpectNoError(err)
|
||||
podList, err := s.c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
ExpectNoError(err)
|
||||
return podList
|
||||
}
|
||||
|
||||
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count waiting up to timeout for ss to
|
||||
// to scale to count.
|
||||
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
|
||||
start := time.Now()
|
||||
deadline := start.Add(timeout)
|
||||
for t := time.Now(); t.Before(deadline); t = time.Now() {
|
||||
podList := s.GetPodList(ss)
|
||||
statefulPodCount := len(podList.Items)
|
||||
if statefulPodCount != count {
|
||||
e2epod.LogPodStates(podList.Items)
|
||||
if hard {
|
||||
e2elog.Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
|
||||
} else {
|
||||
e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
|
||||
// numPodsReady ordinals to be Ready.
|
||||
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
podList := s.GetPodList(ss)
|
||||
s.SortStatefulPods(podList)
|
||||
if int32(len(podList.Items)) < numPodsRunning {
|
||||
e2elog.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning)
|
||||
return false, nil
|
||||
}
|
||||
if int32(len(podList.Items)) > numPodsRunning {
|
||||
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPodsRunning, len(podList.Items))
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
|
||||
isReady := podutil.IsPodReady(&p)
|
||||
desiredReadiness := shouldBeReady == isReady
|
||||
e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
|
||||
if p.Status.Phase != v1.PodRunning || !desiredReadiness {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
e2elog.Failf("Failed waiting for pods to enter running: %v", pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
|
||||
func (s *StatefulSetTester) WaitForState(ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
ssGet, err := s.c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
podList := s.GetPodList(ssGet)
|
||||
return until(ssGet, podList)
|
||||
})
|
||||
if pollErr != nil {
|
||||
e2elog.Failf("Failed waiting for state update: %v", pollErr)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
|
||||
// The returned StatefulSet contains such a StatefulSetStatus
|
||||
func (s *StatefulSetTester) WaitForStatus(set *appsv1.StatefulSet) *appsv1.StatefulSet {
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
|
||||
if set2.Status.ObservedGeneration >= set.Generation {
|
||||
set = set2
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set
|
||||
}
|
||||
|
||||
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
|
||||
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
|
||||
s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
|
||||
}
|
||||
|
||||
// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Name == podName {
|
||||
return podutil.IsPodReady(&pods.Items[i]), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set, pods
|
||||
|
||||
}
|
||||
|
||||
// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Name == podName {
|
||||
return !podutil.IsPodReady(&pods.Items[i]), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return set, pods
|
||||
|
||||
}
|
||||
|
||||
// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
|
||||
// complete. set must have a RollingUpdateStatefulSetStrategyType.
|
||||
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
|
||||
e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
set.Spec.UpdateStrategy.Type)
|
||||
}
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
if len(pods.Items) < int(*set.Spec.Replicas) {
|
||||
return false, nil
|
||||
}
|
||||
if set.Status.UpdateRevision != set.Status.CurrentRevision {
|
||||
e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
)
|
||||
s.SortStatefulPods(pods)
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return set, pods
|
||||
}
|
||||
|
||||
// WaitForPartitionedRollingUpdate waits for all Pods in set to exist and have the correct revision. set must have
|
||||
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
|
||||
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
|
||||
// at its update revision.
|
||||
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
|
||||
e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
set.Spec.UpdateStrategy.Type)
|
||||
}
|
||||
if set.Spec.UpdateStrategy.RollingUpdate == nil || set.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
|
||||
e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with nil RollingUpdate or nil Partition",
|
||||
set.Namespace,
|
||||
set.Name)
|
||||
}
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
|
||||
if len(pods.Items) < int(*set.Spec.Replicas) {
|
||||
return false, nil
|
||||
}
|
||||
if partition <= 0 && set.Status.UpdateRevision != set.Status.CurrentRevision {
|
||||
e2elog.Logf("Waiting for StatefulSet %s/%s to complete update",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
)
|
||||
s.SortStatefulPods(pods)
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
|
||||
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
set.Status.UpdateRevision,
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return set, pods
|
||||
}
|
||||
|
||||
// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
|
||||
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
|
||||
s.WaitForRunning(numStatefulPods, 0, ss)
|
||||
}
|
||||
|
||||
var httpProbe = &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/index.html",
|
||||
Port: intstr.IntOrString{IntVal: 80},
|
||||
},
|
||||
},
|
||||
PeriodSeconds: 1,
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
|
||||
// SetHTTPProbe sets the pod template's ReadinessProbe for Webserver StatefulSet containers.
|
||||
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
|
||||
// Note that this cannot be used together with PauseNewPods().
|
||||
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1.StatefulSet) {
|
||||
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
|
||||
}
|
||||
|
||||
// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
|
||||
func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1.StatefulSet) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
|
||||
return s.ExecInStatefulPods(ss, cmd)
|
||||
}
|
||||
|
||||
// BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
|
||||
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /usr/local/apache2/htdocs%v /tmp/ || true", path)
|
||||
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
|
||||
return err
|
||||
}
|
||||
|
||||
// RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
|
||||
func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1.StatefulSet) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
|
||||
return s.ExecInStatefulPods(ss, cmd)
|
||||
}
|
||||
|
||||
// RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
|
||||
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
|
||||
path := httpProbe.HTTPGet.Path
|
||||
if path == "" {
|
||||
return fmt.Errorf("Path expected to be not empty: %v", path)
|
||||
}
|
||||
// Ignore 'mv' errors to make this idempotent.
|
||||
cmd := fmt.Sprintf("mv -v /tmp%v /usr/local/apache2/htdocs/ || true", path)
|
||||
stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
e2elog.Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout)
|
||||
return err
|
||||
}
|
||||
|
||||
var pauseProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}},
},
PeriodSeconds: 1,
SuccessThreshold: 1,
FailureThreshold: 1,
}

func hasPauseProbe(pod *v1.Pod) bool {
probe := pod.Spec.Containers[0].ReadinessProbe
return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command)
}

// PauseNewPods adds an always-failing ReadinessProbe to the StatefulSet PodTemplate.
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHTTPProbe().
func (s *StatefulSetTester) PauseNewPods(ss *appsv1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}

// ResumeNextPod allows the next Pod in the StatefulSet to continue by creating the file
// that the pause probe added by PauseNewPods() checks for.
// It fails the test if it finds any pods that are not in phase Running,
// or if it finds more than one paused Pod existing at the same time.
// This is a no-op if there are no paused pods.
func (s *StatefulSetTester) ResumeNextPod(ss *appsv1.StatefulSet) {
podList := s.GetPodList(ss)
resumedPod := ""
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
}
if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
continue
}
if resumedPod != "" {
e2elog.Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
}
_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
ExpectNoError(err)
e2elog.Logf("Resumed pod %v", pod.Name)
resumedPod = pod.Name
}
}

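// A minimal usage sketch (hypothetical helper; mirrors how the upgrade tests use these
// hooks): pause new pods before creating the set, then let Saturate() bring the replicas
// up through the pause probe.
func examplePauseThenSaturate(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
	sst := &StatefulSetTester{c: c}
	sst.PauseNewPods(ss)
	ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
	ExpectNoError(err)
	// Saturate resumes the paused pods one at a time until every replica is Running and Ready.
	sst.Saturate(ss)
}
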
// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)

ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.ReadyReplicas != expectedReplicas {
e2elog.Logf("Waiting for stateful set status.readyReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.ReadyReplicas)
return false, nil
}
return true, nil
})
if pollErr != nil {
e2elog.Failf("Failed waiting for stateful set status.readyReplicas updated to %d: %v", expectedReplicas, pollErr)
}
}

// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)

ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.Replicas != expectedReplicas {
e2elog.Logf("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
return false, nil
}
return true, nil
})
if pollErr != nil {
e2elog.Failf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
}
}

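// A minimal usage sketch (hypothetical helper): scale an existing set down to one replica
// and block on both status counters before asserting anything about the surviving pod.
func exampleScaleDownAndWait(c clientset.Interface, ss *appsv1.StatefulSet) *appsv1.StatefulSet {
	sst := &StatefulSetTester{c: c}
	ss, err := sst.Scale(ss, 1)
	ExpectNoError(err)
	sst.WaitForStatusReplicas(ss, 1)
	sst.WaitForStatusReadyReplicas(ss, 1)
	return ss
}
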
// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (s *StatefulSetTester) CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)

if expectedServiceName != ss.Spec.ServiceName {
return fmt.Errorf("Wrong service name governing statefulset. Expected %s got %s",
expectedServiceName, ss.Spec.ServiceName)
}

return nil
}

// SortStatefulPods sorts pods by their ordinals
func (s *StatefulSetTester) SortStatefulPods(pods *v1.PodList) {
sort.Sort(statefulPodsByOrdinal(pods.Items))
}

// DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns.
func DeleteAllStatefulSets(c clientset.Interface, ns string) {
sst := &StatefulSetTester{c: c}
ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err)

// Scale down each statefulset, then delete it completely.
// Deleting a pvc without doing this will leak volumes, #25101.
errList := []string{}
for i := range ssList.Items {
ss := &ssList.Items[i]
var err error
if ss, err = sst.Scale(ss, 0); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
sst.WaitForStatusReplicas(ss, 0)
e2elog.Logf("Deleting statefulset %v", ss.Name)
// Use OrphanDependents=false so it's deleted synchronously.
// We already made sure the Pods are gone inside Scale().
if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil {
errList = append(errList, fmt.Sprintf("%v", err))
}
}

// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a statefulset
pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
e2elog.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil
}
for _, pvc := range pvcList.Items {
pvNames.Insert(pvc.Spec.VolumeName)
// TODO: Double check that there are no pods referencing the pvc
e2elog.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil {
return false, nil
}
}
return true, nil
})
if pvcPollErr != nil {
errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
}

pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
e2elog.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil
}
waitingFor := []string{}
for _, pv := range pvList.Items {
if pvNames.Has(pv.Name) {
waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
}
}
if len(waitingFor) == 0 {
return true, nil
}
e2elog.Logf("Still waiting for pvs of statefulset to disappear:\n%v", strings.Join(waitingFor, "\n"))
return false, nil
})
if pollErr != nil {
errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
}
if len(errList) != 0 {
ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
}
}

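// A minimal usage sketch (hypothetical test snippet; assumes a suite that imports ginkgo
// and this framework package, with a clientset c and a namespace ns): run the cleanup
// after every spec so a failed test cannot leak PVCs or PVs into later specs.
//
//	ginkgo.AfterEach(func() {
//		framework.DeleteAllStatefulSets(c, ns)
//	})
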
// NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets.
func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
return v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
},
},
},
}
}

// NewStatefulSet creates a new webserver StatefulSet for testing. The StatefulSet is named name and lives in namespace ns.
// statefulPodMounts are the mounts that will be backed by PVs; podMounts are mounted directly into the Pod;
// labels are used for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
mounts := append(statefulPodMounts, podMounts...)
claims := []v1.PersistentVolumeClaim{}
for _, m := range statefulPodMounts {
claims = append(claims, NewStatefulSetPVC(m.Name))
}

vols := []v1.Volume{}
for _, m := range podMounts {
vols = append(vols, v1.Volume{
Name: m.Name,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name),
},
},
})
}

return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: func(i int32) *int32 { return &i }(replicas),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "webserver",
Image: imageutils.GetE2EImage(imageutils.Httpd),
VolumeMounts: mounts,
SecurityContext: &v1.SecurityContext{},
},
},
Volumes: vols,
},
},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: appsv1.RollingUpdateStatefulSetStrategyType},
VolumeClaimTemplates: claims,
ServiceName: governingSvcName,
},
}
}

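// A minimal usage sketch (hypothetical helper; assumes the governing headless service
// svcName already exists and a clientset c is available): build a 3-replica webserver set
// with no extra mounts, submit it, and wait until every pod is Running and Ready.
func exampleCreateWebserverStatefulSet(c clientset.Interface, ns, svcName string, labels map[string]string) *appsv1.StatefulSet {
	ss := NewStatefulSet("ss", ns, svcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
	ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
	ExpectNoError(err)
	sst := &StatefulSetTester{c: c}
	sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
	return ss
}
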
// NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
func NewStatefulSetScale(ss *appsv1.StatefulSet) *appsv1beta2.Scale {
return &appsv1beta2.Scale{
// TODO: Create a variant of ObjectMeta type that only contains the fields below.
ObjectMeta: metav1.ObjectMeta{
Name: ss.Name,
Namespace: ss.Namespace,
},
Spec: appsv1beta2.ScaleSpec{
Replicas: *(ss.Spec.Replicas),
},
Status: appsv1beta2.ScaleStatus{
Replicas: ss.Status.Replicas,
},
}
}

var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")

func getStatefulPodOrdinal(pod *v1.Pod) int {
ordinal := -1
subMatches := statefulPodRegex.FindStringSubmatch(pod.Name)
if len(subMatches) < 3 {
return ordinal
}
if i, err := strconv.ParseInt(subMatches[2], 10, 32); err == nil {
ordinal = int(i)
}
return ordinal
}

type statefulPodsByOrdinal []v1.Pod

func (sp statefulPodsByOrdinal) Len() int {
return len(sp)
}

func (sp statefulPodsByOrdinal) Swap(i, j int) {
sp[i], sp[j] = sp[j], sp[i]
}

func (sp statefulPodsByOrdinal) Less(i, j int) bool {
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}

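// A minimal usage sketch (hypothetical helper): list the set's pods and sort them by
// ordinal so Items[0] is always the -0 pod regardless of the order the API returned them.
// The caller is expected to have verified that the set has at least one pod.
func exampleFirstPodByOrdinal(c clientset.Interface, ss *appsv1.StatefulSet) v1.Pod {
	sst := &StatefulSetTester{c: c}
	pods := sst.GetPodList(ss)
	sst.SortStatefulPods(pods)
	return pods.Items[0]
}
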
type updateStatefulSetFunc func(*appsv1.StatefulSet)

// UpdateStatefulSetWithRetries updates the StatefulSet template with retries.
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) {
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
e2elog.Logf("Updating stateful set %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to stateful set %q: %v", name, updateErr)
}
return statefulSet, pollErr
}
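
// A minimal usage sketch (hypothetical helper; newImage is whatever image the test wants
// to roll out): mutate the pod template under retry so conflicting writers cannot flake
// the update, then wait for the rollout to settle.
func exampleUpdateImageWithRetries(c clientset.Interface, ns, name, newImage string) {
	ss, err := UpdateStatefulSetWithRetries(c, ns, name, func(update *appsv1.StatefulSet) {
		update.Spec.Template.Spec.Containers[0].Image = newImage
	})
	ExpectNoError(err)
	sst := &StatefulSetTester{c: c}
	sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
}
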
@ -72,6 +72,7 @@ go_library(
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/drivers:go_default_library",

@ -41,6 +41,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -51,7 +52,6 @@ type localTestConfig struct {
node0 *v1.Node
client clientset.Interface
scName string
ssTester *framework.StatefulSetTester
discoveryDir string
hostExec utils.HostExec
ltrMgr utils.LocalTestResourceManager

@ -164,7 +164,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
// Choose the first node
node0 := &nodes.Items[0]

ssTester := framework.NewStatefulSetTester(f.ClientSet)
hostExec := utils.NewHostExec(f)
ltrMgr := utils.NewLocalResourceManager("local-volume-test", hostExec, hostBase)
config = &localTestConfig{

@ -173,7 +172,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
nodes: nodes.Items[:maxLen],
node0: node0,
scName: scName,
ssTester: ssTester,
discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
hostExec: hostExec,
ltrMgr: ltrMgr,

@ -1153,12 +1151,12 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(spec)
framework.ExpectNoError(err)

config.ssTester.WaitForRunningAndReady(ssReplicas, ss)
e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss)
return ss
}

func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti bool) {
pods := config.ssTester.GetPodList(ss)
pods := e2esset.GetPodList(config.client, ss)

nodes := sets.NewString()
for _, pod := range pods.Items {

@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -311,7 +312,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.Context("pods that use multiple volumes", func() {

ginkgo.AfterEach(func() {
framework.DeleteAllStatefulSets(c, ns)
e2esset.DeleteAllStatefulSets(c, ns)
})

ginkgo.It("should be reschedulable [Slow]", func() {

@ -319,7 +320,6 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")

numVols := 4
ssTester := framework.NewStatefulSetTester(c)

ginkgo.By("Creating a StatefulSet pod to initialize data")
writeCmd := "true"

@ -352,13 +352,13 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
framework.ExpectNoError(err)
ssTester.WaitForRunningAndReady(1, ss)
e2esset.WaitForRunningAndReady(c, 1, ss)

ginkgo.By("Deleting the StatefulSet but not the volumes")
// Scale down to 0 first so that the Delete is quick
ss, err = ssTester.Scale(ss, 0)
ss, err = e2esset.Scale(c, ss, 0)
framework.ExpectNoError(err)
ssTester.WaitForStatusReplicas(ss, 0)
e2esset.WaitForStatusReplicas(c, ss, 0)
err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err)

@ -372,7 +372,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
framework.ExpectNoError(err)
ssTester.WaitForRunningAndReady(1, ss)
e2esset.WaitForRunningAndReady(c, 1, ss)
})
})
})

@ -56,6 +56,7 @@ go_library(
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -66,7 +67,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
})
ginkgo.AfterEach(func() {
e2elog.Logf("Deleting all statefulset in namespace: %v", namespace)
framework.DeleteAllStatefulSets(client, namespace)
e2esset.DeleteAllStatefulSets(client, namespace)
})

ginkgo.It("vsphere statefulset testing", func() {

@ -79,13 +80,13 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)

ginkgo.By("Creating statefulset")
statefulsetTester := framework.NewStatefulSetTester(client)
statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)

statefulset := e2esset.CreateStatefulSet(client, manifestPath, namespace)
replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
framework.ExpectNoError(statefulsetTester.CheckMount(statefulset, mountPath))
ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas)
framework.ExpectNoError(e2esset.CheckMount(client, statefulset, mountPath))
ssPodsBeforeScaleDown := e2esset.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")

@ -103,9 +104,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
}

ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
_, scaledownErr := e2esset.Scale(client, statefulset, replicas-1)
framework.ExpectNoError(scaledownErr)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas-1)

// After scale down, verify vsphere volumes are detached from deleted pods
ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")

@ -124,12 +125,12 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
}

ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
_, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
_, scaleupErr := e2esset.Scale(client, statefulset, replicas)
framework.ExpectNoError(scaleupErr)
statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
e2esset.WaitForStatusReplicas(client, statefulset, replicas)
e2esset.WaitForStatusReadyReplicas(client, statefulset, replicas)

ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset)
ssPodsAfterScaleUp := e2esset.GetPodList(client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas")

@ -38,6 +38,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",

@ -31,6 +31,7 @@ go_library(
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

@ -24,12 +24,12 @@ import (
"k8s.io/apimachinery/pkg/util/version"

"k8s.io/kubernetes/test/e2e/framework"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/upgrades"
)

// StatefulSetUpgradeTest implements an upgrade test harness for StatefulSet upgrade testing.
type StatefulSetUpgradeTest struct {
tester *framework.StatefulSetTester
service *v1.Service
set *appsv1.StatefulSet
}

@ -60,11 +60,10 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ns := f.Namespace.Name
t.set = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.service = framework.CreateStatefulSetService(ssName, labels)
t.set = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.service = e2esset.CreateStatefulSetService(ssName, labels)
*(t.set.Spec.Replicas) = 3
t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set)
e2esset.PauseNewPods(t.set)

ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)

@ -76,40 +75,40 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err)

ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
t.verify()
t.restart()
t.verify()
e2esset.Saturate(f.ClientSet, t.set)
t.verify(f)
t.restart(f)
t.verify(f)
}

// Test waits for the upgrade to complete and verifies the StatefulSet basic functionality
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
t.verify()
t.verify(f)
}

// Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
e2esset.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}

func (t *StatefulSetUpgradeTest) verify() {
func (t *StatefulSetUpgradeTest) verify(f *framework.Framework) {
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
framework.ExpectNoError(e2esset.CheckMount(f.ClientSet, t.set, "/data"))

ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set))
framework.ExpectNoError(e2esset.CheckHostname(f.ClientSet, t.set))

ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
framework.ExpectNoError(e2esset.CheckServiceName(t.set, t.set.Spec.ServiceName))

cmd := "echo $(hostname) > /data/hostname; sync;"
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
framework.ExpectNoError(e2esset.ExecInStatefulPods(f.ClientSet, t.set, cmd))
}

func (t *StatefulSetUpgradeTest) restart() {
func (t *StatefulSetUpgradeTest) restart(f *framework.Framework) {
ginkgo.By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
e2esset.Restart(f.ClientSet, t.set)
e2esset.WaitForRunningAndReady(f.ClientSet, *t.set.Spec.Replicas, t.set)
}

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)

@ -43,7 +44,6 @@ const cassandraManifestPath = "test/e2e/testing-manifests/statefulset/cassandra"
type CassandraUpgradeTest struct {
ip string
successfulWrites int
ssTester *framework.StatefulSetTester
}

// Name returns the tracking name of the test.

@ -74,13 +74,12 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)

ginkgo.By("Creating a PDB")
cassandraKubectlCreate(ns, "pdb.yaml")

ginkgo.By("Creating a Cassandra StatefulSet")
t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
e2esset.CreateStatefulSet(f.ClientSet, cassandraManifestPath, ns)

ginkgo.By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml")

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)

@ -42,7 +43,6 @@ const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
type EtcdUpgradeTest struct {
ip string
successfulWrites int
ssTester *framework.StatefulSetTester
}

// Name returns the tracking name of the test.

@ -69,13 +69,12 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)

ginkgo.By("Creating a PDB")
kubectlCreate(ns, "pdb.yaml")

ginkgo.By("Creating an etcd StatefulSet")
t.ssTester.CreateStatefulSet(manifestPath, ns)
e2esset.CreateStatefulSet(f.ClientSet, manifestPath, ns)

ginkgo.By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml")

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)

@ -43,7 +44,6 @@ type MySQLUpgradeTest struct {
ip string
successfulWrites int
nextWrite int
ssTester *framework.StatefulSetTester
}

// Name returns the tracking name of the test.

@ -84,13 +84,12 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)

ginkgo.By("Creating a configmap")
mysqlKubectlCreate(ns, "configmap.yaml")

ginkgo.By("Creating a mysql StatefulSet")
t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
e2esset.CreateStatefulSet(f.ClientSet, mysqlManifestPath, ns)

ginkgo.By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml")