Merge pull request #81478 from oomichi/move-ConfirmStatefulPodCount

Move ConfirmStatefulPodCount to e2e test
This commit is contained in:
Kubernetes Prow Robot 2019-08-16 03:32:31 -07:00 committed by GitHub
commit bb55368fc6
3 changed files with 27 additions and 29 deletions
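In short, this PR deletes the exported ConfirmStatefulPodCount helper from the shared e2e statefulset framework helpers (the last file in this diff) and recreates it as an unexported helper inside the StatefulSet e2e test itself (the first file), with a matching BUILD dependency cleanup (the second file). A minimal sketch of the resulting call-site change, using the same client and StatefulSet variables (c, ss) that appear in the diff below:

// Before this PR: tests called the exported helper through the framework
// package (imported as e2esset in the test file).
e2esset.ConfirmStatefulPodCount(c, 1, ss, 10*time.Second, true)

// After this PR: tests call the unexported, package-local helper directly.
confirmStatefulPodCount(c, 1, ss, 10*time.Second, true)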

View File

@@ -571,7 +571,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2esset.WaitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3)
e2esset.ConfirmStatefulPodCount(c, 1, ss, 10*time.Second, true)
confirmStatefulPodCount(c, 1, ss, 10*time.Second, true)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
e2esset.RestoreHTTPProbe(c, ss)
@@ -604,7 +604,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.WaitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0)
e2esset.ConfirmStatefulPodCount(c, 3, ss, 10*time.Second, true)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, true)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
e2esset.RestoreHTTPProbe(c, ss)
@@ -651,7 +651,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2esset.WaitForRunningAndNotReady(c, *ss.Spec.Replicas, ss)
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.UpdateReplicas(c, ss, 3)
e2esset.ConfirmStatefulPodCount(c, 3, ss, 10*time.Second, false)
confirmStatefulPodCount(c, 3, ss, 10*time.Second, false)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
e2esset.RestoreHTTPProbe(c, ss)
@@ -662,7 +662,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2esset.WaitForStatusReadyReplicas(c, ss, 0)
e2esset.WaitForRunningAndNotReady(c, 3, ss)
e2esset.UpdateReplicas(c, ss, 0)
e2esset.ConfirmStatefulPodCount(c, 0, ss, 10*time.Second, false)
confirmStatefulPodCount(c, 0, ss, 10*time.Second, false)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
e2esset.RestoreHTTPProbe(c, ss)
@@ -1163,3 +1163,26 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
priorRevision))
}
}
// confirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
// to scale to count.
func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
	start := time.Now()
	deadline := start.Add(timeout)
	for t := time.Now(); t.Before(deadline); t = time.Now() {
		podList := e2esset.GetPodList(c, ss)
		statefulPodCount := len(podList.Items)
		if statefulPodCount != count {
			e2epod.LogPodStates(podList.Items)
			if hard {
				e2elog.Failf("StatefulSet %v scaled unexpectedly: expected %d, got %d replicas", ss.Name, count, len(podList.Items))
			} else {
				e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
			}
			time.Sleep(1 * time.Second)
			continue
		}
		e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
		time.Sleep(1 * time.Second)
	}
}
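As the call sites above suggest, the hard flag decides how a pod-count mismatch is treated: in hard mode the helper fails the test via e2elog.Failf as soon as the observed count differs from count, while in soft mode the mismatch is only logged and the helper keeps polling once per second until the timeout elapses. A hedged usage sketch, reusing the c and ss variables from the surrounding test:

// Hard: the StatefulSet is expected to stay at 1 replica (for example while a
// broken readiness probe blocks scale-up), so any drift within 10 seconds fails the test.
confirmStatefulPodCount(c, 1, ss, 10*time.Second, true)

// Soft: the StatefulSet is expected to converge to 3 replicas; other counts are
// logged, but the test keeps waiting rather than failing.
confirmStatefulPodCount(c, 3, ss, 10*time.Second, false)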

View File

@@ -24,7 +24,6 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils/image:go_default_library",
],

View File

@@ -33,7 +33,6 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
e2efwk "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/manifest"
)
@@ -220,29 +219,6 @@ func Restart(c clientset.Interface, ss *appsv1.StatefulSet) {
update(c, ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
}
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
// to scale to count.
func ConfirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
	start := time.Now()
	deadline := start.Add(timeout)
	for t := time.Now(); t.Before(deadline); t = time.Now() {
		podList := GetPodList(c, ss)
		statefulPodCount := len(podList.Items)
		if statefulPodCount != count {
			e2epod.LogPodStates(podList.Items)
			if hard {
				e2elog.Failf("StatefulSet %v scaled unexpectedly: expected %d, got %d replicas", ss.Name, count, len(podList.Items))
			} else {
				e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
			}
			time.Sleep(1 * time.Second)
			continue
		}
		e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
		time.Sleep(1 * time.Second)
	}
}
// GetStatefulSet gets the StatefulSet named name in namespace.
func GetStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet {
ss, err := c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})