Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)
remove repeat failure command option and increase timeouts in etcd failure test
commit 5d8ffbaad9
parent 8b4170ed39
@@ -23,7 +23,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/ginkgo"
@@ -53,7 +52,6 @@ var _ = Describe("Etcd failure", func() {
 			framework,
 			"sudo iptables -A INPUT -p tcp --destination-port 4001 -j DROP",
 			"sudo iptables -D INPUT -p tcp --destination-port 4001 -j DROP",
-			false,
 		)
 	})
 
@@ -62,12 +60,11 @@ var _ = Describe("Etcd failure", func() {
 			framework,
 			"pgrep etcd | xargs -I {} sudo kill -9 {}",
 			"echo 'do nothing. monit should restart etcd.'",
-			true,
 		)
 	})
 })
 
-func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
+func etcdFailTest(framework Framework, failCommand, fixCommand string) {
 	// This test requires SSH, so the provider check should be identical to
 	// those tests.
 	if !providerIs("gce") {
@@ -75,7 +72,7 @@ func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
 		return
 	}
 
-	doEtcdFailure(failCommand, fixCommand, repeat)
+	doEtcdFailure(failCommand, fixCommand)
 
 	checkExistingRCRecovers(framework)
 
@@ -89,21 +86,11 @@ func etcdFailTest(framework Framework, failCommand, fixCommand string, repeat bool) {
 // master and go on to assert that etcd and kubernetes components recover.
 const etcdFailureDuration = 20 * time.Second
 
-func doEtcdFailure(failCommand, fixCommand string, repeat bool) {
+func doEtcdFailure(failCommand, fixCommand string) {
 	By("failing etcd")
 
-	if repeat {
-		stop := make(chan struct{}, 1)
-		go util.Until(func() {
-			defer GinkgoRecover()
-			masterExec(failCommand)
-		}, 1*time.Second, stop)
-		time.Sleep(etcdFailureDuration)
-		stop <- struct{}{}
-	} else {
-		masterExec(failCommand)
-		time.Sleep(etcdFailureDuration)
-	}
+	masterExec(failCommand)
+	time.Sleep(etcdFailureDuration)
 	masterExec(fixCommand)
 }
 
@@ -121,7 +108,7 @@ func checkExistingRCRecovers(f Framework) {
 	rcSelector := labels.Set{"name": "baz"}.AsSelector()
 
 	By("deleting pods from existing replication controller")
-	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 		pods, err := podClient.List(rcSelector, fields.Everything())
 		if err != nil {
 			Logf("apiserver returned error, as expected before recovery: %v", err)
@@ -139,7 +126,7 @@ func checkExistingRCRecovers(f Framework) {
 	}))
 
 	By("waiting for replication controller to recover")
-	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
 		pods, err := podClient.List(rcSelector, fields.Everything())
 		Expect(err).NotTo(HaveOccurred())
 		for _, pod := range pods.Items {
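
Note on the timeout change: the two wait.Poll bumps (time.Second*30 to time.Second*60) widen the window the test gives the apiserver and the replication controller to recover after etcd is restored. Below is a minimal, self-contained sketch of that polling pattern; the local poll helper is written for illustration only, assuming the wait.Poll(interval, timeout, condition) semantics of this era's pkg/util/wait: retry the condition every interval until it returns (true, nil), returns a non-nil error, or the timeout elapses.

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // poll is a stand-in for wait.Poll: run condition every interval until
    // it reports done, fails with an error, or the deadline passes.
    func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
    	deadline := time.Now().Add(timeout)
    	for {
    		done, err := condition()
    		if err != nil {
    			return err
    		}
    		if done {
    			return nil
    		}
    		if time.Now().After(deadline) {
    			return errors.New("timed out waiting for the condition")
    		}
    		time.Sleep(interval)
    	}
    }

    func main() {
    	start := time.Now()
    	// The commit's new values: check every 500ms, allow up to 60s.
    	err := poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {
    		// Stand-in for "the apiserver/RC has recovered"; here it
    		// simply succeeds after two seconds.
    		return time.Since(start) > 2*time.Second, nil
    	})
    	fmt.Println("poll result:", err)
    }

Under these semantics a non-nil error from the condition aborts the poll immediately, which is consistent with the first poll in the diff logging apiserver errors as expected-before-recovery rather than failing on them.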
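For contrast, the deleted repeat==true branch used util.Until to re-apply the failure command once per second for the whole etcdFailureDuration. A sketch of that pattern, with a local until helper standing in for util.Until(f, period, stopCh) (run f every period until the stop channel fires) and a print standing in for masterExec:

    package main

    import (
    	"fmt"
    	"time"
    )

    // until is a stand-in for util.Until: invoke f every period until a
    // value arrives on (or the close of) the stop channel.
    func until(f func(), period time.Duration, stop <-chan struct{}) {
    	for {
    		select {
    		case <-stop:
    			return
    		default:
    		}
    		f()
    		time.Sleep(period)
    	}
    }

    func main() {
    	stop := make(chan struct{}, 1)
    	// Repeatedly apply the failure, as the removed branch did.
    	go until(func() { fmt.Println("masterExec(failCommand)") }, 1*time.Second, stop)
    	time.Sleep(3 * time.Second) // stand-in for the 20s etcdFailureDuration
    	stop <- struct{}{}
    }

Dropping this branch removes the file's only use of pkg/util, which is why the import is deleted in the first hunk.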