mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)

commit 2ab0320745 (parent e1877e36f7)

Switch all e2es to the ginkgo wrapper

This commit replaces direct calls to ginkgo's Fail and Skip in the e2e suites with the framework's Failf/Skipf helpers, and routes those helpers through the new test/e2e/framework/ginkgowrapper package.
@@ -164,6 +164,7 @@ go_library(
         "//test/e2e/chaosmonkey:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/generated:go_default_library",
         "//test/e2e/perf:go_default_library",
         "//test/e2e/scheduling:go_default_library",
@@ -237,7 +237,7 @@ var _ = framework.KubeDescribe("Probing container", func() {

 	It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
 		// TODO: enable this test once the default exec handler supports timeout.
-		Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
+		framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
 		runLivenessTest(f, &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "liveness-exec",
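For readers outside the diff's context: runLivenessTest takes a complete pod spec. A minimal sketch of such an exec-probe pod with the v1 types of this era follows; the image, command, and timing values are illustrative assumptions, not lines from this commit.

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// execProbePod sketches the shape of the pod runLivenessTest receives: a
// busybox container whose liveness is checked by exec'ing `cat /tmp/health`.
func execProbePod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "liveness-exec",
			Labels: map[string]string{"test": "liveness"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "liveness",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"/bin/sh", "-c", "touch /tmp/health; sleep 600"},
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}},
					},
					InitialDelaySeconds: 15,
					TimeoutSeconds:      1, // the timeout the TODO above refers to
				},
			}},
		},
	}
}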
@@ -196,7 +196,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			Fail("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
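The surrounding code is the usual Go select over a watch channel with a timeout arm. A self-contained sketch of the idiom; waitForPodAdded is a hypothetical helper name, not a function from this commit.

import (
	"time"

	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodAdded blocks until w delivers an Added event or timeout
// elapses. Failing through framework.Failf (rather than bare Fail) is the
// point of this commit: the wrapper behind it can then classify the panic
// that ginkgo raises.
func waitForPodAdded(w watch.Interface, timeout time.Duration) {
	select {
	case event := <-w.ResultChan():
		if event.Type != watch.Added {
			framework.Failf("Failed to observe pod creation: %v", event)
		}
	case <-time.After(timeout):
		framework.Failf("Timeout while waiting for pod creation")
	}
}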
@@ -245,14 +245,14 @@ var _ = framework.KubeDescribe("Pods", func() {
 					deleted = true
 				case watch.Error:
 					framework.Logf("received a watch error: %v", event.Object)
-					Fail("watch closed with error")
+					framework.Failf("watch closed with error")
 				}
 			case <-timer:
-				Fail("timed out waiting for pod deletion")
+				framework.Failf("timed out waiting for pod deletion")
 			}
 		}
 		if !deleted {
-			Fail("Failed to observe pod deletion")
+			framework.Failf("Failed to observe pod deletion")
 		}

 		Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
@@ -44,6 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/logs"
 	commontest "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
 	"k8s.io/kubernetes/test/e2e/generated"
 	federationtest "k8s.io/kubernetes/test/e2e_federation"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -318,7 +319,7 @@ func RunE2ETests(t *testing.T) {
 	logs.InitLogs()
 	defer logs.FlushLogs()

-	gomega.RegisterFailHandler(ginkgo.Fail)
+	gomega.RegisterFailHandler(ginkgowrapper.Fail)
 	// Disable skipped tests unless they are explicitly requested.
 	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
 		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
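gomega.RegisterFailHandler accepts any func(message string, callerSkip ...int), so ginkgowrapper.Fail must be a signature-compatible stand-in for ginkgo.Fail. A rough sketch of the shape such a wrapper takes; the actual wrapper.go in this commit may record more (stack traces, failure locations) than shown here.

package ginkgowrapper

import "github.com/onsi/ginkgo"

// FailurePanic is a typed panic value: code that recovers panics can tell
// a deliberate test failure apart from an unexpected crash.
type FailurePanic struct {
	Message string // the message passed to Fail
}

// Fail matches ginkgo.Fail's signature, which is what lets it replace
// ginkgo.Fail in gomega.RegisterFailHandler above.
func Fail(message string, callerSkip ...int) {
	skip := 1
	if len(callerSkip) > 0 {
		skip += callerSkip[0]
	}
	defer func() {
		if r := recover(); r != nil {
			// ginkgo.Fail signals failure by panicking; re-panic with a
			// typed value so callers can recognize it.
			panic(FailurePanic{Message: message})
		}
	}()
	ginkgo.Fail(message, skip)
}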
@@ -86,6 +86,7 @@ go_library(
         "//pkg/volume/util/volumehelper:go_default_library",
         "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
         "//plugin/pkg/scheduler/schedulercache:go_default_library",
+        "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/generated:go_default_library",
         "//test/e2e/perftype:go_default_library",
         "//test/utils:go_default_library",
@@ -145,6 +146,9 @@ filegroup(

 filegroup(
     name = "all-srcs",
-    srcs = [":package-srcs"],
+    srcs = [
+        ":package-srcs",
+        "//test/e2e/framework/ginkgowrapper:all-srcs",
+    ],
     tags = ["automanaged"],
 )
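The new //test/e2e/framework/ginkgowrapper:all-srcs label implies the package carries its own automanaged BUILD file. A plausible minimal version follows; these files are generated (by gazelle-style tooling), so the real srcs and deps may differ.

# Approximate BUILD file for test/e2e/framework/ginkgowrapper.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["wrapper.go"],
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/onsi/ginkgo:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)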
@@ -98,6 +98,7 @@ import (
 	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
+	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
 	testutil "k8s.io/kubernetes/test/utils"
 )

@@ -293,13 +294,13 @@ func Failf(format string, args ...interface{}) {
 func FailfWithOffset(offset int, format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	log("INFO", msg)
-	Fail(nowStamp()+": "+msg, 1+offset)
+	ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
 }

 func Skipf(format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	log("INFO", msg)
-	Skip(nowStamp() + ": " + msg)
+	ginkgowrapper.Skip(nowStamp() + ": " + msg)
 }

 func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
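By symmetry with the fail handler, ginkgowrapper.Skip is presumably the same trick applied to ginkgo.Skip: identical signature, re-panicking with a typed value. A sketch under that assumption, mirroring the Fail wrapper sketched earlier.

// Sketch only; assumes Skip mirrors the Fail wrapper above.
package ginkgowrapper

import "github.com/onsi/ginkgo"

// SkipPanic marks a deliberate test skip, distinguishable from a crash.
type SkipPanic struct {
	Message string // the message passed to Skip
}

func Skip(message string, callerSkip ...int) {
	skip := 1
	if len(callerSkip) > 0 {
		skip += callerSkip[0]
	}
	defer func() {
		if r := recover(); r != nil {
			// ginkgo.Skip also signals via panic; re-panic typed.
			panic(SkipPanic{Message: message})
		}
	}()
	ginkgo.Skip(message, skip)
}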
@@ -958,7 +958,7 @@ metadata:
 			return false, nil
 		}
 		if len(uidToPort) > 1 {
-			Fail("Too many endpoints found")
+			framework.Failf("Too many endpoints found")
 		}
 		for _, port := range uidToPort {
 			if port[0] != redisPort {
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("PodPreset", func() {
 				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			Fail("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
@@ -233,7 +233,7 @@ var _ = framework.KubeDescribe("PodPreset", func() {
 				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			Fail("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
 				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			Fail("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
@@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Pods Extended", func() {
 			}
 		}
 		if !deleted {
-			Fail("Failed to observe pod deletion")
+			framework.Failf("Failed to observe pod deletion")
 		}

 		Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
@@ -269,7 +269,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
 				framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
 			}

-			Fail(strings.Join(errs, "\n"))
+			framework.Failf(strings.Join(errs, "\n"))
 		}
 	})
 })
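One caveat with this otherwise mechanical rewrite: Fail took a plain message, while Failf is printf-style, so joined error text containing a literal '%' gets misparsed as a format verb. A small runnable illustration, using fmt.Printf as a stand-in for the printf machinery inside Failf.

package main

import (
	"fmt"
	"strings"
)

func main() {
	errs := []string{"served 95% of requests", "watch closed with error"}
	joined := strings.Join(errs, "\n")

	// What Failf(joined) effectively does: joined becomes the format
	// string, so "95%" prints as a bad verb like "95%!o(MISSING)...".
	fmt.Printf(joined + "\n")

	// The defensive form keeps joined as plain data.
	fmt.Printf("%s\n", joined)
}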
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
 			}
 		}
 		if node == nil {
-			Fail("unable to select a non-master node")
+			framework.Failf("unable to select a non-master node")
 		}
 	}

@@ -85,7 +85,7 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
 		}
 		if n < 2 {
 			failing.Insert("Less than two runs succeeded; aborting.")
-			Fail(strings.Join(failing.List(), "\n"))
+			framework.Failf(strings.Join(failing.List(), "\n"))
 		}
 		percentile := func(p int) time.Duration {
 			est := n * p / 100
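The percentile closure that closes this hunk indexes a sorted slice of run latencies with the integer estimator est := n * p / 100. A self-contained sketch of that estimator; the clamping at the top end is an assumption, since the test's own bounds handling is outside the hunk.

package main

import (
	"fmt"
	"sort"
	"time"
)

// percentile returns the p-th percentile of a sorted latency slice using
// the same integer estimator as the hunk above: est = n * p / 100.
func percentile(sorted []time.Duration, p int) time.Duration {
	n := len(sorted)
	if n == 0 {
		return 0
	}
	est := n * p / 100
	if est >= n {
		est = n - 1 // clamp: an assumption, the real test may differ
	}
	return sorted[est]
}

func main() {
	lat := []time.Duration{
		120 * time.Millisecond, 80 * time.Millisecond,
		200 * time.Millisecond, 95 * time.Millisecond,
	}
	sort.Slice(lat, func(i, j int) bool { return lat[i] < lat[j] })
	fmt.Println(percentile(lat, 50), percentile(lat, 90), percentile(lat, 99))
}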
@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
 		if failing.Len() > 0 {
 			errList := strings.Join(failing.List(), "\n")
 			helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
-			Fail(errList + helpfulInfo)
+			framework.Failf(errList + helpfulInfo)
 		}
 	})
 })