Merge pull request #8873 from piosz/load_generator

Re-enabled load e2e test for Jenkins scalability suite

Commit: d7f242b8b0
@@ -26,6 +26,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -67,6 +68,14 @@ var _ = Describe("Load", func() {
 		if err := c.Namespaces().Delete(ns); err != nil {
 			Failf("Couldn't delete ns %s", err)
 		}
+
+		// Verify latency metrics
+		// TODO: Update threshold to 1s once we reach this goal
+		// TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.
+		_, err := HighLatencyRequests(c, 5*time.Second, util.NewStringSet("events"))
+		expectNoError(err)
+		// TODO: uncomment the following line once the test is stable
+		// Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
 	})

 	type Load struct {
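The pkg/util import added in the first hunk exists for util.NewStringSet, which builds the set of resources the latency check skips ("events" presumably being excluded because event traffic is expected to be heavy under load). Judging from the commented-out assertion, HighLatencyRequests returns the number of API requests whose latency exceeded the threshold, plus an error. A minimal, self-contained sketch of that shape, using a hypothetical RequestLatency type and made-up sample data rather than the real e2e utilities:

package main

import (
	"fmt"
	"time"
)

// RequestLatency is a hypothetical sample: one API call's resource and observed latency.
type RequestLatency struct {
	Resource string
	Latency  time.Duration
}

// highLatencyRequests counts samples above threshold, skipping ignored resources,
// mirroring the shape of the HighLatencyRequests(c, 5*time.Second, ...) call above.
func highLatencyRequests(samples []RequestLatency, threshold time.Duration, ignored map[string]bool) int {
	count := 0
	for _, s := range samples {
		if ignored[s.Resource] {
			continue // e.g. "events" is excluded by the test above
		}
		if s.Latency > threshold {
			fmt.Printf("WARNING: %s took %v (threshold %v)\n", s.Resource, s.Latency, threshold)
			count++
		}
	}
	return count
}

func main() {
	samples := []RequestLatency{
		{Resource: "pods", Latency: 300 * time.Millisecond},
		{Resource: "events", Latency: 8 * time.Second}, // ignored, never counted
		{Resource: "nodes", Latency: 6 * time.Second},  // above the 5s threshold
	}
	ignored := map[string]bool{"events": true}
	fmt.Println("high-latency requests:", highLatencyRequests(samples, 5*time.Second, ignored))
}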
@@ -78,7 +87,7 @@ var _ = Describe("Load", func() {
 	}

 	for _, testArg := range loadTests {
-		name := fmt.Sprintf("[Skipped] should be able to handle %v pods per node", testArg.podsPerNode)
+		name := fmt.Sprintf("[Performance suite] [Skipped] should be able to handle %v pods per node", testArg.podsPerNode)

 		It(name, func() {
 			totalPods := testArg.podsPerNode * nodeCount
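The renamed spec adds a [Performance suite] tag, presumably so the Jenkins scalability job can select these specs by name while the default suite still honors [Skipped]. For context, a self-contained sketch of the table-driven Ginkgo registration pattern this hunk sits inside; the podsPerNode and nodeCount values are illustrative assumptions, not the real loadTests table:

// A self-contained sketch of the table-driven Ginkgo pattern shown in the hunk.
package e2e

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

type Load struct {
	podsPerNode int
}

var _ = Describe("Load", func() {
	nodeCount := 3 // assumption: the real suite discovers this from the cluster

	loadTests := []Load{
		{podsPerNode: 30}, // illustrative value
	}

	for _, testArg := range loadTests {
		testArg := testArg // capture the loop variable for the closure below
		name := fmt.Sprintf("[Performance suite] [Skipped] should be able to handle %v pods per node", testArg.podsPerNode)
		It(name, func() {
			totalPods := testArg.podsPerNode * nodeCount
			_ = totalPods // the real test sizes its replication controllers from totalPods
		})
	}
})

With the tags in the spec name, the compiled e2e binary can then pick these tests up with, for example, --ginkgo.focus='Performance suite'.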
@@ -776,7 +776,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
 	By(fmt.Sprintf("%v Waiting for all %d replicas to be running with a max container failures of %d", time.Now(), replicas, maxContainerFailures))
 	same = 0
 	last = 0
-	failCount = 10
+	failCount = 20
 	current = 0
 	oldPods := make([]*api.Pod, 0)
 	for same < failCount && current < replicas {
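The last hunk is in RunRC, the helper the load test drives: same counts consecutive polls in which the number of running pods did not change, and the wait loop gives up once same reaches failCount, so raising failCount from 10 to 20 doubles the tolerance for a rollout that is slow but still making occasional progress. A self-contained sketch of that stall-detection shape; the polling callback and interval are assumptions, not the real RunRC internals:

package main

import (
	"fmt"
	"time"
)

// waitForReplicas polls countRunning until replicas pods are running, or until the
// count has stayed the same for failCount consecutive polls (a stall).
func waitForReplicas(countRunning func() int, replicas, failCount int, interval time.Duration) error {
	same, last, current := 0, 0, 0
	for same < failCount && current < replicas {
		current = countRunning()
		if current == last {
			same++ // no progress on this poll
		} else {
			same = 0 // any progress resets the stall counter
			last = current
		}
		time.Sleep(interval)
	}
	if current < replicas {
		return fmt.Errorf("only %d of %d replicas running after %d polls with no progress", current, replicas, failCount)
	}
	return nil
}

func main() {
	// Simulated workload: one more pod comes up on each poll.
	n := 0
	countRunning := func() int { n++; return n }
	fmt.Println(waitForReplicas(countRunning, 5, 20, 10*time.Millisecond))
}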