Fix density test
parent 0400339d01
commit ede2e756cd
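The change restructures the Density suite in two ways: the fixed-density cases move into a densityTests table driven by a single loop, and the threaded 500-pod cases get their own Scalability table and loop. Two details in the new loops do the heavy lifting. First, podsLaunched := 0 moves inside the It body, so the counter starts from zero on each run instead of living outside the spec. Second, each iteration copies its loop variable (itArg := testArg) before the closure is registered: Ginkgo registers It bodies while the loop runs but executes them only afterwards, and in pre-1.22 Go every iteration shares one loop variable, so an uncopied testArg would leave all registered specs reading the last table entry. A minimal sketch of that failure mode in plain Go (no Ginkgo; the specs slice is a hypothetical stand-in for Ginkgo's deferred spec registry):

package main

import "fmt"

type testCase struct {
    podsPerMinion int
}

func main() {
    cases := []testCase{{3}, {30}, {50}}

    // Stand-in for Ginkgo's It(): bodies are registered now but only
    // executed after the registration loop has finished.
    var specs []func()

    for _, testArg := range cases {
        // Buggy variant (pre-Go 1.22 semantics): closures capture the
        // loop variable itself, so every spec would print 50:
        //   specs = append(specs, func() { fmt.Println(testArg.podsPerMinion) })

        itArg := testArg // per-iteration copy, as the commit does
        specs = append(specs, func() { fmt.Println(itArg.podsPerMinion) })
    }

    for _, spec := range specs {
        spec() // prints 3, 30, 50
    }
}

Go 1.22 later gave loop variables per-iteration scope, which makes the copy unnecessary, but at the time of this commit the copy was the standard safe way to write a table-driven Ginkgo loop.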
@@ -211,74 +211,68 @@ var _ = Describe("Density", func() {
     // e2e test suite without --ginkgo.focus & --ginkgo.skip flags.
 
     type Density struct {
+        skip          bool
+        podsPerMinion int
+    }
+
+    densityTests := []Density{
+        // This test should always run, even if larger densities are skipped.
+        {podsPerMinion: 3, skip: false},
+        // TODO (wojtek-t):don't skip d30 after #6059
+        {podsPerMinion: 30, skip: true},
+        {podsPerMinion: 50, skip: true},
+        {podsPerMinion: 100, skip: true},
+    }
+
+    for _, testArg := range densityTests {
+        name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerMinion)
+        if testArg.skip {
+            name = "[Skipped] " + name
+        }
+        itArg := testArg
+        It(name, func() {
+            totalPods := itArg.podsPerMinion * minionCount
+            RCName = "my-hostname-density" + strconv.Itoa(totalPods) + "-" + string(util.NewUUID())
+            RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods)
+        })
+    }
+
+    type Scalability struct {
         skip          bool
         totalPods     int
         podsPerMinion int
         rcsPerThread  int
     }
 
-    //This test should always run, even if larger densities are skipped.
-    d3 := Density{totalPods: 3, podsPerMinion: 0, rcsPerThread: 1, skip: false}
-
-    //These tests are varied and customizable.
-    //TODO (wojtek-t):don't skip d30 after #6059
-    d30 := Density{totalPods: 30, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d50 := Density{totalPods: 50, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d100 := Density{totalPods: 100, podsPerMinion: 0, rcsPerThread: 1, skip: true}
-    d500t5 := Density{totalPods: 500, podsPerMinion: 10, rcsPerThread: 5, skip: true}
-    d500t25 := Density{totalPods: 500, podsPerMinion: 10, rcsPerThread: 25, skip: true}
-
-    dtests := []Density{d3, d30, d50, d100, d500t5, d500t25}
-
-    //Run each test in the array which isn't skipped.
-    for i := range dtests {
-        //cannot do a range iterator over structs.
-        dtest := dtests[i]
-
-        //if ppm==0, its a raw density test.
-        //otherwise, we continue launching n nodes per pod in threads till we meet the totalPods #.
-        if dtest.podsPerMinion == 0 {
-            //basic density tests
-            name := fmt.Sprintf("should allow starting %d pods per node", dtest.totalPods)
-
-            if dtest.skip {
-                name = "[Skipped] " + name
-            }
-            It(name, func() {
-                glog.Info("Density test parameters: %v", dtest)
-                RCName = "my-hostname-density" + strconv.Itoa(dtest.totalPods) + "-" + string(util.NewUUID())
-                RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", dtest.totalPods)
-            })
-        } else {
+    scalabilityTests := []Scalability{
+        {totalPods: 500, podsPerMinion: 10, rcsPerThread: 5, skip: true},
+        {totalPods: 500, podsPerMinion: 10, rcsPerThread: 25, skip: true},
+    }
+
+    for _, testArg := range scalabilityTests {
         // # of threads calibrate to totalPods
-        threads := (dtest.totalPods / (dtest.podsPerMinion * dtest.rcsPerThread))
+        threads := (testArg.totalPods / (testArg.podsPerMinion * testArg.rcsPerThread))
 
         name := fmt.Sprintf(
-            "[Skipped] should be able to launch %v pods, %v per minion, in %v rcs/thread.",
-            dtest.totalPods, dtest.podsPerMinion, dtest.rcsPerThread)
-        if dtest.skip {
+            "should be able to launch %v pods, %v per minion, in %v rcs/thread.",
+            testArg.totalPods, testArg.podsPerMinion, testArg.rcsPerThread)
+        if testArg.skip {
             name = "[Skipped] " + name
         }
 
-        podsLaunched := 0
+        itArg := testArg
         It(name, func() {
+            podsLaunched := 0
             var wg sync.WaitGroup
 
-            //count down latch.., once all threads are launched, we wait for
-            //it to decrement down to zero.
             wg.Add(threads)
 
-            //create queue of pending requests on the api server.
+            // Create queue of pending requests on the api server.
             for i := 0; i < threads; i++ {
                 go func() {
-                    // call to wg.Done will serve as a count down latch.
                     defer wg.Done()
-                    for i := 0; i < dtest.rcsPerThread; i++ {
+                    for i := 0; i < itArg.rcsPerThread; i++ {
                         name := "my-short-lived-pod" + string(util.NewUUID())
-                        n := dtest.podsPerMinion * minionCount
+                        n := itArg.podsPerMinion * minionCount
                         RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n)
                         podsLaunched += n
                         glog.Info("Launched %v pods so far...", podsLaunched)
@@ -287,8 +281,7 @@ var _ = Describe("Density", func() {
             }
             // Wait for all the pods from all the RC's to return.
             wg.Wait()
-            glog.Info("%v pods out of %v launched", podsLaunched, dtest.totalPods)
+            glog.Info("%v pods out of %v launched", podsLaunched, itArg.totalPods)
         })
     }
-    }
 })
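In the scalability loop, the thread count is calibrated so that threads * rcsPerThread * podsPerMinion covers totalPods: for the first table entry, threads = 500 / (10 * 5) = 10 goroutines, each launching 5 replication controllers of podsPerMinion * minionCount pods. The wg.Add(threads) / defer wg.Done() / wg.Wait() trio is the count-down latch the deleted comments described: the latch starts at the worker count, and wg.Wait() blocks until every worker has counted it down. A runnable sketch of the pattern under stated assumptions: RunRC is replaced by a plain counter, minionCount is a hypothetical stand-in pinned to 1, and the total is updated with atomic.AddInt64 because the test's bare podsLaunched += n is an unsynchronized write shared across goroutines:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    // Numbers from the first scalability entry:
    // threads = 500 / (10 * 5) = 10 worker goroutines.
    totalPods, podsPerMinion, rcsPerThread := 500, 10, 5
    threads := totalPods / (podsPerMinion * rcsPerThread)

    minionCount := 1 // hypothetical stand-in for the suite's node count

    var podsLaunched int64
    var wg sync.WaitGroup
    wg.Add(threads) // latch starts at the number of workers

    for i := 0; i < threads; i++ {
        go func() {
            defer wg.Done() // each finished worker counts the latch down
            for j := 0; j < rcsPerThread; j++ {
                n := podsPerMinion * minionCount
                // The real test calls RunRC here; the sketch only counts,
                // and atomically, since all workers update the same total.
                atomic.AddInt64(&podsLaunched, int64(n))
            }
        }()
    }

    wg.Wait() // block until every worker has called Done
    fmt.Printf("%v pods out of %v launched\n", podsLaunched, totalPods)
}

With minionCount = 1 the arithmetic closes exactly: 10 workers * 5 RCs each * 10 pods per RC = 500 pods, matching totalPods.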