Create a config struct for RunRC and allow polling interval to be configurable. #7572
This commit is contained in:
parent 9e06132ed3
commit 7361f751a6
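The change replaces RunRC's growing positional parameter list with a single RCConfig struct, so new knobs such as the poll interval can be added without touching every caller. A minimal, self-contained sketch of the pattern (names trimmed down for illustration; the real RCConfig and RunRC appear in the hunks below):

package main

import "fmt"

// Illustrative only: a simplified analogue of the RCConfig/RunRC change.
// Replacing a long positional parameter list with a config struct lets new
// options be added without breaking existing callers.
type RCConfig struct {
	Name         string
	Replicas     int
	PollInterval int // seconds between apiserver polls; zero if unset
}

func RunRC(config RCConfig) error {
	fmt.Printf("running rc %q with %d replicas (poll every %ds)\n",
		config.Name, config.Replicas, config.PollInterval)
	return nil
}

func main() {
	// Callers that predate PollInterval still compile unchanged:
	_ = RunRC(RCConfig{Name: "my-rc", Replicas: 3})
	// New callers opt in to the knob:
	_ = RunRC(RCConfig{Name: "my-rc", Replicas: 3, PollInterval: 10})
}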
@@ -96,16 +96,18 @@ var _ = Describe("Density", func() {
 	type Density struct {
 		skip          bool
 		podsPerMinion int
+		/* Controls how often the apiserver is polled for pods */
+		interval int
 	}

 	densityTests := []Density{
 		// This test should always run, even if larger densities are skipped.
-		{podsPerMinion: 3, skip: false},
-		{podsPerMinion: 30, skip: false},
+		{podsPerMinion: 3, skip: false, interval: 10},
+		{podsPerMinion: 30, skip: false, interval: 10},
 		// More than 30 pods per node is outside our v1.0 goals.
 		// We might want to enable those tests in the future.
-		{podsPerMinion: 50, skip: true},
-		{podsPerMinion: 100, skip: true},
+		{podsPerMinion: 50, skip: true, interval: 10},
+		{podsPerMinion: 100, skip: true, interval: 1},
 	}

 	for _, testArg := range densityTests {
@@ -124,6 +126,15 @@ var _ = Describe("Density", func() {
 			expectNoError(err)
 			defer fileHndl.Close()

+			config := RCConfig{Client: c,
+				Image:         "gcr.io/google_containers/pause:go",
+				Name:          RCName,
+				Namespace:     ns,
+				PollInterval:  itArg.interval,
+				PodStatusFile: fileHndl,
+				Replicas:      totalPods,
+			}
+
 			// Create a listener for events.
 			events := make([](*api.Event), 0)
 			_, controller := framework.NewInformer(
@@ -148,7 +159,7 @@ var _ = Describe("Density", func() {

 			// Start the replication controller.
 			startTime := time.Now()
-			expectNoError(RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods, fileHndl))
+			expectNoError(RunRC(config))
 			e2eStartupTime := time.Now().Sub(startTime)
 			Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)

@@ -120,7 +120,13 @@ func playWithRC(c *client.Client, wg *sync.WaitGroup, ns, name string, size int)
 	// Once every 1-2 minutes perform resize of RC.
 	for start := time.Now(); time.Since(start) < simulationTime; time.Sleep(time.Duration(60+rand.Intn(60)) * time.Second) {
 		if !rcExist {
-			expectNoError(RunRC(c, name, ns, image, size, nil), fmt.Sprintf("creating rc %s in namespace %s", name, ns))
+			config := RCConfig{Client: c,
+				Name:      name,
+				Namespace: ns,
+				Image:     image,
+				Replicas:  size,
+			}
+			expectNoError(RunRC(config), fmt.Sprintf("creating rc %s in namespace %s", name, ns))
 			rcExist = true
 		}
 		// Resize RC to a random size between 0.5x and 1.5x of the original size.
@@ -107,7 +107,15 @@ var _ = Describe("Scale", func() {
 			for i := 0; i < itArg.rcsPerThread; i++ {
 				name := "my-short-lived-pod" + string(util.NewUUID())
 				n := itArg.podsPerMinion * minionCount
-				expectNoError(RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n, nil))
+
+				config := RCConfig{Client: c,
+					Name:      name,
+					Namespace: ns,
+					Image:     "gcr.io/google_containers/pause:go",
+					Replicas:  n,
+				}
+
+				expectNoError(RunRC(config))
 				podsLaunched += n
 				Logf("Launched %v pods so far...", podsLaunched)
 				err := DeleteRC(c, ns, name)
@@ -75,6 +75,16 @@ type ContainerFailures struct {
 	restarts int
 }

+type RCConfig struct {
+	Client        *client.Client
+	Image         string
+	Name          string
+	Namespace     string
+	PollInterval  int
+	PodStatusFile *os.File
+	Replicas      int
+}
+
 func Logf(format string, a ...interface{}) {
 	fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
 }
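One property of this struct worth noting: fields a caller omits arrive as Go zero values, and two of the call sites above (the load and scale tests) leave PollInterval unset. Since RunRC below computes failCount := int(25 / interval), an unset interval of 0 would panic at runtime with an integer divide by zero. A guard along these lines near the top of RunRC (not part of this commit; the fallback value is assumed) would make the zero value safe:

	interval := config.PollInterval
	if interval <= 0 {
		interval = 1 // hypothetical fallback; this commit assumes callers set it
	}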
@@ -475,8 +485,14 @@ func Diff(oldPods []api.Pod, curPods []api.Pod) PodDiff {
 // It will waits for all pods it spawns to become "Running".
 // It's the caller's responsibility to clean up externally (i.e. use the
 // namespace lifecycle for handling cleanup).
-func RunRC(c *client.Client, name string, ns, image string, replicas int, podStatusFile *os.File) error {
+func RunRC(config RCConfig) error {
 	var last int
+	c := config.Client
+	name := config.Name
+	ns := config.Namespace
+	image := config.Image
+	replicas := config.Replicas
+	interval := config.PollInterval
+
 	maxContainerFailures := int(math.Max(1.0, float64(replicas)*.01))
 	current := 0
@@ -518,7 +534,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int, podSta

 	// Create a routine to query for the list of pods
 	stop := make(chan struct{})
-	go func(stop <-chan struct{}, n string, ns string, l labels.Selector) {
+	go func(stop <-chan struct{}, n string, ns string, l labels.Selector, i int) {
 		for {
 			select {
 			case <-stop:
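For readers skimming the diff, this is the shape of the loop being parameterized: a goroutine polls on a fixed cadence until the stop channel is closed. A self-contained sketch with illustrative names (the real loop queries the apiserver for the pod list):

package main

import (
	"fmt"
	"time"
)

func main() {
	stop := make(chan struct{})
	// Poll until stop is closed; the interval arrives as a parameter,
	// which is exactly what this hunk adds to the real goroutine.
	go func(stop <-chan struct{}, interval int) {
		for {
			select {
			case <-stop:
				return
			default:
				fmt.Println("poll") // stands in for the pod-list query
				time.Sleep(time.Duration(interval) * time.Second)
			}
		}
	}(stop, 1)

	time.Sleep(3 * time.Second)
	close(stop)
}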
@@ -530,16 +546,19 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int, podSta
 				} else {
 					podLists.Push(p.Items)
 				}
-				time.Sleep(1 * time.Second)
+				time.Sleep(time.Duration(i) * time.Second)
 			}
 		}
-	}(stop, name, ns, label)
+	}(stop, name, ns, label, interval)
 	defer close(stop)

 	By(fmt.Sprintf("Making sure all %d replicas of rc %s in namespace %s exist", replicas, name, ns))
-	failCount := 5
+	failCount := int(25 / interval)
 	for same < failCount && current < replicas {
-		time.Sleep(2 * time.Second)
+		time.Sleep(time.Duration(interval*2) * time.Second)

 		// Greedily read all existing entries in the queue until
 		// all pods are found submitted or the queue is empty
 		for podLists.Len() > 0 && current < replicas {
 			item := podLists.Pop()
 			pods := item.value.([]api.Pod)
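The failCount arithmetic keeps the overall no-progress timeout roughly constant as the interval changes: int(25 / interval) iterations, each sleeping interval*2 seconds, is about a 50-second budget regardless of the interval, and int(100 / interval) in the next hunk gives about 200 seconds for the running phase. Integer division rounds the budget down slightly for larger intervals. A quick self-contained check:

package main

import "fmt"

// Verifies the timeout budget implied by failCount and the sleeps above.
func main() {
	for _, interval := range []int{1, 5, 10} {
		exists := (25 / interval) * (interval * 2)   // "replicas exist" phase
		running := (100 / interval) * (interval * 2) // "replicas running" phase
		fmt.Printf("interval=%ds: exists budget=%ds, running budget=%ds\n",
			interval, exists, running)
	}
}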
@@ -568,13 +587,16 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int, podSta
 	By(fmt.Sprintf("Waiting for all %d replicas to be running with a max container failures of %d", replicas, maxContainerFailures))
 	same = 0
 	last = 0
-	failCount = 10
+	failCount = int(100 / interval)
 	current = 0
 	var oldPods []api.Pod
 	podLists.Reset()
 	foundAllPods := false
 	for same < failCount && current < replicas {
-		time.Sleep(2 * time.Second)
+		time.Sleep(time.Duration(interval*2) * time.Second)

 		// Greedily read all existing entries in the queue until
 		// either all pods are running or the queue is empty
 		for podLists.Len() > 0 && current < replicas {
 			item := podLists.Pop()
 			current = 0
@@ -603,8 +625,8 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int, podSta
 			}
 		}
 		Logf("Pod States: %d running, %d pending, %d waiting, %d inactive, %d unknown ", current, pending, waiting, inactive, unknown)
-		if podStatusFile != nil {
-			fmt.Fprintf(podStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown\n", item.createTime, current, pending, waiting, inactive, unknown)
+		if config.PodStatusFile != nil {
+			fmt.Fprintf(config.PodStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown\n", item.createTime, current, pending, waiting, inactive, unknown)
 		}

 		if foundAllPods && len(currentPods) != len(oldPods) {
@@ -652,7 +674,6 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int, podSta
 			}
 		}
 	}
-	close(stop)
 	if current != replicas {
 		return fmt.Errorf("Only %d pods started out of %d", current, replicas)
 	}