Merge pull request #34605 from gmarek/inject

Automatic merge from submit-queue

Inject logFunc into RunPods

I missed this in the previous PR: StartPods still logged through the package-level Logf and asserted on errors internally. It now takes an injected logFunc and returns an error for the caller to handle.
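
A minimal sketch of the new call pattern, as it appears at the call sites below (c, podsNeededForSaturation, ns, and pod are assumed to come from the surrounding test context):

	// Inject the framework logger and assert on the returned error at the call site.
	err := framework.StartPods(c, podsNeededForSaturation, ns, "maxp", pod, true, framework.Logf)
	framework.ExpectNoError(err)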
Kubernetes Submit Queue 2016-10-12 05:36:26 -07:00 committed by GitHub
commit 2bfeea447b
3 changed files with 11 additions and 9 deletions

@@ -2786,7 +2786,8 @@ func (config *RCConfig) start() error {
 // Simplified version of RunRC, that does not create RC, but creates plain Pods.
 // Optionally waits for pods to start running (if waitForRunning == true).
 // The number of replicas must be non-zero.
-func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
+func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string,
+	pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
 	// no pod to start
 	if replicas < 1 {
 		panic("StartPods: number of replicas must be non-zero")
@@ -2799,14 +2800,15 @@ func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix s
 		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
 		pod.Spec.Containers[0].Name = podName
 		_, err := c.Pods(namespace).Create(&pod)
-		ExpectNoError(err)
+		return err
 	}
-	Logf("Waiting for running...")
+	logFunc("Waiting for running...")
 	if waitForRunning {
 		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
 		err := WaitForPodsWithLabelRunning(c, namespace, label)
-		ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
+		return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
 	}
+	return nil
 }
 
 type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error)

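Because the injected logFunc has the same shape as framework.Logf, func(fmt string, args ...interface{}), any compatible logger can be supplied. A hypothetical non-framework caller using the standard library logger (assumes import "log"; c, replicas, ns, and podTemplate come from the caller):

	// Hypothetical caller-supplied logger matching the injected signature.
	prefixed := func(format string, args ...interface{}) {
		log.Printf("[StartPods] "+format, args...)
	}
	if err := StartPods(c, replicas, ns, "load-pod", podTemplate, true, prefixed); err != nil {
		log.Fatalf("starting pods: %v", err)
	}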
@@ -126,11 +126,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// and there is no need to create additional pods.
 		// StartPods requires at least one pod to replicate.
 		if podsNeededForSaturation > 0 {
-			framework.StartPods(c, podsNeededForSaturation, ns, "maxp",
+			framework.ExpectNoError(framework.StartPods(c, podsNeededForSaturation, ns, "maxp",
 				*initPausePod(f, pausePodConfig{
 					Name:   "",
 					Labels: map[string]string{"name": ""},
-				}), true)
+				}), true, framework.Logf))
 		}
 		podName := "additional-pod"
 		createPausePod(f, pausePodConfig{
@@ -187,7 +187,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// and there is no need to create additional pods.
 		// StartPods requires at least one pod to replicate.
 		if podsNeededForSaturation > 0 {
-			framework.StartPods(c, podsNeededForSaturation, ns, "overcommit",
+			framework.ExpectNoError(framework.StartPods(c, podsNeededForSaturation, ns, "overcommit",
 				*initPausePod(f, pausePodConfig{
 					Name:   "",
 					Labels: map[string]string{"name": ""},
@@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 							"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 						},
 					},
-				}), true)
+				}), true, framework.Logf))
 		}
 		podName := "additional-pod"
 		createPausePod(f, pausePodConfig{

@@ -98,7 +98,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	// Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
 	// Thus, no need to test for it. Once the precondition changes to zero number of replicas,
 	// test for replicaCount > 0. Otherwise, StartPods panics.
-	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
+	framework.ExpectNoError(framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf))
 	// Wait for all of them to be scheduled
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
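
One consequence of the signature worth noting: (*testing.T).Logf also satisfies func(fmt string, args ...interface{}), so the helper can be driven from a plain Go test as well as from Ginkgo. A hypothetical sketch, not part of this PR (import paths as in the 1.4-era tree):

	package framework_test

	import (
		"testing"

		"k8s.io/kubernetes/pkg/api"
		client "k8s.io/kubernetes/pkg/client/unversioned"
		"k8s.io/kubernetes/test/e2e/framework"
	)

	// Hypothetical helper: t.Logf is passed as the injected logFunc.
	func startPodsForTest(t *testing.T, c *client.Client, pod api.Pod) {
		if err := framework.StartPods(c, 2, "test-ns", "demo", pod, true, t.Logf); err != nil {
			t.Fatalf("StartPods failed: %v", err)
		}
	}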