fix golint error in test/e2e/scheduling

danielqsj 2019-05-14 14:18:52 +08:00
parent 4cb4864487
commit ccecc67a5b
10 changed files with 87 additions and 78 deletions

View File

@@ -605,7 +605,6 @@ test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils

View File

@@ -33,6 +33,7 @@ import (
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )
@@ -92,7 +93,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
 ginkgo.By("One pod should be scheduled, the other should be rejected")
-// CreateNodeSelectorPods creates RC with host port 4312
+// CreateNodeSelectorPods creates RC with host port 4321
 WaitForSchedulerAfterAction(f, func() error {
 err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
 return err
@@ -269,6 +270,7 @@ func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]str
 }
 }
+// CreateNodeSelectorPods creates RC with host port 4321 and defines node selector
 func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
 ginkgo.By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
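
The comment added above the blank testify import addresses golint's rule that a blank import should either live in a main or test package or carry a comment justifying it. A minimal sketch of the pattern, with an illustrative package rather than the test's actual import:

package main

import (
	"fmt"

	// Blank import used only for its init side effects;
	// golint asks for a comment like this one to justify it.
	_ "net/http/pprof"
)

func main() {
	fmt.Println("pprof handlers registered via the blank import above")
}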

View File

@@ -18,6 +18,7 @@ package scheduling
 import "github.com/onsi/ginkgo"
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 return ginkgo.Describe("[sig-scheduling] "+text, body)
 }
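
The new doc comment satisfies golint's "exported function should have comment or be unexported" check, which expects the comment to start with the identifier's name. A minimal sketch of the convention, using illustrative names rather than anything from this commit:

// Package example demonstrates golint's doc-comment convention.
package example

// MaxRetries is the number of attempts made before giving up.
const MaxRetries = 3

// Greet returns a greeting for the given name. The comment starts with the
// name of the identifier it documents, which is what golint expects.
func Greet(name string) string {
	return "hello " + name
}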

View File

@@ -42,7 +42,7 @@ const (
 var (
 gpuResourceName v1.ResourceName
-dsYamlUrl string
+dsYamlURL string
 )
 func makeCudaAdditionDevicePluginTestPod() *v1.Pod {
@@ -116,20 +116,21 @@ func getGPUsAvailable(f *framework.Framework) int64 {
 return gpusAvailable
 }
+// SetupNVIDIAGPUNode install Nvidia Drivers and wait for Nvidia GPUs to be available on nodes
 func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *framework.ContainerResourceGatherer {
 logOSImages(f)
-dsYamlUrlFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
-if dsYamlUrlFromEnv != "" {
-dsYamlUrl = dsYamlUrlFromEnv
+dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
+if dsYamlURLFromEnv != "" {
+dsYamlURL = dsYamlURLFromEnv
 } else {
-dsYamlUrl = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
+dsYamlURL = "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml"
 }
 gpuResourceName = gpu.NVIDIAGPUResourceName
-e2elog.Logf("Using %v", dsYamlUrl)
+e2elog.Logf("Using %v", dsYamlURL)
 // Creates the DaemonSet that installs Nvidia Drivers.
-ds, err := framework.DsFromManifest(dsYamlUrl)
+ds, err := framework.DsFromManifest(dsYamlURL)
 gomega.Expect(err).NotTo(gomega.HaveOccurred())
 ds.Namespace = f.Namespace.Name
 _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds)
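
The dsYamlUrl to dsYamlURL rename follows golint's initialism rule: abbreviations such as URL, ID, and HTTP keep a single consistent case in identifiers. A small sketch under that rule, with purely illustrative names:

package example

// serverURL and userID follow golint's initialism rule: write URL and ID,
// not Url and Id.
var (
	serverURL string
	userID    int64
)

// FetchURL keeps the URL initialism upper-case in its exported name as well.
func FetchURL(path string) string {
	return serverURL + path
}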

View File

@@ -36,6 +36,7 @@ import (
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )
@@ -727,9 +728,8 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 if !printed {
 printed = true
 return msg
-} else {
-return ""
 }
+return ""
 }
 gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
@@ -746,9 +746,8 @@ func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expected
 if !printed {
 printed = true
 return msg
-} else {
-return ""
 }
+return ""
 }
 gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
@@ -775,6 +774,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
 return pod.Spec.NodeName, pod.Name
 }
+// GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
 return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
@@ -785,6 +785,7 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
 return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
 }
+// CreateHostPortPods creates RC with host port 4321
 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
 ginkgo.By(fmt.Sprintf("Running RC which reserves host port"))
 config := &testutils.RCConfig{
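
Dropping the else after a return addresses golint's advice that when the if branch ends with a return, the remaining code should be outdented rather than wrapped in an else. A minimal sketch of the preferred shape, using a hypothetical helper rather than the test's printOnce closure:

package example

// firstNonEmpty returns a when it is non-empty and b otherwise. Because the
// if branch returns, no else is needed and the fallback stays unindented.
func firstNonEmpty(a, b string) string {
	if a != "" {
		return a
	}
	return b
}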

View File

@@ -39,6 +39,7 @@ import (
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 )

View File

@@ -24,6 +24,7 @@ import (
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 "k8s.io/api/core/v1"
@@ -40,14 +41,15 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
+// Resource is a collection of compute resource.
 type Resource struct {
 MilliCPU int64
 Memory int64
 }
-var balancePodLabel map[string]string = map[string]string{"name": "priority-balanced-memory"}
-var podRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
+var balancePodLabel = map[string]string{"name": "priority-balanced-memory"}
+var podRequestedResource = &v1.ResourceRequirements{
 Limits: v1.ResourceList{
 v1.ResourceMemory: resource.MustParse("100Mi"),
 v1.ResourceCPU: resource.MustParse("100m"),
@@ -265,7 +267,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 var cpuFractionMap = make(map[string]float64)
 var memFractionMap = make(map[string]float64)
 for _, node := range nodes {
-cpuFraction, memFraction := computeCpuMemFraction(cs, node, requestedResource)
+cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource)
 cpuFractionMap[node.Name] = cpuFraction
 memFractionMap[node.Name] = memFraction
 if cpuFraction > maxCPUFraction {
@@ -311,15 +313,15 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 for _, node := range nodes {
 ginkgo.By("Compute Cpu, Mem Fraction after create balanced pods.")
-computeCpuMemFraction(cs, node, requestedResource)
+computeCPUMemFraction(cs, node, requestedResource)
 }
 return nil
 }
-func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
-e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name)
-totalRequestedCpuResource := resource.Requests.Cpu().MilliValue()
+func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
+e2elog.Logf("ComputeCPUMemFraction for node: %v", node.Name)
+totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
 totalRequestedMemResource := resource.Requests.Memory().Value()
 allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 if err != nil {
@@ -332,7 +334,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
 continue
 }
-totalRequestedCpuResource += getNonZeroRequests(&pod).MilliCPU
+totalRequestedCPUResource += getNonZeroRequests(&pod).MilliCPU
 totalRequestedMemResource += getNonZeroRequests(&pod).Memory
 }
 }
@@ -341,7 +343,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 cpuAllocatableMil := cpuAllocatable.MilliValue()
 floatOne := float64(1)
-cpuFraction := float64(totalRequestedCpuResource) / float64(cpuAllocatableMil)
+cpuFraction := float64(totalRequestedCPUResource) / float64(cpuAllocatableMil)
 if cpuFraction > floatOne {
 cpuFraction = floatOne
 }
@@ -353,7 +355,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 memFraction = floatOne
 }
-e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction)
+e2elog.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
 e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
 return cpuFraction, memFraction
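
Removing the explicit types from the balancePodLabel and podRequestedResource declarations addresses golint's warning that a type spelled out on the left is redundant when it is already implied by the initializer. A minimal sketch, with an illustrative variable name:

package example

// Redundant: the map type appears on both sides of the declaration.
// var labels map[string]string = map[string]string{"name": "demo"}

// Preferred: the type is inferred from the initializer.
var labels = map[string]string{"name": "demo"}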

View File

@@ -20,6 +20,7 @@ import (
 "time"
 "github.com/onsi/ginkgo"
+// ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 "k8s.io/api/core/v1"
@@ -65,48 +66,46 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName,
 			},
 		},
 	}
-	} else {
-		if tolerationSeconds <= 0 {
-			return &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: podName,
-					Namespace: ns,
-					Labels: map[string]string{"group": podLabel},
-					DeletionGracePeriodSeconds: &grace,
-					// default - tolerate forever
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name: "pause",
-							Image: "k8s.gcr.io/pause:3.1",
-						},
-					},
-					Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
-				},
-			}
-		} else {
-			ts := int64(tolerationSeconds)
-			return &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: podName,
-					Namespace: ns,
-					Labels: map[string]string{"group": podLabel},
-					DeletionGracePeriodSeconds: &grace,
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name: "pause",
-							Image: "k8s.gcr.io/pause:3.1",
-						},
-					},
-					// default - tolerate forever
-					Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}},
-				},
-			}
-		}
+	}
+	if tolerationSeconds <= 0 {
+		return &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: podName,
+				Namespace: ns,
+				Labels: map[string]string{"group": podLabel},
+				DeletionGracePeriodSeconds: &grace,
+				// default - tolerate forever
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Name: "pause",
+						Image: "k8s.gcr.io/pause:3.1",
+					},
+				},
+				Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}},
+			},
+		}
+	}
+	ts := int64(tolerationSeconds)
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: podName,
+			Namespace: ns,
+			Labels: map[string]string{"group": podLabel},
+			DeletionGracePeriodSeconds: &grace,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name: "pause",
+					Image: "k8s.gcr.io/pause:3.1",
+				},
+			},
+			// default - tolerate forever
+			Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute, TolerationSeconds: &ts}},
+		},
+	}
 }
 // Creates and starts a controller (informer) that watches updates on a pod in given namespace with given name. It puts a new
@@ -141,8 +140,8 @@ func createTestController(cs clientset.Interface, observedDeletions chan string,
 }
 const (
-KubeletPodDeletionDelaySeconds = 60
-AdditionalWaitPerDeleteSeconds = 5
+kubeletPodDeletionDelaySeconds = 60
+additionalWaitPerDeleteSeconds = 5
 )
 // Tests the behavior of NoExecuteTaintManager. Following scenarios are included:
@@ -188,7 +187,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting for Pod to be deleted")
-timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 select {
 case <-timeoutChannel:
 framework.Failf("Failed to evict Pod")
@@ -220,7 +219,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting for Pod to be deleted")
-timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 select {
 case <-timeoutChannel:
 e2elog.Logf("Pod wasn't evicted. Test successful")
@@ -235,7 +234,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // 4. See if pod will get evicted after toleration time runs out
 ginkgo.It("eventually evict pod with finite tolerations from tainted nodes", func() {
 podName := "taint-eviction-3"
-pod := createPodForTaintsTest(true, KubeletPodDeletionDelaySeconds+2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
+pod := createPodForTaintsTest(true, kubeletPodDeletionDelaySeconds+2*additionalWaitPerDeleteSeconds, podName, podName, ns)
 observedDeletions := make(chan string, 100)
 stopCh := make(chan struct{})
 createTestController(cs, observedDeletions, stopCh, podName, ns)
@@ -253,7 +252,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting to see if a Pod won't be deleted")
-timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 select {
 case <-timeoutChannel:
 e2elog.Logf("Pod wasn't evicted")
@@ -262,7 +261,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 return
 }
 ginkgo.By("Waiting for Pod to be deleted")
-timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 select {
 case <-timeoutChannel:
 framework.Failf("Pod wasn't evicted")
@@ -279,7 +278,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // 5. See if Pod won't be evicted.
 ginkgo.It("removing taint cancels eviction", func() {
 podName := "taint-eviction-4"
-pod := createPodForTaintsTest(true, 2*AdditionalWaitPerDeleteSeconds, podName, podName, ns)
+pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns)
 observedDeletions := make(chan string, 100)
 stopCh := make(chan struct{})
 createTestController(cs, observedDeletions, stopCh, podName, ns)
@@ -302,7 +301,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
-timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C
+timeoutChannel := time.NewTimer(additionalWaitPerDeleteSeconds).C
 select {
 case <-timeoutChannel:
 e2elog.Logf("Pod wasn't evicted. Proceeding")
@@ -314,7 +313,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 framework.RemoveTaintOffNode(cs, nodeName, testTaint)
 taintRemoved = true
 ginkgo.By("Waiting some time to make sure that toleration time passed.")
-timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 select {
 case <-timeoutChannel:
 e2elog.Logf("Pod wasn't evicted. Test successful")
@@ -372,7 +371,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting for Pod1 to be deleted")
-timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C
 var evicted int
 for {
 select {
@@ -404,8 +403,8 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 stopCh := make(chan struct{})
 createTestController(cs, observedDeletions, stopCh, podGroup, ns)
-pod1 := createPodForTaintsTest(true, AdditionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
-pod2 := createPodForTaintsTest(true, 5*AdditionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
+pod1 := createPodForTaintsTest(true, additionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
+pod2 := createPodForTaintsTest(true, 5*additionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
 ginkgo.By("Starting pods...")
 nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
@@ -431,7 +430,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 // Wait a bit
 ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")
-timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C
+timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 var evicted int
 for evicted != 2 {
 select {
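
Lower-casing KubeletPodDeletionDelaySeconds and AdditionalWaitPerDeleteSeconds keeps the constants unexported, so golint no longer demands doc comments for them; only identifiers visible outside the package need that. A small sketch of the distinction, with illustrative constant names:

package example

// DefaultTimeoutSeconds is exported, so golint expects this doc comment.
const DefaultTimeoutSeconds = 60

// Constants used only inside the package can stay unexported and uncommented.
const retryDelaySeconds = 5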

View File

@@ -59,7 +59,8 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
 })
 })
-// Check that the pods comprising a service get spread evenly across available zones
+// SpreadServiceOrFail check that the pods comprising a service
+// get spread evenly across available zones
 func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
 // First create the service
 serviceName := "test-service"
@@ -173,7 +174,8 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
 return true, nil
 }
-// Check that the pods comprising a replication controller get spread evenly across available zones
+// SpreadRCOrFail Check that the pods comprising a replication
+// controller get spread evenly across available zones
 func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 name := "ubelite-spread-rc-" + string(uuid.NewUUID())
 ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))

View File

@@ -180,7 +180,8 @@ type staticPVTestConfig struct {
 pod *v1.Pod
 }
-// Check that the pods using statically created PVs get scheduled to the same zone that the PV is in.
+// PodsUseStaticPVsOrFail Check that the pods using statically
+// created PVs get scheduled to the same zone that the PV is in.
 func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) {
 var err error
 c := f.ClientSet