Merge pull request #78119 from nodo/golint-test-e2e-scalability

Fix golint failures in test/e2e/scalability
This commit is contained in:
Kubernetes Prow Robot 2019-06-28 05:47:38 -07:00 committed by GitHub
commit 967b23c727
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 19 additions and 10 deletions
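The first file below is presumably hack/.golint_failures: deleting the test/e2e/scalability entry re-enables golint for that package, and the remaining files add the doc comments and identifier casing the linter then demands. As rough orientation (not taken from the diff; every identifier here is invented), the two kinds of findings being fixed look like this:

// Package example is a hypothetical file illustrating the two golint
// rules this PR addresses; none of these names come from the diff.
package example

import "time"

// Before the fix, golint reports roughly:
//
//   exported var MaxRetries should have comment or be unexported
//   var nodeCpuCapacity should be nodeCPUCapacity
//
// The declarations below show the corrected form.

// MaxRetries holds the maximum number of retries this example tolerates.
// The doc comment must start with the identifier's own name.
var MaxRetries = 3

// PollInterval holds the polling interval used by the example.
var PollInterval = 10 * time.Second

// nodeCPUCapacity shows the initialism rule: "Cpu" becomes "CPU".
// Unexported names need no doc comment, but casing is still checked.
var nodeCPUCapacity int64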

View File

@@ -562,7 +562,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
test/e2e/common
test/e2e/lifecycle/bootstrap
test/e2e/scalability
test/e2e/storage/vsphere
test/e2e_kubeadm
test/e2e_node

View File

@@ -54,22 +54,28 @@ import (
)
const (
// PodStartupLatencyThreshold holds the latency threshold for pod startup
PodStartupLatencyThreshold = 5 * time.Second
// MinSaturationThreshold holds the minimum saturation threshold
MinSaturationThreshold = 2 * time.Minute
// MinPodsPerSecondThroughput holds the minimum pod/sec throughput
MinPodsPerSecondThroughput = 8
// DensityPollInterval holds the polling interval used by the density test
DensityPollInterval = 10 * time.Second
// MinPodStartupMeasurements holds the minimum number of measurements related to pod-startup
MinPodStartupMeasurements = 500
)
// Maximum container failures this test tolerates before failing.
// MaxContainerFailures holds the maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
// Maximum no. of missing measurements related to pod-startup that the test tolerates.
// MaxMissingPodStartupMeasurements holds the maximum number of missing measurements related to pod-startup that the test tolerates.
var MaxMissingPodStartupMeasurements = 0
// Number of nodes in the cluster (computed inside BeforeEach).
var nodeCount = 0
// DensityTestConfig holds the configurations for e2e scalability tests
type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface
@@ -402,7 +408,7 @@ var _ = SIGDescribe("Density", func() {
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeCPUCapacity int64
var nodeMemCapacity int64
var nodes *v1.NodeList
var scheduleThroughputs []float64
@@ -508,7 +514,7 @@ var _ = SIGDescribe("Density", func() {
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
// Compute node capacity, leaving some slack for addon pods.
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
nodeCPUCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value() - 100*1024*1024
// Terminating a namespace (deleting the remaining objects from it - which
@@ -691,7 +697,7 @@ var _ = SIGDescribe("Density", func() {
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity / 100,
CpuRequest: nodeCPUCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
@@ -851,7 +857,7 @@ var _ = SIGDescribe("Density", func() {
// Thanks to it, we trigger the increasing priority function by scheduling
// a pod to a node, which in turn results in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
cpuRequest := *resource.NewMilliQuantity(nodeCPUCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
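For context on the two quantity helpers used just above: resource.NewMilliQuantity builds a quantity in thousandths (millicores for CPU) and resource.NewQuantity a plain integer quantity (here, bytes of memory). A self-contained sketch with made-up allocatable numbers; in the test the real values come from the first node's Allocatable status:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Made-up capacities: 4000m CPU and 8GiB memory, minus the slack the
	// density test leaves for addon pods (100m CPU, 100MiB memory).
	nodeCPUCapacity := int64(4000 - 100)
	nodeMemCapacity := int64(8<<30 - 100*1024*1024)

	// Request a fifth of the node per latency pod, as the test does.
	cpuRequest := *resource.NewMilliQuantity(nodeCPUCapacity/5, resource.DecimalSI)
	memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)

	fmt.Println(cpuRequest.String()) // "780m"
	fmt.Println(memRequest.String()) // memory request printed in bytes
}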

View File

@@ -18,6 +18,7 @@ package scalability
import "github.com/onsi/ginkgo"
// SIGDescribe is the entry point for the sig-scalability e2e framework
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-scalability] "+text, body)
}
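density.go above registers its whole suite through exactly this wrapper (var _ = SIGDescribe("Density", func() { ... })). A minimal, hypothetical spec in the same package, not part of this commit, would look like:

package scalability

import "github.com/onsi/ginkgo"

// Every block registered through the wrapper gets the "[sig-scalability]"
// prefix, so reports and ginkgo --focus filters can select the whole SIG.
var _ = SIGDescribe("Example suite", func() {
	ginkgo.It("should do nothing in this sketch", func() {
		// real tests drive the cluster here
	})
})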

View File

@@ -489,6 +489,7 @@ func generateConfigs(
return configs, secretConfigs, configMapConfigs
}
// GenerateConfigsForGroup generates the configuration needed for a group
func GenerateConfigsForGroup(
nss []*v1.Namespace,
groupName string,
@@ -720,6 +721,7 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
}
// CreateNamespaces creates namespaces with the given name prefix
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
defer testPhase.End()
namespaces := []*v1.Namespace{}
@@ -733,6 +735,7 @@ func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix str
return namespaces, nil
}
// CreateQuotas creates resource quotas in the given namespaces
func CreateQuotas(f *framework.Framework, namespaces []*v1.Namespace, podCount int, testPhase *timer.Phase) error {
defer testPhase.End()
quotaTemplate := &v1.ResourceQuota{
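The hunk is cut off at the quota template. For orientation only, here is a hedged, self-contained sketch of what a per-namespace pod quota helper of this kind typically looks like with the client-go API of that era (pre-context Create); the quota name, the pods limit, and the helper name are assumptions, not this file's actual code:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPodQuotas is a hypothetical stand-in for CreateQuotas: it writes a
// ResourceQuota capping the pod count into each of the given namespaces.
func createPodQuotas(c kubernetes.Interface, namespaces []*v1.Namespace, podCount int) error {
	quotaTemplate := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-quota"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: *resource.NewQuantity(int64(podCount), resource.DecimalSI),
			},
		},
	}
	for _, ns := range namespaces {
		if _, err := c.CoreV1().ResourceQuotas(ns.Name).Create(quotaTemplate); err != nil {
			return fmt.Errorf("creating quota in namespace %s: %v", ns.Name, err)
		}
	}
	return nil
}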