Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)
Merge pull request #78119 from nodo/golint-test-e2e-scalability
Fix golint failures in test/e2e/scalability
Commit: 967b23c727
@@ -562,7 +562,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
 test/e2e/common
 test/e2e/lifecycle/bootstrap
-test/e2e/scalability
 test/e2e/storage/vsphere
 test/e2e_kubeadm
 test/e2e_node
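The file above appears to be the golint exclusion list; removing test/e2e/scalability from it means that package must now pass golint cleanly. As a reminder of what golint enforces, here is a minimal sketch (not from the PR) of its doc-comment rule for exported identifiers:

```go
// Minimal sketch of the golint doc-comment rule; the identifiers below are
// hypothetical and not part of the PR.
package scalability

// localThreshold is unexported, so golint does not require a comment.
var localThreshold = 0

// MaxRetries is exported, so golint requires a comment, and the comment
// must start with the identifier name; otherwise it reports
// "exported var MaxRetries should have comment or be unexported".
var MaxRetries = 3
```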
@@ -54,22 +54,28 @@ import (
 )
 
 const (
+    // PodStartupLatencyThreshold holds the latency threshold for pod startup
     PodStartupLatencyThreshold = 5 * time.Second
-    MinSaturationThreshold = 2 * time.Minute
+    // MinSaturationThreshold holds the minimum saturation threshold
+    MinSaturationThreshold = 2 * time.Minute
+    // MinPodsPerSecondThroughput holds the minimum pod/sec throughput
     MinPodsPerSecondThroughput = 8
-    DensityPollInterval = 10 * time.Second
-    MinPodStartupMeasurements = 500
+    // DensityPollInterval holds the polling interval of the density test
+    DensityPollInterval = 10 * time.Second
+    // MinPodStartupMeasurements holds the minimum number of measurements related to pod-startup
+    MinPodStartupMeasurements = 500
 )
 
-// Maximum container failures this test tolerates before failing.
+// MaxContainerFailures holds the maximum container failures this test tolerates before failing.
 var MaxContainerFailures = 0
 
-// Maximum no. of missing measurements related to pod-startup that the test tolerates.
+// MaxMissingPodStartupMeasurements holds the maximum number of missing measurements related to pod-startup that the test tolerates.
 var MaxMissingPodStartupMeasurements = 0
 
 // Number of nodes in the cluster (computed inside BeforeEach).
 var nodeCount = 0
 
+// DensityTestConfig holds the configurations for e2e scalability tests
 type DensityTestConfig struct {
     Configs    []testutils.RunObjectConfig
     ClientSets []clientset.Interface
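The hunk above only adds doc comments to the exported constants and to the DensityTestConfig type; the values themselves are unchanged. A self-contained sketch of how such an exported threshold is typically consumed (assumed usage, not code from the PR):

```go
// Sketch of comparing a measurement against a documented threshold constant.
package main

import (
	"fmt"
	"time"
)

// PodStartupLatencyThreshold mirrors the constant documented in the diff above.
const PodStartupLatencyThreshold = 5 * time.Second

func main() {
	observed := 6 * time.Second // hypothetical measurement
	if observed > PodStartupLatencyThreshold {
		fmt.Printf("pod startup latency %v exceeds threshold %v\n",
			observed, PodStartupLatencyThreshold)
	}
}
```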
@@ -402,7 +408,7 @@ var _ = SIGDescribe("Density", func() {
     var uuid string
     var e2eStartupTime time.Duration
     var totalPods int
-    var nodeCpuCapacity int64
+    var nodeCPUCapacity int64
     var nodeMemCapacity int64
     var nodes *v1.NodeList
     var scheduleThroughputs []float64
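The only change in this hunk is the rename of nodeCpuCapacity to nodeCPUCapacity, which silences golint's initialism check. A minimal sketch of that rule (the value is hypothetical):

```go
// Well-known initialisms such as CPU, ID, and URL keep a consistent case
// in Go identifiers; this example is not part of the PR.
package main

import "fmt"

func main() {
	// golint flags the old spelling with
	// "var nodeCpuCapacity should be nodeCPUCapacity".
	var nodeCPUCapacity int64 = 4000 // millicores, hypothetical value
	fmt.Println("node CPU capacity (m):", nodeCPUCapacity)
}
```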
@@ -508,7 +514,7 @@ var _ = SIGDescribe("Density", func() {
     gomega.Expect(nodeCount).NotTo(gomega.BeZero())
 
     // Compute node capacity, leaving some slack for addon pods.
-    nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
+    nodeCPUCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
     nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value() - 100*1024*1024
 
     // Terminating a namespace (deleting the remaining objects from it - which
@@ -691,7 +697,7 @@ var _ = SIGDescribe("Density", func() {
     Timeout:              timeout,
     PodStatusFile:        fileHndl,
     Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
-    CpuRequest:           nodeCpuCapacity / 100,
+    CpuRequest:           nodeCPUCapacity / 100,
     MemRequest:           nodeMemCapacity / 100,
     MaxContainerFailures: &MaxContainerFailures,
     Silent:               true,
@@ -851,7 +857,7 @@ var _ = SIGDescribe("Density", func() {
     // Thanks to it we trigger increasing priority function by scheduling
     // a pod to a node, which in turn will result in spreading latency pods
     // more evenly between nodes.
-    cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
+    cpuRequest := *resource.NewMilliQuantity(nodeCPUCapacity/5, resource.DecimalSI)
     memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
     if podsPerNode > 30 {
         // This is to make them schedulable on high-density tests
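The renamed variable feeds resource requests built with the resource package. A self-contained sketch of that construction, with hypothetical capacity values (assumed usage, not code from the PR):

```go
// Sketch of building CPU and memory request quantities as in the diff above:
// each latency pod requests one fifth of the node's capacity.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	var nodeCPUCapacity int64 = 4000     // millicores, hypothetical
	var nodeMemCapacity int64 = 16 << 30 // bytes, hypothetical

	cpuRequest := *resource.NewMilliQuantity(nodeCPUCapacity/5, resource.DecimalSI)
	memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)

	fmt.Println("cpu:", cpuRequest.String(), "mem:", memRequest.String())
}
```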
@@ -18,6 +18,7 @@ package scalability
 
 import "github.com/onsi/ginkgo"
 
+// SIGDescribe is the entry point for the sig-scalability e2e framework
 func SIGDescribe(text string, body func()) bool {
     return ginkgo.Describe("[sig-scalability] "+text, body)
 }
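With the new doc comment, SIGDescribe satisfies golint's rule for exported functions. A sketch of how such a wrapper is used in a Ginkgo suite; the spec below is hypothetical and not part of the PR:

```go
// Sketch of a SIGDescribe-style wrapper and a spec registered through it.
package scalability

import "github.com/onsi/ginkgo"

// SIGDescribe is the entry point for the sig-scalability e2e framework
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-scalability] "+text, body)
}

// Every spec registered through the wrapper is automatically prefixed
// with "[sig-scalability]".
var _ = SIGDescribe("Density", func() {
	ginkgo.It("should start pods within the latency threshold", func() {
		// test body omitted
	})
})
```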
@@ -489,6 +489,7 @@ func generateConfigs(
     return configs, secretConfigs, configMapConfigs
 }
 
+// GenerateConfigsForGroup generates the configuration needed for a group
 func GenerateConfigsForGroup(
     nss []*v1.Namespace,
     groupName string,
@@ -720,6 +721,7 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
         fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
 }
 
+// CreateNamespaces creates a namespace
 func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
     defer testPhase.End()
     namespaces := []*v1.Namespace{}
@@ -733,6 +735,7 @@ func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix str
     return namespaces, nil
 }
 
+// CreateQuotas creates quotas
 func CreateQuotas(f *framework.Framework, namespaces []*v1.Namespace, podCount int, testPhase *timer.Phase) error {
     defer testPhase.End()
     quotaTemplate := &v1.ResourceQuota{
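The hunk above ends where the quota template is built. A hedged sketch (assumed, not the PR's exact code) of the kind of ResourceQuota a helper like CreateQuotas constructs, capping the number of pods per namespace:

```go
// Sketch of a pod-count ResourceQuota template; names and values are
// hypothetical.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func quotaTemplate(podCount int) *v1.ResourceQuota {
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "quota"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: *resource.NewQuantity(int64(podCount), resource.DecimalSI),
			},
		},
	}
}

func main() {
	q := quotaTemplate(500)
	pods := q.Spec.Hard[v1.ResourcePods]
	fmt.Println("pods quota:", pods.String())
}
```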