Remove deprecated scalability tests

commit 59533f0cd1 (parent 2b4dc315a3)
@@ -89,7 +89,6 @@ test/e2e/lifecycle/bootstrap
 test/e2e/manifest
 test/e2e/network
 test/e2e/node
-test/e2e/scalability
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
@@ -30,7 +30,6 @@ go_test(
         "//test/e2e/lifecycle/bootstrap:go_default_library",
         "//test/e2e/network:go_default_library",
         "//test/e2e/node:go_default_library",
-        "//test/e2e/scalability:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/e2e/servicecatalog:go_default_library",
         "//test/e2e/storage:go_default_library",
@@ -110,7 +109,6 @@ filegroup(
         "//test/e2e/network:all-srcs",
         "//test/e2e/node:all-srcs",
         "//test/e2e/perftype:all-srcs",
-        "//test/e2e/scalability:all-srcs",
         "//test/e2e/scheduling:all-srcs",
         "//test/e2e/servicecatalog:all-srcs",
         "//test/e2e/storage:all-srcs",
@@ -50,7 +50,6 @@ import (
     _ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
     _ "k8s.io/kubernetes/test/e2e/network"
     _ "k8s.io/kubernetes/test/e2e/node"
-    _ "k8s.io/kubernetes/test/e2e/scalability"
     _ "k8s.io/kubernetes/test/e2e/scheduling"
     _ "k8s.io/kubernetes/test/e2e/servicecatalog"
     _ "k8s.io/kubernetes/test/e2e/storage"
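For context (not part of the diff): the e2e binary pulls each test package in purely for its side effects; the package registers its specs with Ginkgo at initialization, so dropping the blank import above is enough to stop the scalability suite from being built into and run by the e2e binary. A minimal sketch of that registration pattern, condensed from the removed framework.go and load.go:

    // Sketch only: how an e2e test package registers its specs so that a blank
    // import from the test main is all that is needed to include them.
    package scalability

    import "github.com/onsi/ginkgo"

    // SIGDescribe tags every spec in the package with the sig-scalability label.
    func SIGDescribe(text string, body func()) bool {
        return ginkgo.Describe("[sig-scalability] "+text, body)
    }

    // Evaluated when the package is imported (even as `_`), adding the specs to the suite.
    var _ = SIGDescribe("Load capacity", func() {
        // ginkgo.It(...) specs live here.
    })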
@@ -1,64 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "common.go",
        "density.go",
        "framework.go",
        "load.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/scalability",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/client-go/discovery:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//staging/src/k8s.io/client-go/scale:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/transport:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/framework/node:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/timer:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@@ -1,11 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- gmarek
- shyamjvs
- wojtek-t
reviewers:
- gmarek
- shyamjvs
- timothysc
- wojtek-t
@@ -1,24 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scalability

import "time"

const (
    // UnreadyNodeToleration denotes time the node can be unreachable/not ready.
    UnreadyNodeToleration = 15 * time.Minute
)
File diff suppressed because it is too large.
@@ -1,32 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// DEPRECATED.
// We already migrated all periodic and presubmit tests to ClusterLoader2.
// We are still keeping this directory for optional functionality, but once
// this is supported in ClusterLoader2 tests, this test will be removed
// (hopefully in 1.16 release).
// Please don't add new functionality to this directory and instead see:
// https://github.com/kubernetes/perf-tests/tree/master/clusterloader2

package scalability

import "github.com/onsi/ginkgo"

// SIGDescribe is the entry point for the sig-scalability e2e framework
func SIGDescribe(text string, body func()) bool {
    return ginkgo.Describe("[sig-scalability] "+text, body)
}
@@ -1,767 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// DEPRECATED.
// We already migrated all periodic and presubmit tests to ClusterLoader2.
// We are still keeping this file for optional functionality, but once
// this is supported in ClusterLoader2 tests, this test will be removed
// (hopefully in 1.16 release).
// Please don't add new functionality to this file and instead see:
// https://github.com/kubernetes/perf-tests/tree/master/clusterloader2

package scalability

import (
    "context"
    "fmt"
    "math"
    "math/rand"
    "net"
    "net/http"
    "os"
    "strconv"
    "sync"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/intstr"
    utilnet "k8s.io/apimachinery/pkg/util/net"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/restmapper"
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/client-go/transport"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/kubernetes/pkg/apis/batch"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/test/e2e/framework"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    "k8s.io/kubernetes/test/e2e/framework/timer"
    testutils "k8s.io/kubernetes/test/utils"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
)

const (
    smallGroupSize  = 5
    mediumGroupSize = 30
    bigGroupSize    = 250
    smallGroupName  = "load-small"
    mediumGroupName = "load-medium"
    bigGroupName    = "load-big"
    // We start RCs/Services/pods/... in different namespace in this test.
    // nodeCountPerNamespace determines how many namespaces we will be using
    // depending on the number of nodes in the underlying cluster.
    nodeCountPerNamespace = 100
    // How many threads will be used to create/delete services during this test.
    serviceOperationsParallelism = 1
    svcLabelKey                  = "svc-label"
)

var randomKind = schema.GroupKind{Kind: "Random"}

var knownKinds = []schema.GroupKind{
    api.Kind("ReplicationController"),
    extensions.Kind("Deployment"),
    // TODO: uncomment when Jobs are fixed: #38497
    //batch.Kind("Job"),
    extensions.Kind("ReplicaSet"),
}

// This test suite can take a long time to run, so by default it is added to
// the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
var _ = SIGDescribe("Load capacity", func() {
    var clientset clientset.Interface
    var nodeCount int
    var ns string
    var configs []testutils.RunObjectConfig
    var secretConfigs []*testutils.SecretConfig
    var configMapConfigs []*testutils.ConfigMapConfig

    testCaseBaseName := "load"
    var testPhaseDurations *timer.TestPhaseTimer
    var profileGathererStopCh chan struct{}

    ginkgo.BeforeEach(func() {
        framework.SkipUnlessSSHKeyPresent()
    })

    // Gathers metrics before teardown
    // TODO add flag that allows to skip cleanup on failure
    ginkgo.AfterEach(func() {
        // Stop apiserver CPU profile gatherer and gather memory allocations profile.
        close(profileGathererStopCh)
        wg := sync.WaitGroup{}
        wg.Add(1)
        framework.GatherMemoryProfile("kube-apiserver", "load", &wg)
        wg.Wait()

        // Verify latency metrics
        highLatencyRequests, metrics, err := e2emetrics.HighLatencyRequests(clientset, nodeCount)
        framework.ExpectNoError(err)
        if err == nil {
            summaries := make([]framework.TestDataSummary, 0, 2)
            summaries = append(summaries, metrics)
            summaries = append(summaries, testPhaseDurations)
            framework.PrintSummaries(summaries, testCaseBaseName)
            gomega.Expect(highLatencyRequests).NotTo(gomega.BeNumerically(">", 0), "There should be no high-latency requests")
        }
    })

    // We assume a default throughput of 10 pods/second throughput.
    // We may want to revisit it in the future.
    // However, this can be overridden by LOAD_TEST_THROUGHPUT env var.
    throughput := 10
    if throughputEnv := os.Getenv("LOAD_TEST_THROUGHPUT"); throughputEnv != "" {
        if newThroughput, err := strconv.Atoi(throughputEnv); err == nil {
            throughput = newThroughput
        }
    }

    // Explicitly put here, to delete namespace at the end of the test
    // (after measuring latency metrics, etc.).
    options := framework.Options{
        ClientQPS:   float32(math.Max(50.0, float64(2*throughput))),
        ClientBurst: int(math.Max(100.0, float64(4*throughput))),
    }
    f := framework.NewFramework(testCaseBaseName, options, nil)
    f.NamespaceDeletionTimeout = time.Hour

    ginkgo.BeforeEach(func() {
        testPhaseDurations = timer.NewTestPhaseTimer()
        clientset = f.ClientSet

        ns = f.Namespace.Name

        _, err := e2enode.GetRandomReadySchedulableNode(clientset)
        framework.ExpectNoError(err)

        // Terminating a namespace (deleting the remaining objects from it - which
        // generally means events) can affect the current run. Thus we wait for all
        // terminating namespace to be finally deleted before starting this test.
        err = framework.CheckTestingNSDeletedExcept(clientset, ns)
        framework.ExpectNoError(err)

        framework.ExpectNoError(e2emetrics.ResetMetrics(clientset))

        // Start apiserver CPU profile gatherer with frequency based on cluster size.
        profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
        profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "load", profileGatheringDelay)
    })

    type Load struct {
        podsPerNode int
        image       string
        command     []string
        // What kind of resource we want to create
        kind             schema.GroupKind
        services         bool
        secretsPerPod    int
        configMapsPerPod int
        daemonsPerNode   int
        quotas           bool
    }

    serveHostnameCmd := []string{"/agnhost", "serve-hostname"}
    loadTests := []Load{
        // The container will consume 1 cpu and 512mb of memory.
        {podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController")},
        // Tests for other resource types
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment")},
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: batch.Kind("Job")},
        // Test scheduling when daemons are preset
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
        // Test with secrets
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
        // Test with configmaps
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
        // Special test case which randomizes created resources
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: randomKind},
        // Test with quotas
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: api.Kind("ReplicationController"), quotas: true},
        {podsPerNode: 30, image: framework.ServeHostnameImage, command: serveHostnameCmd, kind: randomKind, quotas: true},
    }

    isCanonical := func(test *Load) bool {
        return test.podsPerNode == 30 && test.kind == api.Kind("ReplicationController") && test.daemonsPerNode == 0 && test.secretsPerPod == 0 && test.configMapsPerPod == 0 && !test.quotas
    }

    for _, testArg := range loadTests {
        feature := "ManualPerformance"
        if isCanonical(&testArg) {
            feature = "Performance"
        }
        name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets, %v configmaps and %v daemons",
            feature,
            testArg.podsPerNode,
            testArg.kind,
            testArg.secretsPerPod,
            testArg.configMapsPerPod,
            testArg.daemonsPerNode,
        )
        if testArg.quotas {
            name += " with quotas"
        }
        itArg := testArg
        itArg.services = os.Getenv("CREATE_SERVICES") != "false"

        ginkgo.It(name, func() {
            // Create a number of namespaces.
            namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
            namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
            framework.ExpectNoError(err)

            totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
            configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)

            if itArg.quotas {
                framework.ExpectNoError(CreateQuotas(f, namespaces, 2*totalPods, testPhaseDurations.StartPhase(115, "quota creation")))
            }

            f.AddonResourceConstraints = loadResourceConstraints()

            serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
            defer serviceCreationPhase.End()
            if itArg.services {
                framework.Logf("Creating services")
                services := generateServicesForConfigs(configs)
                createService := func(i int) {
                    defer ginkgo.GinkgoRecover()
                    framework.ExpectNoError(testutils.CreateServiceWithRetries(clientset, services[i].Namespace, services[i]))
                }
                workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), createService)
                framework.Logf("%v Services created.", len(services))
                defer func(services []*v1.Service) {
                    serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
                    defer serviceCleanupPhase.End()
                    framework.Logf("Starting to delete services...")
                    deleteService := func(i int) {
                        defer ginkgo.GinkgoRecover()
                        framework.ExpectNoError(testutils.DeleteResourceWithRetries(clientset, api.Kind("Service"), services[i].Namespace, services[i].Name, nil))
                    }
                    workqueue.ParallelizeUntil(context.TODO(), serviceOperationsParallelism, len(services), deleteService)
                    framework.Logf("Services deleted")
                }(services)
            } else {
                framework.Logf("Skipping service creation")
            }
            serviceCreationPhase.End()
            // Create all secrets.
            secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation")
            defer secretsCreationPhase.End()
            for i := range secretConfigs {
                secretConfigs[i].Run()
                defer secretConfigs[i].Stop()
            }
            secretsCreationPhase.End()
            // Create all configmaps.
            configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation")
            defer configMapsCreationPhase.End()
            for i := range configMapConfigs {
                configMapConfigs[i].Run()
                defer configMapConfigs[i].Stop()
            }
            configMapsCreationPhase.End()
            // StartDaemon if needed
            daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation")
            defer daemonSetCreationPhase.End()
            for i := 0; i < itArg.daemonsPerNode; i++ {
                daemonName := fmt.Sprintf("load-daemon-%v", i)
                daemonConfig := &testutils.DaemonConfig{
                    Client:    f.ClientSet,
                    Name:      daemonName,
                    Namespace: f.Namespace.Name,
                    LogFunc:   framework.Logf,
                }
                daemonConfig.Run()
                defer func(config *testutils.DaemonConfig) {
                    framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
                        f.ClientSet,
                        extensions.Kind("DaemonSet"),
                        config.Namespace,
                        config.Name,
                    ))
                }(daemonConfig)
            }
            daemonSetCreationPhase.End()

            // Simulate lifetime of RC:
            // * create with initial size
            // * scale RC to a random size and list all pods
            // * scale RC to a random size and list all pods
            // * delete it
            //
            // This will generate ~5 creations/deletions per second assuming:
            // - X small RCs each 5 pods [ 5 * X = totalPods / 2 ]
            // - Y medium RCs each 30 pods [ 30 * Y = totalPods / 4 ]
            // - Z big RCs each 250 pods [ 250 * Z = totalPods / 4]

            // We would like to spread creating replication controllers over time
            // to make it possible to create/schedule them in the meantime.
            // Currently we assume <throughput> pods/second average throughput.
            // We may want to revisit it in the future.
            framework.Logf("Starting to create %v objects...", itArg.kind)
            creatingTime := time.Duration(totalPods/throughput) * time.Second

            createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
            ginkgo.By("============================================================================")

            // We would like to spread scaling replication controllers over time
            // to make it possible to create/schedule & delete them in the meantime.
            // Currently we assume that <throughput> pods/second average throughput.

            // The expected number of created/deleted pods is totalPods/4 when scaling,
            // as each RC changes its size from X to a uniform random value in [X/2, 3X/2].
            scalingTime := time.Duration(totalPods/(4*throughput)) * time.Second
            framework.Logf("Starting to scale %v objects first time...", itArg.kind)
            scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
            ginkgo.By("============================================================================")

            // Cleanup all created replication controllers.
            // Currently we assume <throughput> pods/second average deletion throughput.
            // We may want to revisit it in the future.
            deletingTime := time.Duration(totalPods/throughput) * time.Second
            framework.Logf("Starting to delete %v objects...", itArg.kind)
            deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
        })
    }
})

func createClients(numberOfClients int) ([]clientset.Interface, []scaleclient.ScalesGetter, error) {
    clients := make([]clientset.Interface, numberOfClients)
    scalesClients := make([]scaleclient.ScalesGetter, numberOfClients)

    for i := 0; i < numberOfClients; i++ {
        config, err := framework.LoadConfig()
        framework.ExpectNoError(err)
        config.QPS = 100
        config.Burst = 200
        if framework.TestContext.KubeAPIContentType != "" {
            config.ContentType = framework.TestContext.KubeAPIContentType
        }

        // For the purpose of this test, we want to force that clients
        // do not share underlying transport (which is a default behavior
        // in Kubernetes). Thus, we are explicitly creating transport for
        // each client here.
        transportConfig, err := config.TransportConfig()
        if err != nil {
            return nil, nil, err
        }
        tlsConfig, err := transport.TLSConfigFor(transportConfig)
        if err != nil {
            return nil, nil, err
        }
        config.Transport = utilnet.SetTransportDefaults(&http.Transport{
            Proxy:               http.ProxyFromEnvironment,
            TLSHandshakeTimeout: 10 * time.Second,
            TLSClientConfig:     tlsConfig,
            MaxIdleConnsPerHost: 100,
            DialContext: (&net.Dialer{
                Timeout:   30 * time.Second,
                KeepAlive: 30 * time.Second,
            }).DialContext,
        })
        config.WrapTransport = transportConfig.WrapTransport
        config.Dial = transportConfig.Dial
        // Overwrite TLS-related fields from config to avoid collision with
        // Transport field.
        config.TLSClientConfig = restclient.TLSClientConfig{}
        config.AuthProvider = nil
        config.ExecProvider = nil

        c, err := clientset.NewForConfig(config)
        if err != nil {
            return nil, nil, err
        }
        clients[i] = c

        // create scale client, if GroupVersion or NegotiatedSerializer are not set
        // assign default values - these fields are mandatory (required by RESTClientFor).
        if config.GroupVersion == nil {
            config.GroupVersion = &schema.GroupVersion{}
        }
        if config.NegotiatedSerializer == nil {
            config.NegotiatedSerializer = scheme.Codecs
        }
        restClient, err := restclient.RESTClientFor(config)
        if err != nil {
            return nil, nil, err
        }
        discoClient, err := discovery.NewDiscoveryClientForConfig(config)
        if err != nil {
            return nil, nil, err
        }
        cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
        restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
        restMapper.Reset()
        resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
        scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
    }
    return clients, scalesClients, nil
}

func computePodCounts(total int) (int, int, int) {
    // Small RCs owns ~0.5 of total number of pods, medium and big RCs ~0.25 each.
    // For example for 3000 pods (100 nodes, 30 pods per node) there are:
    // - 300 small RCs each 5 pods
    // - 25 medium RCs each 30 pods
    // - 3 big RCs each 250 pods
    bigGroupCount := total / 4 / bigGroupSize
    total -= bigGroupCount * bigGroupSize
    mediumGroupCount := total / 3 / mediumGroupSize
    total -= mediumGroupCount * mediumGroupSize
    smallGroupCount := total / smallGroupSize
    return smallGroupCount, mediumGroupCount, bigGroupCount
}

func loadResourceConstraints() map[string]framework.ResourceConstraint {
    constraints := make(map[string]framework.ResourceConstraint)
    constraints["coredns"] = framework.ResourceConstraint{
        CPUConstraint:    framework.NoCPUConstraint,
        MemoryConstraint: 170 * (1024 * 1024),
    }
    constraints["kubedns"] = framework.ResourceConstraint{
        CPUConstraint:    framework.NoCPUConstraint,
        MemoryConstraint: 170 * (1024 * 1024),
    }
    return constraints
}

func generateConfigs(
    totalPods int,
    image string,
    command []string,
    nss []*v1.Namespace,
    kind schema.GroupKind,
    secretsPerPod int,
    configMapsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
    configs := make([]testutils.RunObjectConfig, 0)
    secretConfigs := make([]*testutils.SecretConfig, 0)
    configMapConfigs := make([]*testutils.ConfigMapConfig, 0)

    smallGroupCount, mediumGroupCount, bigGroupCount := computePodCounts(totalPods)
    newConfigs, newSecretConfigs, newConfigMapConfigs := GenerateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
    configs = append(configs, newConfigs...)
    secretConfigs = append(secretConfigs, newSecretConfigs...)
    configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
    newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
    configs = append(configs, newConfigs...)
    secretConfigs = append(secretConfigs, newSecretConfigs...)
    configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
    newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
    configs = append(configs, newConfigs...)
    secretConfigs = append(secretConfigs, newSecretConfigs...)
    configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)

    // Create a number of clients to better simulate real usecase
    // where not everyone is using exactly the same client.
    rcsPerClient := 20
    clients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
    framework.ExpectNoError(err)

    for i := 0; i < len(configs); i++ {
        configs[i].SetClient(clients[i%len(clients)])
        configs[i].SetScalesClient(scalesClients[i%len(clients)])
    }
    for i := 0; i < len(secretConfigs); i++ {
        secretConfigs[i].Client = clients[i%len(clients)]
    }
    for i := 0; i < len(configMapConfigs); i++ {
        configMapConfigs[i].Client = clients[i%len(clients)]
    }

    return configs, secretConfigs, configMapConfigs
}

// GenerateConfigsForGroup generates the configuration needed for a group
func GenerateConfigsForGroup(
    nss []*v1.Namespace,
    groupName string,
    size, count int,
    image string,
    command []string,
    kind schema.GroupKind,
    secretsPerPod int,
    configMapsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
    configs := make([]testutils.RunObjectConfig, 0, count)
    secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
    configMapConfigs := make([]*testutils.ConfigMapConfig, 0, count*configMapsPerPod)
    savedKind := kind
    for i := 1; i <= count; i++ {
        kind = savedKind
        namespace := nss[i%len(nss)].Name
        secretNames := make([]string, 0, secretsPerPod)
        configMapNames := make([]string, 0, configMapsPerPod)

        for j := 0; j < secretsPerPod; j++ {
            secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
            secretConfigs = append(secretConfigs, &testutils.SecretConfig{
                Content:   map[string]string{"foo": "bar"},
                Client:    nil, // this will be overwritten later
                Name:      secretName,
                Namespace: namespace,
                LogFunc:   framework.Logf,
            })
            secretNames = append(secretNames, secretName)
        }

        for j := 0; j < configMapsPerPod; j++ {
            configMapName := fmt.Sprintf("%v-%v-configmap-%v", groupName, i, j)
            configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
                Content:   map[string]string{"foo": "bar"},
                Client:    nil, // this will be overwritten later
                Name:      configMapName,
                Namespace: namespace,
                LogFunc:   framework.Logf,
            })
            configMapNames = append(configMapNames, configMapName)
        }

        baseConfig := &testutils.RCConfig{
            Client:         nil, // this will be overwritten later
            Name:           groupName + "-" + strconv.Itoa(i),
            Namespace:      namespace,
            Timeout:        UnreadyNodeToleration,
            Image:          image,
            Command:        command,
            Replicas:       size,
            CpuRequest:     10,       // 0.01 core
            MemRequest:     26214400, // 25MB
            SecretNames:    secretNames,
            ConfigMapNames: configMapNames,
            // Define a label to group every 2 RCs into one service.
            Labels: map[string]string{svcLabelKey: groupName + "-" + strconv.Itoa((i+1)/2)},
            Tolerations: []v1.Toleration{
                {
                    Key:               "node.kubernetes.io/not-ready",
                    Operator:          v1.TolerationOpExists,
                    Effect:            v1.TaintEffectNoExecute,
                    TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)),
                }, {
                    Key:               "node.kubernetes.io/unreachable",
                    Operator:          v1.TolerationOpExists,
                    Effect:            v1.TaintEffectNoExecute,
                    TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)),
                },
            },
        }

        if kind == randomKind {
            kind = knownKinds[rand.Int()%len(knownKinds)]
        }

        var config testutils.RunObjectConfig
        switch kind {
        case api.Kind("ReplicationController"):
            config = baseConfig
        case extensions.Kind("ReplicaSet"):
            config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
        case extensions.Kind("Deployment"):
            config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
        case batch.Kind("Job"):
            config = &testutils.JobConfig{RCConfig: *baseConfig}
        default:
            framework.Failf("Unsupported kind for config creation: %v", kind)
        }
        configs = append(configs, config)
    }
    return configs, secretConfigs, configMapConfigs
}

func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service {
    services := make([]*v1.Service, 0)
    currentSvcLabel := ""
    for _, config := range configs {
        svcLabel, found := config.GetLabelValue(svcLabelKey)
        if !found || svcLabel == currentSvcLabel {
            continue
        }
        currentSvcLabel = svcLabel
        serviceName := config.GetName() + "-svc"
        labels := map[string]string{
            "name":      config.GetName(),
            svcLabelKey: currentSvcLabel,
        }
        service := &v1.Service{
            ObjectMeta: metav1.ObjectMeta{
                Name:      serviceName,
                Namespace: config.GetNamespace(),
            },
            Spec: v1.ServiceSpec{
                Selector: labels,
                Ports: []v1.ServicePort{{
                    Port:       80,
                    TargetPort: intstr.FromInt(80),
                }},
            },
        }
        services = append(services, service)
    }
    return services
}

func sleepUpTo(d time.Duration) {
    if d.Nanoseconds() > 0 {
        time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds())))
    }
}

func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
    backoff := wait.Backoff{
        Duration: initialDuration,
        Factor:   3,
        Jitter:   0,
        Steps:    6,
    }
    return wait.ExponentialBackoff(backoff, fn)
}

func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) {
    defer testPhase.End()
    var wg sync.WaitGroup
    wg.Add(len(configs))
    for _, config := range configs {
        go createResource(&wg, config, creatingTime)
    }
    wg.Wait()
}

func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) {
    defer ginkgo.GinkgoRecover()
    defer wg.Done()

    sleepUpTo(creatingTime)
    framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
}

func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) {
    defer testPhase.End()
    var wg sync.WaitGroup
    wg.Add(len(configs))
    for _, config := range configs {
        go scaleResource(&wg, config, scalingTime)
    }
    wg.Wait()
}

// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling happens always based on original size, not the current size.
func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) {
    defer ginkgo.GinkgoRecover()
    defer wg.Done()

    sleepUpTo(scalingTime)
    newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
    framework.ExpectNoError(framework.ScaleResource(
        config.GetClient(),
        config.GetScalesGetter(),
        config.GetNamespace(),
        config.GetName(),
        newSize,
        true,
        config.GetKind(),
        config.GetGroupVersionResource(),
    ),
        fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName()))

    selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
    options := metav1.ListOptions{
        LabelSelector:   selector.String(),
        ResourceVersion: "0",
    }
    listResourcePodsFunc := func() (bool, error) {
        _, err := config.GetClient().CoreV1().Pods(config.GetNamespace()).List(options)
        if err == nil {
            return true, nil
        }
        framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
        if testutils.IsRetryableAPIError(err) {
            return false, nil
        }
        return false, fmt.Errorf("Failed to list pods from %v %v with non-retriable error: %v", config.GetKind(), config.GetName(), err)
    }
    err := retryWithExponentialBackOff(100*time.Millisecond, listResourcePodsFunc)
    framework.ExpectNoError(err)
}

func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) {
    defer testPhase.End()
    var wg sync.WaitGroup
    wg.Add(len(configs))
    for _, config := range configs {
        go deleteResource(&wg, config, deletingTime)
    }
    wg.Wait()
}

func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
    defer ginkgo.GinkgoRecover()
    defer wg.Done()

    sleepUpTo(deletingTime)
    framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
        config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
        fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
}

// CreateNamespaces creates a namespace
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
    defer testPhase.End()
    namespaces := []*v1.Namespace{}
    for i := 1; i <= namespaceCount; i++ {
        namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
        if err != nil {
            return []*v1.Namespace{}, err
        }
        namespaces = append(namespaces, namespace)
    }
    return namespaces, nil
}

// CreateQuotas creates quotas
func CreateQuotas(f *framework.Framework, namespaces []*v1.Namespace, podCount int, testPhase *timer.Phase) error {
    defer testPhase.End()
    quotaTemplate := &v1.ResourceQuota{
        Spec: v1.ResourceQuotaSpec{
            Hard: v1.ResourceList{"pods": *resource.NewQuantity(int64(podCount), resource.DecimalSI)},
        },
    }
    for _, ns := range namespaces {
        quotaTemplate.Name = ns.Name + "-quota"
        if err := testutils.CreateResourceQuotaWithRetries(f.ClientSet, ns.Name, quotaTemplate); err != nil {
            return fmt.Errorf("Error creating quota: %v", err)
        }
    }
    return nil
}
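Aside (not part of the commit): the pacing arithmetic that the removed load.go relies on can be checked with a small standalone program. The figures below assume the 100-node, 30-pods-per-node example from the code comments and the default throughput of 10 pods/second; the program simply mirrors computePodCounts and the creatingTime/scalingTime formulas.

    // Illustration only: reproduces the pod-count split and pacing math above.
    package main

    import "fmt"

    func main() {
        const (
            smallGroupSize  = 5
            mediumGroupSize = 30
            bigGroupSize    = 250
            throughput      = 10 // pods/second, the test's default
        )
        total := 3000 // 100 nodes * 30 pods per node
        remaining := total

        big := remaining / 4 / bigGroupSize // ~1/4 of pods in big RCs
        remaining -= big * bigGroupSize
        medium := remaining / 3 / mediumGroupSize // ~1/4 of pods in medium RCs
        remaining -= medium * mediumGroupSize
        small := remaining / smallGroupSize // remaining ~1/2 in small RCs

        fmt.Println(small, medium, big)                             // 300 25 3
        fmt.Println(total/throughput, "s spread over creation")     // 300 s
        fmt.Println(total/(4*throughput), "s spread over scaling")  // 75 s
    }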