Add Daemons to Load/Density tests
commit bfe2a2b03c (parent 3d47fcc8ac)
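This change threads an optional set of DaemonSets through the density and load e2e tests: a new daemonsPerNode knob creates that many DaemonSets per run (each placing one pod on every node), the number of pods the test itself creates is reduced accordingly so the per-node pod target is preserved, and a new testutils.DaemonConfig helper creates a DaemonSet and waits until it is running on all nodes.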
@@ -66,6 +66,7 @@ type DensityTestConfig struct {
 	// What kind of resource we want to create
 	kind          schema.GroupKind
 	SecretConfigs []*testutils.SecretConfig
+	DaemonConfigs []*testutils.DaemonConfig
 }
 
 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@@ -197,6 +198,11 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 	for i := range dtc.SecretConfigs {
 		dtc.SecretConfigs[i].Run()
 	}
+
+	for i := range dtc.DaemonConfigs {
+		dtc.DaemonConfigs[i].Run()
+	}
+
 	// Start all replication controllers.
 	startTime := time.Now()
 	wg := sync.WaitGroup{}
@@ -258,7 +264,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 		framework.ExpectNoError(err)
 	} else {
 		By(fmt.Sprintf("Cleaning up the %v and pods", kind))
-		err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, dtc.Configs[i].GetNamespace(), name)
+		err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
 		framework.ExpectNoError(err)
 	}
 }
@@ -267,6 +273,16 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 	for i := range dtc.SecretConfigs {
 		dtc.SecretConfigs[i].Stop()
 	}
+
+	for i := range dtc.DaemonConfigs {
+		framework.ExpectNoError(framework.DeleteResourceAndPods(
+			dtc.ClientSet,
+			dtc.InternalClientset,
+			extensions.Kind("DaemonSet"),
+			dtc.DaemonConfigs[i].Namespace,
+			dtc.DaemonConfigs[i].Name,
+		))
+	}
 }
 
 // This test suite can take a long time to run, and can affect or be affected by other tests.
@ -367,8 +383,9 @@ var _ = framework.KubeDescribe("Density", func() {
|
|||||||
// Controls how often the apiserver is polled for pods
|
// Controls how often the apiserver is polled for pods
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
// What kind of resource we should be creating. Default: ReplicationController
|
// What kind of resource we should be creating. Default: ReplicationController
|
||||||
kind schema.GroupKind
|
kind schema.GroupKind
|
||||||
secretsPerPod int
|
secretsPerPod int
|
||||||
|
daemonsPerNode int
|
||||||
}
|
}
|
||||||
|
|
||||||
densityTests := []Density{
|
densityTests := []Density{
|
||||||
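Since daemonsPerNode is a plain int, existing densityTests entries that do not set it get Go's zero value, so current density configurations keep running with no daemons unless they opt in.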
@@ -405,7 +422,7 @@ var _ = framework.KubeDescribe("Density", func() {
 			if podsPerNode == 30 {
 				f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
 			}
-			totalPods = podsPerNode * nodeCount
+			totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
 			fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
 			framework.ExpectNoError(err)
 			defer fileHndl.Close()
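The test-pod count now leaves room for the daemon pods so the per-node total stays at the target. Illustrative numbers (not from this diff): at 30 pods per node on 100 nodes with daemonsPerNode set to 2, the test creates (30 - 2) * 100 = 2800 pods itself, and the two DaemonSets supply the remaining 2 pods on each node.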
@@ -477,6 +494,16 @@ var _ = framework.KubeDescribe("Density", func() {
 				kind:          itArg.kind,
 				SecretConfigs: secretConfigs,
 			}
+
+			for i := 0; i < itArg.daemonsPerNode; i++ {
+				dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
+					&testutils.DaemonConfig{
+						Client:    f.ClientSet,
+						Name:      fmt.Sprintf("density-daemon-%v", i),
+						Namespace: f.Namespace.Name,
+						LogFunc:   framework.Logf,
+					})
+			}
 			e2eStartupTime = runDensityTest(dConfig)
 			if itArg.runLatencyTest {
 				By("Scheduling additional Pods to measure startup latencies")
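Each iteration appends one DaemonConfig, so daemonsPerNode set to N yields DaemonSets density-daemon-0 through density-daemon-(N-1), each running the default pause image on every node (see the DaemonConfig definition at the end of this diff).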
@@ -2779,6 +2779,8 @@ func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, n
 		return c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
 	case extensionsinternal.Kind("Deployment"):
 		return c.Extensions().Deployments(ns).Get(name, metav1.GetOptions{})
+	case extensionsinternal.Kind("DaemonSet"):
+		return c.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
 	default:
 		return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
 	}
@@ -2805,6 +2807,8 @@ func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
 		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
 	case *extensions.Deployment:
 		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
+	case *extensions.DaemonSet:
+		return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
 	default:
 		return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
 	}
@@ -2840,6 +2844,8 @@ func getReaperForKind(internalClientset internalclientset.Interface, kind schema
 		return kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
 	case extensionsinternal.Kind("Deployment"):
 		return kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClientset)
+	case extensionsinternal.Kind("DaemonSet"):
+		return kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), internalClientset)
 	default:
 		return nil, fmt.Errorf("Unsupported kind: %v", kind)
 	}
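These three case additions extend the framework's kind-generic helpers (runtime-object lookup, selector extraction, and reaper selection) to DaemonSets, which is presumably what lets the cleanup paths above pass extensions.Kind("DaemonSet") to framework.DeleteResourceAndPods.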
@@ -122,9 +122,10 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		image   string
 		command []string
 		// What kind of resource we want to create
 		kind           schema.GroupKind
 		services       bool
 		secretsPerPod  int
+		daemonsPerNode int
 	}
 
 	loadTests := []Load{
@@ -149,9 +150,8 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode))
 		framework.ExpectNoError(err)
 
-		totalPods := itArg.podsPerNode * nodeCount
+		totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
 		configs, secretConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod)
-		var services []*v1.Service
 		if itArg.services {
 			framework.Logf("Creating services")
 			services := generateServicesForConfigs(configs)
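The removed outer var services declaration goes with the next two hunks: service deletion moves into a defer inside the if itArg.services block, and the end-of-test cleanup that referenced the outer variable is deleted, so the inner services := declaration suffices.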
@@ -160,12 +160,41 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 				framework.ExpectNoError(err)
 			}
 			framework.Logf("%v Services created.", len(services))
+			defer func(services []*v1.Service) {
+				framework.Logf("Starting to delete services...")
+				for _, service := range services {
+					err := clientset.Core().Services(ns).Delete(service.Name, nil)
+					framework.ExpectNoError(err)
+				}
+				framework.Logf("Services deleted")
+			}(services)
 		} else {
 			framework.Logf("Skipping service creation")
 		}
 		// Create all secrets
 		for i := range secretConfigs {
 			secretConfigs[i].Run()
+			defer secretConfigs[i].Stop()
+		}
+		// Start daemons if needed
+		for i := 0; i < itArg.daemonsPerNode; i++ {
+			daemonName := fmt.Sprintf("load-daemon-%v", i)
+			daemonConfig := &testutils.DaemonConfig{
+				Client:    f.ClientSet,
+				Name:      daemonName,
+				Namespace: f.Namespace.Name,
+				LogFunc:   framework.Logf,
+			}
+			daemonConfig.Run()
+			defer func(config *testutils.DaemonConfig) {
+				framework.ExpectNoError(framework.DeleteResourceAndPods(
+					f.ClientSet,
+					f.InternalClientset,
+					extensions.Kind("DaemonSet"),
+					config.Namespace,
+					config.Name,
+				))
+			}(daemonConfig)
 		}
 
 		// Simulate lifetime of RC:
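Cleanup for services, secrets, and daemons is now registered as defers at creation time, so the resources are removed even when the test fails partway through; the corresponding explicit cleanup block at the end of the test body is deleted in the next hunk. Note that daemonConfig.Run()'s returned error is discarded here; the usage sketch at the end of this diff wraps it in framework.ExpectNoError instead.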
@@ -207,18 +236,6 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 			deletingTime := time.Duration(totalPods/throughput) * time.Second
 			framework.Logf("Starting to delete ReplicationControllers...")
 			deleteAllResources(configs, deletingTime)
-			// Delete all secrets
-			for i := range secretConfigs {
-				secretConfigs[i].Stop()
-			}
-			if itArg.services {
-				framework.Logf("Starting to delete services...")
-				for _, service := range services {
-					err := clientset.Core().Services(ns).Delete(service.Name, nil)
-					framework.ExpectNoError(err)
-				}
-				framework.Logf("Services deleted")
-			}
 		})
 	}
 })
@@ -37,6 +37,7 @@ go_library(
         "//pkg/runtime/schema:go_default_library",
         "//pkg/util/sets:go_default_library",
         "//pkg/util/uuid:go_default_library",
+        "//pkg/util/wait:go_default_library",
         "//pkg/util/workqueue:go_default_library",
        "//pkg/watch:go_default_library",
         "//vendor:github.com/golang/glog",
@@ -37,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/runtime/schema"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/uuid"
+	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/util/workqueue"
 
 	"github.com/golang/glog"
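Both the BUILD rule and the import block gain pkg/util/wait, which the new DaemonConfig.Run below needs for its wait.Poll readiness loop.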
@@ -1000,3 +1001,90 @@ func attachSecrets(template *v1.PodTemplateSpec, secretNames []string) {
 	template.Spec.Volumes = volumes
 	template.Spec.Containers[0].VolumeMounts = mounts
 }
+
+type DaemonConfig struct {
+	Client    clientset.Interface
+	Name      string
+	Namespace string
+	Image     string
+	// If set this function will be used to print log lines instead of glog.
+	LogFunc func(fmt string, args ...interface{})
+	// How long we wait for DaemonSet to become running.
+	Timeout time.Duration
+}
+
+func (config *DaemonConfig) Run() error {
+	if config.Image == "" {
+		config.Image = "kubernetes/pause"
+	}
+	nameLabel := map[string]string{
+		"name": config.Name + "-daemon",
+	}
+	daemon := &extensions.DaemonSet{
+		ObjectMeta: v1.ObjectMeta{
+			Name: config.Name,
+		},
+		Spec: extensions.DaemonSetSpec{
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{
+					Labels: nameLabel,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  config.Name,
+							Image: config.Image,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	_, err := config.Client.Extensions().DaemonSets(config.Namespace).Create(daemon)
+	if err != nil {
+		return fmt.Errorf("Error creating DaemonSet %v: %v", config.Name, err)
+	}
+
+	var nodes *v1.NodeList
+	for i := 0; i < retries; i++ {
+		// Wait for all daemons to be running
+		nodes, err = config.Client.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
+		if err == nil {
+			break
+		} else if i+1 == retries {
+			return fmt.Errorf("Error listing Nodes while waiting for DaemonSet %v: %v", config.Name, err)
+		}
+	}
+
+	timeout := config.Timeout
+	if timeout <= 0 {
+		timeout = 5 * time.Minute
+	}
+
+	podStore := NewPodStore(config.Client, config.Namespace, labels.SelectorFromSet(nameLabel), fields.Everything())
+	defer podStore.Stop()
+
+	err = wait.Poll(time.Second, timeout, func() (bool, error) {
+		pods := podStore.List()
+
+		nodeHasDaemon := sets.NewString()
+		for _, pod := range pods {
+			podReady, _ := PodRunningReady(pod)
+			if pod.Spec.NodeName != "" && podReady {
+				nodeHasDaemon.Insert(pod.Spec.NodeName)
+			}
+		}
+
+		running := len(nodeHasDaemon)
+		config.LogFunc("Found %v/%v Daemons %v running", running, len(nodes.Items), config.Name)
+		return running == len(nodes.Items), nil
+	})
+	if err != nil {
+		config.LogFunc("Timed out while waiting for DaemonSet %v/%v to be running.", config.Namespace, config.Name)
+	} else {
+		config.LogFunc("Created Daemon %v/%v", config.Namespace, config.Name)
+	}
+
+	return err
+}
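For reference, a minimal usage sketch of the new helper, mirroring how the load test above drives it. The framework object f, the "example-daemon" name, and the ExpectNoError wrapping are illustrative assumptions, not part of this commit:

	// Create one DaemonSet via the new helper and clean it up with the
	// framework's generic deleter (both shown in this diff).
	daemon := &testutils.DaemonConfig{
		Client:    f.ClientSet,
		Name:      "example-daemon", // hypothetical name
		Namespace: f.Namespace.Name,
		LogFunc:   framework.Logf,
		// Image defaults to kubernetes/pause; Timeout defaults to 5 minutes.
	}
	// Run blocks until a daemon pod is running and ready on every node.
	framework.ExpectNoError(daemon.Run())
	defer func() {
		framework.ExpectNoError(framework.DeleteResourceAndPods(
			f.ClientSet, f.InternalClientset,
			extensions.Kind("DaemonSet"), daemon.Namespace, daemon.Name))
	}()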