Merge pull request #48207 from gmarek/density_clients

Automatic merge from submit-queue (batch tested with PRs 48004, 48205, 48130, 48207).

Use multiple clients in the density test.

Fixes #47954

commit 5731e0d6c9
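At a glance, the change turns the single ClientSet / InternalClientset fields of DensityTestConfig into slices (ClientSets / InternalClientsets): call sites that only need one client read index 0, the cleanup loops pick a client with i % numberOfClients, and the code path that still has only one client wraps it as a one-element slice. The sketch below is illustrative only (fakeClient and pickClient are made-up names, not types from the e2e framework); it just isolates that selection pattern:

// client_rotation_sketch.go -- illustrative only; fakeClient and pickClient
// are hypothetical stand-ins, not types from the Kubernetes e2e framework.
package main

import "fmt"

type fakeClient struct{ id int }

// pickClient mirrors the dtc.ClientSets[i%numberOfClients] pattern used in
// cleanupDensityTest: work item i is always routed to the same client, and
// the calls are spread evenly across however many clients were created.
func pickClient(clients []fakeClient, i int) fakeClient {
	return clients[i%len(clients)]
}

func main() {
	// Two clients, as in the createClients(2) call added by this PR.
	clients := []fakeClient{{id: 0}, {id: 1}}
	for i := 0; i < 5; i++ {
		c := pickClient(clients, i)
		fmt.Printf("config %d -> client %d\n", i, c.id)
	}
}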
@@ -59,11 +59,11 @@ const (
 var MaxContainerFailures = 0
 
 type DensityTestConfig struct {
 	Configs []testutils.RunObjectConfig
-	ClientSet clientset.Interface
-	InternalClientset internalclientset.Interface
+	ClientSets []clientset.Interface
+	InternalClientsets []internalclientset.Interface
 	PollInterval time.Duration
 	PodCount int
 	// What kind of resource we want to create
 	kind schema.GroupKind
 	SecretConfigs []*testutils.SecretConfig
@@ -220,7 +220,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 		}()
 	}
 	logStopCh := make(chan struct{})
-	go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
+	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
 	wg.Wait()
 	startupTime := time.Now().Sub(startTime)
 	close(logStopCh)
@@ -229,7 +229,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 
 	// Print some data about Pod to Node allocation
 	By("Printing Pod to Node allocation data")
-	podList, err := dtc.ClientSet.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
+	podList, err := dtc.ClientSets[0].Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 	framework.ExpectNoError(err)
 	pausePodAllocation := make(map[string]int)
 	systemPodAllocation := make(map[string][]string)
@@ -254,6 +254,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
 func cleanupDensityTest(dtc DensityTestConfig) {
 	defer GinkgoRecover()
 	By("Deleting created Collections")
+	numberOfClients := len(dtc.ClientSets)
 	// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
 	for i := range dtc.Configs {
 		name := dtc.Configs[i].GetName()
@@ -261,11 +262,11 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 		kind := dtc.Configs[i].GetKind()
 		if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
 			By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
-			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
+			err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
 			framework.ExpectNoError(err)
 		} else {
 			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
-			err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
+			err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name)
 			framework.ExpectNoError(err)
 		}
 	}
@@ -279,8 +280,8 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 	}
 	for i := range dtc.DaemonConfigs {
 		framework.ExpectNoError(framework.DeleteResourceAndPods(
-			dtc.ClientSet,
-			dtc.InternalClientset,
+			dtc.ClientSets[i%numberOfClients],
+			dtc.InternalClientsets[i%numberOfClients],
 			extensions.Kind("DaemonSet"),
 			dtc.DaemonConfigs[i].Namespace,
 			dtc.DaemonConfigs[i].Name,
@@ -535,15 +536,18 @@ var _ = framework.KubeDescribe("Density", func() {
 				}
 			}
 
+			// Single client is running out of http2 connections in delete phase, hence we need more.
+			clients, internalClients, err = createClients(2)
+
 			dConfig := DensityTestConfig{
-				ClientSet: f.ClientSet,
-				InternalClientset: f.InternalClientset,
+				ClientSets: clients,
+				InternalClientsets: internalClients,
 				Configs: configs,
 				PodCount: totalPods,
 				PollInterval: DensityPollInterval,
 				kind: itArg.kind,
 				SecretConfigs: secretConfigs,
 				ConfigMapConfigs: configMapConfigs,
 			}
 
 			for i := 0; i < itArg.daemonsPerNode; i++ {
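The added comment records the motivation for creating two clients: all of one client's requests are multiplexed as streams over a single HTTP/2 connection, and servers typically cap how many streams may be in flight on one connection at a time, so the delete phase (which, per the comment earlier in this diff, issues an explicit API call for every pod) can exhaust what one connection will carry. Spreading calls across several clientsets spreads that load. The sketch below is purely illustrative and is not the test's actual control flow; fakeClient and deleteCollection are hypothetical stand-ins, and it only shows index-based client assignment combined with concurrent workers:

// delete_fanout_sketch.go -- a minimal sketch under the assumption that each
// client owns its own connection; deleteCollection is a hypothetical helper
// standing in for the deletion helpers used by the real test.
package main

import (
	"fmt"
	"sync"
)

type fakeClient struct{ id int }

func deleteCollection(c fakeClient, name string) {
	fmt.Printf("client %d deleting %s\n", c.id, name)
}

func main() {
	clients := []fakeClient{{id: 0}, {id: 1}}
	numberOfClients := len(clients)
	names := []string{"rc-0", "rc-1", "rc-2", "rc-3"}

	var wg sync.WaitGroup
	for i, name := range names {
		wg.Add(1)
		go func(i int, name string) {
			defer wg.Done()
			// Same modulo selection as the cleanup loops in the diff above.
			deleteCollection(clients[i%numberOfClients], name)
		}(i, name)
	}
	wg.Wait()
}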
@@ -804,7 +808,7 @@ var _ = framework.KubeDescribe("Density", func() {
 				}
 			}
 			dConfig := DensityTestConfig{
-				ClientSet: f.ClientSet,
+				ClientSets: []clientset.Interface{f.ClientSet},
 				Configs: configs,
 				PodCount: totalPods,
 				PollInterval: DensityPollInterval,

@@ -288,9 +288,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 	}
 })
 
-func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) {
-	clients := make([]*clientset.Clientset, numberOfClients)
-	internalClients := make([]*internalclientset.Clientset, numberOfClients)
+func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, error) {
+	clients := make([]clientset.Interface, numberOfClients)
+	internalClients := make([]internalclientset.Interface, numberOfClients)
 	for i := 0; i < numberOfClients; i++ {
 		config, err := framework.LoadConfig()
 		Expect(err).NotTo(HaveOccurred())
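The last hunk changes createClients to return interface slices ([]clientset.Interface, []internalclientset.Interface) rather than slices of concrete *Clientset pointers, which is what lets the second DensityTestConfig above wrap f.ClientSet as a one-element []clientset.Interface. Below is a rough, standalone approximation of building several clientsets from one kubeconfig with client-go; it is a sketch under assumptions (the kubeconfig path is a placeholder, and the real helper lives in the e2e framework, uses framework.LoadConfig, and may tune QPS, burst, or the transport per client so the clients do not share one cached connection; none of that appears in this hunk):

// create_clients_sketch.go -- illustrative only; the kubeconfig path is a
// placeholder and the real e2e helper may configure each rest.Config further
// so that the resulting clients do not end up sharing one cached transport.
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func createClientsSketch(kubeconfig string, numberOfClients int) ([]kubernetes.Interface, error) {
	clients := make([]kubernetes.Interface, numberOfClients)
	for i := 0; i < numberOfClients; i++ {
		// One rest.Config per client, mirroring the per-iteration
		// framework.LoadConfig() call in the hunk above.
		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
		c, err := kubernetes.NewForConfig(config)
		if err != nil {
			return nil, err
		}
		clients[i] = c
	}
	return clients, nil
}

func main() {
	clients, err := createClientsSketch("/path/to/kubeconfig", 2)
	if err != nil {
		fmt.Println("failed to create clients:", err)
		return
	}
	fmt.Printf("created %d clients\n", len(clients))
}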