removes custom scalers from kubectl
@@ -35,6 +35,7 @@ import (
 	utiluuid "k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
+	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/apis/batch"
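The added import above, scaleclient "k8s.io/client-go/scale", is the generic scale client that replaces the custom per-kind scalers this commit removes; the test config carries it as a ScalesGetter (next hunk). As background only, here is a minimal, hypothetical sketch of how such a ScalesGetter can be built from a rest.Config. None of these helpers appear in this commit (the e2e framework's createClients does its own wiring), and package paths and signatures follow recent client-go, so they may differ from the version vendored at the time.

// Hypothetical sketch (not from this commit): building a scaleclient.ScalesGetter.
package example

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	scaleclient "k8s.io/client-go/scale"
)

// newScalesGetter shows the pieces a ScalesGetter needs: a RESTMapper to resolve
// kinds to resources, an API path resolver, and a ScaleKindResolver to pick the
// right scale subresource version. The helper name is made up for illustration.
func newScalesGetter(cfg *rest.Config) (scaleclient.ScalesGetter, error) {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	groupResources, err := restmapper.GetAPIGroupResources(dc)
	if err != nil {
		return nil, err
	}
	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)
	resolver := scaleclient.NewDiscoveryScaleKindResolver(dc)
	return scaleclient.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)
}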
@@ -66,6 +67,7 @@ type DensityTestConfig struct {
 	Configs            []testutils.RunObjectConfig
 	ClientSets         []clientset.Interface
 	InternalClientsets []internalclientset.Interface
+	ScaleClients       []scaleclient.ScalesGetter
 	PollInterval       time.Duration
 	PodCount           int
 	// What kind of resource we want to create
@@ -116,6 +118,7 @@ func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *t
 		framework.ExpectNoError(framework.DeleteResourceAndPods(
 			dtc.ClientSets[i%numberOfClients],
 			dtc.InternalClientsets[i%numberOfClients],
+			dtc.ScaleClients[i%numberOfClients],
 			extensions.Kind("DaemonSet"),
 			dtc.DaemonConfigs[i].Namespace,
 			dtc.DaemonConfigs[i].Name,
@@ -320,7 +323,7 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
 			framework.ExpectNoError(err)
 		} else {
 			By(fmt.Sprintf("Cleaning up the %v and pods", kind))
-			err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name)
+			err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], dtc.ScaleClients[i%numberOfClients], kind, namespace, name)
 			framework.ExpectNoError(err)
 		}
 	}
@@ -613,11 +616,12 @@ var _ = SIGDescribe("Density", func() {
 		}
 
 		// Single client is running out of http2 connections in delete phase, hence we need more.
-		clients, internalClients, _, err = createClients(2)
+		clients, internalClients, scalesClients, err = createClients(2)
 
 		dConfig := DensityTestConfig{
 			ClientSets:         clients,
 			InternalClientsets: internalClients,
+			ScaleClients:       scalesClients,
 			Configs:            configs,
 			PodCount:           totalPods,
 			PollInterval:       DensityPollInterval,
@@ -289,6 +289,7 @@ var _ = SIGDescribe("Load capacity", func() {
 			framework.ExpectNoError(framework.DeleteResourceAndPods(
 				f.ClientSet,
 				f.InternalClientset,
+				f.ScalesGetter,
 				extensions.Kind("DaemonSet"),
 				config.Namespace,
 				config.Name,
@@ -700,7 +701,7 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
 			fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
 	} else {
 		framework.ExpectNoError(framework.DeleteResourceAndPods(
-			config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
+			config.GetClient(), config.GetInternalClient(), config.GetScalesGetter(), config.GetKind(), config.GetNamespace(), config.GetName()),
 			fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
 	}
 }
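For background on what the threaded-through ScalesGetter gives DeleteResourceAndPods: the generic scale client reads and writes the scale subresource of any scalable kind, so the framework no longer needs a kubectl scaler per resource type. Below is a hypothetical, self-contained illustration of that pattern, scaling an arbitrary resource to zero before deletion; it is not the framework's code, the helper name is invented, and the signatures follow recent client-go rather than the version vendored here.

// Hypothetical illustration (not from this commit): using a ScalesGetter generically.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// scaleToZero reads the scale subresource of any scalable resource and sets its
// replica count to zero, the kind of resource-agnostic operation the generic
// scale client enables.
func scaleToZero(scales scaleclient.ScalesGetter, namespace, name string, gr schema.GroupResource) error {
	ctx := context.Background()
	scale, err := scales.Scales(namespace).Get(ctx, gr, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("reading scale for %s/%s: %w", namespace, name, err)
	}
	scale.Spec.Replicas = 0
	_, err = scales.Scales(namespace).Update(ctx, gr, scale, metav1.UpdateOptions{})
	return err
}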