remove usages of internal clientset in e2e framework

Signed-off-by: Andrew Sy Kim <kiman@vmware.com>
This commit is contained in:
Andrew Sy Kim 2019-03-22 12:09:07 -07:00
parent da018a6bfa
commit 1470df7a05
29 changed files with 158 additions and 223 deletions

View File

@@ -57,6 +57,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library", "//test/e2e/common:go_default_library",

View File

@@ -30,7 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/client-go/kubernetes/scheme"
batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job" "k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -464,7 +464,7 @@ func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reaso
if err != nil { if err != nil {
return false, err return false, err
} }
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj) events, err := c.CoreV1().Events(ns).Search(scheme.Scheme, sj)
if err != nil { if err != nil {
return false, err return false, err
} }

View File

@@ -200,7 +200,6 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Additionally some of them might scale the rc during the test. // Additionally some of them might scale the rc during the test.
config = testutils.RCConfig{ config = testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName, Name: rcName,
Namespace: ns, Namespace: ns,
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),

View File

@@ -93,7 +93,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024 nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory. memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1 replicas := 1
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset, f.ScalesGetter) resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
defer resourceConsumer.CleanUp() defer resourceConsumer.CleanUp()
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.

View File

@@ -409,7 +409,6 @@ func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func()
func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig { func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig {
return &testutils.RCConfig{ return &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: timeout, Timeout: timeout,
@@ -469,7 +468,6 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
request := int64(1024 * 1024 * megabytes / replicas) request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: timeout, Timeout: timeout,

View File

@@ -1322,7 +1322,6 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
request := int64(1024 * 1024 * megabytes / replicas) request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: timeout, Timeout: timeout,
@@ -1565,7 +1564,6 @@ func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error { func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
@@ -1591,7 +1589,6 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Affinity: buildAntiAffinity(antiAffinityLabels), Affinity: buildAntiAffinity(antiAffinityLabels),
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: namespace, Namespace: namespace,
Timeout: scaleUpTimeout, Timeout: scaleUpTimeout,
@@ -1615,7 +1612,6 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
Affinity: buildAntiAffinity(antiAffinityLabels), Affinity: buildAntiAffinity(antiAffinityLabels),
Volumes: volumes, Volumes: volumes,
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: namespace, Namespace: namespace,
Timeout: scaleUpTimeout, Timeout: scaleUpTimeout,
@@ -1696,7 +1692,6 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
} }
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: namespace, Namespace: namespace,
Timeout: defaultTimeout, Timeout: defaultTimeout,

View File

@@ -116,7 +116,7 @@ type HPAScaleTest struct {
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) { func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter) rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp() defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods) hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name) defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)

View File

@@ -45,7 +45,6 @@ go_library(
deps = [ deps = [
"//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library", "//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library", "//pkg/kubelet:go_default_library",
"//pkg/kubelet/events:go_default_library", "//pkg/kubelet/events:go_default_library",

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
@@ -86,7 +85,6 @@ type ResourceConsumer struct {
kind schema.GroupVersionKind kind schema.GroupVersionKind
nsName string nsName string
clientSet clientset.Interface clientSet clientset.Interface
internalClientset *internalclientset.Clientset
scaleClient scaleclient.ScalesGetter scaleClient scaleclient.ScalesGetter
cpu chan int cpu chan int
mem chan int mem chan int
@@ -106,20 +104,20 @@ func GetResourceConsumerImage() string {
return resourceConsumerImage return resourceConsumerImage
} }
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil) dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
} }
// TODO this still defaults to replication controller // TODO this still defaults to replication controller
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds, return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil) initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
} }
func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds, return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, internalClientset, scaleClient, podAnnotations, serviceAnnotations) dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, scaleClient, podAnnotations, serviceAnnotations)
} }
/* /*
@@ -130,21 +128,20 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
*/ */
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
if podAnnotations == nil { if podAnnotations == nil {
podAnnotations = make(map[string]string) podAnnotations = make(map[string]string)
} }
if serviceAnnotations == nil { if serviceAnnotations == nil {
serviceAnnotations = make(map[string]string) serviceAnnotations = make(map[string]string)
} }
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations) runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations)
rc := &ResourceConsumer{ rc := &ResourceConsumer{
name: name, name: name,
controllerName: name + "-ctrl", controllerName: name + "-ctrl",
kind: kind, kind: kind,
nsName: nsName, nsName: nsName,
clientSet: clientset, clientSet: clientset,
internalClientset: internalClientset,
scaleClient: scaleClient, scaleClient: scaleClient,
cpu: make(chan int), cpu: make(chan int),
mem: make(chan int), mem: make(chan int),
@@ -436,7 +433,7 @@ func (rc *ResourceConsumer) CleanUp() {
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
} }
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) { func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{ _, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -458,7 +455,6 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
rcConfig := testutils.RCConfig{ rcConfig := testutils.RCConfig{
Client: c, Client: c,
InternalClient: internalClient,
Image: resourceConsumerImage, Image: resourceConsumerImage,
Name: name, Name: name,
Namespace: ns, Namespace: ns,

View File

@@ -47,7 +47,6 @@ go_library(
"//pkg/apis/batch:go_default_library", "//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library", "//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library", "//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library", "//pkg/controller/deployment/util:go_default_library",

View File

@@ -47,7 +47,6 @@ import (
scaleclient "k8s.io/client-go/scale" scaleclient "k8s.io/client-go/scale"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework/metrics"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
@@ -75,7 +74,6 @@ type Framework struct {
ClientSet clientset.Interface ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface KubemarkExternalClusterClientSet clientset.Interface
InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset AggregatorClient *aggregatorclient.Clientset
DynamicClient dynamic.Interface DynamicClient dynamic.Interface
@@ -180,8 +178,6 @@ func (f *Framework) BeforeEach() {
} }
f.ClientSet, err = clientset.NewForConfig(config) f.ClientSet, err = clientset.NewForConfig(config)
ExpectNoError(err) ExpectNoError(err)
f.InternalClientset, err = internalclientset.NewForConfig(config)
ExpectNoError(err)
f.AggregatorClient, err = aggregatorclient.NewForConfig(config) f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
ExpectNoError(err) ExpectNoError(err)
f.DynamicClient, err = dynamic.NewForConfig(config) f.DynamicClient, err = dynamic.NewForConfig(config)

View File

@@ -39,7 +39,6 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry" "k8s.io/client-go/util/retry"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1321,7 +1320,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
// StartServeHostnameService creates a replication controller that serves its // StartServeHostnameService creates a replication controller that serves its
// hostname and a service on top of it. // hostname and a service on top of it.
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
podNames := make([]string, replicas) podNames := make([]string, replicas)
name := svc.ObjectMeta.Name name := svc.ObjectMeta.Name
By("creating service " + name + " in namespace " + ns) By("creating service " + name + " in namespace " + ns)
@@ -1334,7 +1333,6 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
maxContainerFailures := 0 maxContainerFailures := 0
config := testutils.RCConfig{ config := testutils.RCConfig{
Client: c, Client: c,
InternalClient: internalClient,
Image: ServeHostnameImage, Image: ServeHostnameImage,
Name: name, Name: name,
Namespace: ns, Namespace: ns,

View File

@@ -80,7 +80,6 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
@@ -2133,13 +2132,6 @@ func LoadConfig() (*restclient.Config, error) {
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig() return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
} }
func LoadInternalClientset() (*internalclientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return internalclientset.NewForConfig(config)
}
func LoadClientset() (*clientset.Clientset, error) { func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig() config, err := LoadConfig()

View File

@@ -105,7 +105,7 @@ func prometheusPodCustomMetricQuery(namespace, podNamePrefix string) string {
func consumeCPUResources(f *framework.Framework, consumerName string, cpuUsage int) *common.ResourceConsumer { func consumeCPUResources(f *framework.Framework, consumerName string, cpuUsage int) *common.ResourceConsumer {
return common.NewDynamicResourceConsumer(consumerName, f.Namespace.Name, common.KindDeployment, 1, cpuUsage, return common.NewDynamicResourceConsumer(consumerName, f.Namespace.Name, common.KindDeployment, 1, cpuUsage,
memoryUsed, 0, int64(cpuUsage), memoryLimit, f.ClientSet, f.InternalClientset, f.ScalesGetter) memoryUsed, 0, int64(cpuUsage), memoryLimit, f.ClientSet, f.ScalesGetter)
} }
func exportCustomMetricFromPod(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer { func exportCustomMetricFromPod(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer {
@@ -114,7 +114,7 @@ func exportCustomMetricFromPod(f *framework.Framework, consumerName string, metr
"prometheus.io/path": "/metrics", "prometheus.io/path": "/metrics",
"prometheus.io/port": "8080", "prometheus.io/port": "8080",
} }
return common.NewMetricExporter(consumerName, f.Namespace.Name, podAnnotations, nil, metricValue, f.ClientSet, f.InternalClientset, f.ScalesGetter) return common.NewMetricExporter(consumerName, f.Namespace.Name, podAnnotations, nil, metricValue, f.ClientSet, f.ScalesGetter)
} }
func exportCustomMetricFromService(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer { func exportCustomMetricFromService(f *framework.Framework, consumerName string, metricValue int) *common.ResourceConsumer {
@@ -123,7 +123,7 @@ func exportCustomMetricFromService(f *framework.Framework, consumerName string,
"prometheus.io/path": "/metrics", "prometheus.io/path": "/metrics",
"prometheus.io/port": "8080", "prometheus.io/port": "8080",
} }
return common.NewMetricExporter(consumerName, f.Namespace.Name, nil, serviceAnnotations, metricValue, f.ClientSet, f.InternalClientset, f.ScalesGetter) return common.NewMetricExporter(consumerName, f.Namespace.Name, nil, serviceAnnotations, metricValue, f.ClientSet, f.ScalesGetter)
} }
func validateMetricAvailableForAllNodes(c clientset.Interface, metric string, expectedNodesNames []string) error { func validateMetricAvailableForAllNodes(c clientset.Interface, metric string, expectedNodesNames []string) error {

View File

@@ -101,7 +101,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
framework.ExpectNoError(err) framework.ExpectNoError(err)
rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset, f.ScalesGetter) rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp() defer rc.CleanUp()
rc.WaitForReplicas(pods, 15*time.Minute) rc.WaitForReplicas(pods, 15*time.Minute)

View File

@@ -32,7 +32,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/network", importpath = "k8s.io/kubernetes/test/e2e/network",
deps = [ deps = [
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller/endpoint:go_default_library", "//pkg/controller/endpoint:go_default_library",
"//pkg/master/ports:go_default_library", "//pkg/master/ports:go_default_library",

View File

@@ -123,7 +123,6 @@ var _ = SIGDescribe("Proxy", func() {
pods := []*v1.Pod{} pods := []*v1.Pod{}
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Image: imageutils.GetE2EImage(imageutils.Porter), Image: imageutils.GetE2EImage(imageutils.Porter),
Name: service.Name, Name: service.Name,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,

View File

@@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider" cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -80,12 +79,10 @@ var _ = SIGDescribe("Services", func() {
f := framework.NewDefaultFramework("services") f := framework.NewDefaultFramework("services")
var cs clientset.Interface var cs clientset.Interface
var internalClientset internalclientset.Interface
serviceLBNames := []string{} serviceLBNames := []string{}
BeforeEach(func() { BeforeEach(func() {
cs = f.ClientSet cs = f.ClientSet
internalClientset = f.InternalClientset
}) })
AfterEach(func() { AfterEach(func() {
@@ -322,10 +319,10 @@ var _ = SIGDescribe("Services", func() {
numPods, servicePort := 3, defaultServeHostnameServicePort numPods, servicePort := 3, defaultServeHostnameServicePort
By("creating service1 in namespace " + ns) By("creating service1 in namespace " + ns)
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
By("creating service2 in namespace " + ns) By("creating service2 in namespace " + ns)
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
hosts, err := framework.NodeSSHHosts(cs) hosts, err := framework.NodeSSHHosts(cs)
@@ -352,7 +349,7 @@ var _ = SIGDescribe("Services", func() {
// Start another service and verify both are up. // Start another service and verify both are up.
By("creating service3 in namespace " + ns) By("creating service3 in namespace " + ns)
podNames3, svc3IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service3"), ns, numPods) podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns)
if svc2IP == svc3IP { if svc2IP == svc3IP {
@@ -379,13 +376,13 @@ var _ = SIGDescribe("Services", func() {
defer func() { defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1))
}() }()
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc1), ns, numPods) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
defer func() { defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2))
}() }()
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc2), ns, numPods) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
if svc1IP == svc2IP { if svc1IP == svc2IP {
@@ -432,7 +429,7 @@ var _ = SIGDescribe("Services", func() {
defer func() { defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))
}() }()
podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
hosts, err := framework.NodeSSHHosts(cs) hosts, err := framework.NodeSSHHosts(cs)
@ -459,7 +456,7 @@ var _ = SIGDescribe("Services", func() {
defer func() { defer func() {
framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2")) framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2"))
}() }()
podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)
if svc1IP == svc2IP { if svc1IP == svc2IP {
@ -1743,12 +1740,12 @@ var _ = SIGDescribe("Services", func() {
By("creating service-disabled in namespace " + ns) By("creating service-disabled in namespace " + ns)
svcDisabled := getServeHostnameService("service-disabled") svcDisabled := getServeHostnameService("service-disabled")
svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels
_, svcDisabledIP, err := framework.StartServeHostnameService(cs, internalClientset, svcDisabled, ns, numPods) _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns)
By("creating service in namespace " + ns) By("creating service in namespace " + ns)
svcToggled := getServeHostnameService("service") svcToggled := getServeHostnameService("service")
podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, internalClientset, svcToggled, ns, numPods) podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns)
jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name) jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name)
@ -2211,7 +2208,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf
By("creating service in namespace " + ns) By("creating service in namespace " + ns)
serviceType := svc.Spec.Type serviceType := svc.Spec.Type
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods) _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
defer func() { defer func() {
framework.StopServeHostnameService(cs, ns, serviceName) framework.StopServeHostnameService(cs, ns, serviceName)
@ -2262,7 +2259,7 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface
By("creating service in namespace " + ns) By("creating service in namespace " + ns)
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
_, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods) _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
jig := framework.NewServiceTestJig(cs, serviceName) jig := framework.NewServiceTestJig(cs, serviceName)
By("waiting for loadbalancer for service " + ns + "/" + serviceName) By("waiting for loadbalancer for service " + ns + "/" + serviceName)

View File

@ -129,7 +129,6 @@ var _ = SIGDescribe("Service endpoints latency", func() {
func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) { func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),
Name: "svc-latency-rc", Name: "svc-latency-rc",
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,

View File

@ -316,7 +316,6 @@ var _ = SIGDescribe("kubelet", func() {
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),

View File

@ -71,7 +71,6 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
// TODO: Use a more realistic workload // TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),

View File

@ -15,7 +15,6 @@ go_library(
"//pkg/apis/batch:go_default_library", "//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -42,7 +42,6 @@ import (
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer" "k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
@ -72,7 +71,6 @@ var nodeCount = 0
type DensityTestConfig struct { type DensityTestConfig struct {
Configs []testutils.RunObjectConfig Configs []testutils.RunObjectConfig
ClientSets []clientset.Interface ClientSets []clientset.Interface
InternalClientsets []internalclientset.Interface
ScaleClients []scaleclient.ScalesGetter ScaleClients []scaleclient.ScalesGetter
PollInterval time.Duration PollInterval time.Duration
PodCount int PodCount int
@ -644,7 +642,7 @@ var _ = SIGDescribe("Density", func() {
} }
timeout += 3 * time.Minute timeout += 3 * time.Minute
// createClients is defined in load.go // createClients is defined in load.go
clients, internalClients, scalesClients, err := createClients(numberOfCollections) clients, scalesClients, err := createClients(numberOfCollections)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for i := 0; i < numberOfCollections; i++ { for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name nsName := namespaces[i].Name
@ -675,7 +673,6 @@ var _ = SIGDescribe("Density", func() {
name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid) name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
baseConfig := &testutils.RCConfig{ baseConfig := &testutils.RCConfig{
Client: clients[i], Client: clients[i],
InternalClient: internalClients[i],
ScalesGetter: scalesClients[i], ScalesGetter: scalesClients[i],
Image: imageutils.GetPauseImageName(), Image: imageutils.GetPauseImageName(),
Name: name, Name: name,
@ -722,11 +719,10 @@ var _ = SIGDescribe("Density", func() {
} }
// Single client is running out of http2 connections in delete phase, hence we need more. // Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, scalesClients, err = createClients(2) clients, scalesClients, err = createClients(2)
framework.ExpectNoError(err) framework.ExpectNoError(err)
dConfig := DensityTestConfig{ dConfig := DensityTestConfig{
ClientSets: clients, ClientSets: clients,
InternalClientsets: internalClients,
ScaleClients: scalesClients, ScaleClients: scalesClients,
Configs: configs, Configs: configs,
PodCount: totalPods, PodCount: totalPods,

View File

@ -49,7 +49,6 @@ import (
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer" "k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
@ -341,9 +340,8 @@ var _ = SIGDescribe("Load capacity", func() {
} }
}) })
func createClients(numberOfClients int) ([]clientset.Interface, []internalclientset.Interface, []scaleclient.ScalesGetter, error) { func createClients(numberOfClients int) ([]clientset.Interface, []scaleclient.ScalesGetter, error) {
clients := make([]clientset.Interface, numberOfClients) clients := make([]clientset.Interface, numberOfClients)
internalClients := make([]internalclientset.Interface, numberOfClients)
scalesClients := make([]scaleclient.ScalesGetter, numberOfClients) scalesClients := make([]scaleclient.ScalesGetter, numberOfClients)
for i := 0; i < numberOfClients; i++ { for i := 0; i < numberOfClients; i++ {
@ -361,11 +359,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
// each client here. // each client here.
transportConfig, err := config.TransportConfig() transportConfig, err := config.TransportConfig()
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
tlsConfig, err := transport.TLSConfigFor(transportConfig) tlsConfig, err := transport.TLSConfigFor(transportConfig)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
config.Transport = utilnet.SetTransportDefaults(&http.Transport{ config.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment, Proxy: http.ProxyFromEnvironment,
@ -387,14 +385,9 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
c, err := clientset.NewForConfig(config) c, err := clientset.NewForConfig(config)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
clients[i] = c clients[i] = c
internalClient, err := internalclientset.NewForConfig(config)
if err != nil {
return nil, nil, nil, err
}
internalClients[i] = internalClient
// create scale client, if GroupVersion or NegotiatedSerializer are not set // create scale client, if GroupVersion or NegotiatedSerializer are not set
// assign default values - these fields are mandatory (required by RESTClientFor). // assign default values - these fields are mandatory (required by RESTClientFor).
@ -406,11 +399,11 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
} }
restClient, err := restclient.RESTClientFor(config) restClient, err := restclient.RESTClientFor(config)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
discoClient, err := discovery.NewDiscoveryClientForConfig(config) discoClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, err
} }
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient) cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient) restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
@ -418,7 +411,7 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient) resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) scalesClients[i] = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
} }
return clients, internalClients, scalesClients, nil return clients, scalesClients, nil
} }
func computePodCounts(total int) (int, int, int) { func computePodCounts(total int) (int, int, int) {
@ -478,12 +471,11 @@ func generateConfigs(
// Create a number of clients to better simulate real usecase // Create a number of clients to better simulate real usecase
// where not everyone is using exactly the same client. // where not everyone is using exactly the same client.
rcsPerClient := 20 rcsPerClient := 20
clients, internalClients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) clients, scalesClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for i := 0; i < len(configs); i++ { for i := 0; i < len(configs); i++ {
configs[i].SetClient(clients[i%len(clients)]) configs[i].SetClient(clients[i%len(clients)])
configs[i].SetInternalClient(internalClients[i%len(internalClients)])
configs[i].SetScalesClient(scalesClients[i%len(clients)]) configs[i].SetScalesClient(scalesClients[i%len(clients)])
} }
for i := 0; i < len(secretConfigs); i++ { for i := 0; i < len(secretConfigs); i++ {
@ -542,7 +534,6 @@ func GenerateConfigsForGroup(
baseConfig := &testutils.RCConfig{ baseConfig := &testutils.RCConfig{
Client: nil, // this will be overwritten later Client: nil, // this will be overwritten later
InternalClient: nil, // this will be overwritten later
Name: groupName + "-" + strconv.Itoa(i), Name: groupName + "-" + strconv.Itoa(i),
Namespace: namespace, Namespace: namespace,
Timeout: UnreadyNodeToleration, Timeout: UnreadyNodeToleration,

View File

@ -273,7 +273,6 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: defaultTimeout, Timeout: defaultTimeout,

View File

@ -788,7 +788,6 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
By(fmt.Sprintf("Running RC which reserves host port")) By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: defaultTimeout, Timeout: defaultTimeout,

View File

@ -49,7 +49,6 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
500, /* cpuLimit */ 500, /* cpuLimit */
200, /* memLimit */ 200, /* memLimit */
f.ClientSet, f.ClientSet,
f.InternalClientset,
f.ScalesGetter) f.ScalesGetter)
t.hpa = common.CreateCPUHorizontalPodAutoscaler( t.hpa = common.CreateCPUHorizontalPodAutoscaler(
t.rc, t.rc,

View File

@ -30,7 +30,6 @@ go_library(
"//pkg/apis/batch:go_default_library", "//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller/deployment/util:go_default_library", "//pkg/controller/deployment/util:go_default_library",
"//pkg/kubectl:go_default_library", "//pkg/kubectl:go_default_library",
"//pkg/util/labels:go_default_library", "//pkg/util/labels:go_default_library",

View File

@ -45,7 +45,6 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/klog" "k8s.io/klog"
) )
@ -99,10 +98,8 @@ type RunObjectConfig interface {
GetNamespace() string GetNamespace() string
GetKind() schema.GroupKind GetKind() schema.GroupKind
GetClient() clientset.Interface GetClient() clientset.Interface
GetInternalClient() internalclientset.Interface
GetScalesGetter() scaleclient.ScalesGetter GetScalesGetter() scaleclient.ScalesGetter
SetClient(clientset.Interface) SetClient(clientset.Interface)
SetInternalClient(internalclientset.Interface)
SetScalesClient(scaleclient.ScalesGetter) SetScalesClient(scaleclient.ScalesGetter)
GetReplicas() int GetReplicas() int
GetLabelValue(string) (string, bool) GetLabelValue(string) (string, bool)
@ -112,7 +109,6 @@ type RunObjectConfig interface {
type RCConfig struct { type RCConfig struct {
Affinity *v1.Affinity Affinity *v1.Affinity
Client clientset.Interface Client clientset.Interface
InternalClient internalclientset.Interface
ScalesGetter scaleclient.ScalesGetter ScalesGetter scaleclient.ScalesGetter
Image string Image string
Command []string Command []string
@ -527,10 +523,6 @@ func (config *RCConfig) GetClient() clientset.Interface {
return config.Client return config.Client
} }
func (config *RCConfig) GetInternalClient() internalclientset.Interface {
return config.InternalClient
}
func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter { func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter {
return config.ScalesGetter return config.ScalesGetter
} }
@ -539,10 +531,6 @@ func (config *RCConfig) SetClient(c clientset.Interface) {
config.Client = c config.Client = c
} }
func (config *RCConfig) SetInternalClient(c internalclientset.Interface) {
config.InternalClient = c
}
func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) { func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) {
config.ScalesGetter = getter config.ScalesGetter = getter
} }