Merge pull request #33281 from deads2k/client-12-simplify-adapter

Automatic merge from submit-queue

remove the clientset adapter

This removes the clientset adapter entirely. One focused adapter remains in a single e2e test; every other reference has been removed.
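
For orientation, the pattern being removed and its replacement look like this (a minimal sketch distilled from the hunks below, not a verbatim excerpt; the two fragments are alternatives, and config stands for a *restclient.Config loaded from kubeconfig):

	// Before: build an unversioned client, then wrap it in the adapter.
	cl, err := client.New(config) // client "k8s.io/kubernetes/pkg/client/unversioned"
	if err != nil {
		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
	}
	cs := adapter.FromUnversionedClient(cl) // adapter package deleted by this PR

	// After: construct the generated internal clientset directly from the config.
	cs, err := internalclientset.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
	}

In the e2e framework the new clientset is exposed as f.ClientSet, so helpers that used to adapt f.Client at each call site now take the clientset as an explicit parameter.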
Kubernetes Submit Queue 2016-09-26 07:35:08 -07:00 committed by GitHub
commit 69083bcfce
29 changed files with 137 additions and 247 deletions

View File

@@ -20,10 +20,11 @@ import (
 	"fmt"
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
 	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/client/restclient"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
@@ -66,7 +67,7 @@ func (c *HollowNodeConfig) addFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&c.ContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType of requests sent to apiserver.")
 }
 
-func (c *HollowNodeConfig) createClientFromFile() (*client.Client, error) {
+func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
 	clientConfig, err := clientcmd.LoadFromFile(c.KubeconfigPath)
 	if err != nil {
 		return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", c.KubeconfigPath, err)
@@ -76,15 +77,10 @@ func (c *HollowNodeConfig) createClientFromFile() (*client.Client, error) {
 		return nil, fmt.Errorf("error while creating kubeconfig: %v", err)
 	}
 	config.ContentType = c.ContentType
-	client, err := client.New(config)
-	if err != nil {
-		return nil, fmt.Errorf("error while creating client: %v", err)
-	}
-	return client, nil
+	return config, nil
 }
 
 func main() {
 	config := HollowNodeConfig{}
 	config.addFlags(pflag.CommandLine)
 	flag.InitFlags()
@@ -94,10 +90,17 @@ func main() {
 	}
 
 	// create a client to communicate with API server.
-	cl, err := config.createClientFromFile()
-	clientset := clientset.FromUnversionedClient(cl)
+	clientConfig, err := config.createClientConfigFromFile()
 	if err != nil {
-		glog.Fatal("Failed to create a Client. Exiting.")
+		glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
+	}
+
+	cl, err := client.New(clientConfig)
+	if err != nil {
+		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
+	}
+	clientset, err := internalclientset.NewForConfig(clientConfig)
+	if err != nil {
+		glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
 	}
 
 	if config.Morph == "kubelet" {

View File

@@ -1,79 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package internalclientset
-
-import (
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedauthentication "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned"
-	unversionedauthorization "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned"
-	unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned"
-	unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
-	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
-	unversionedstorage "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/unversioned"
-	"k8s.io/kubernetes/pkg/client/typed/discovery"
-	"k8s.io/kubernetes/pkg/client/unversioned"
-)
-
-// FromUnversionedClient adapts a unversioned.Client to a internalclientset.Clientset.
-// This function is temporary. We will remove it when everyone has moved to using
-// Clientset. New code should NOT use this function.
-func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset {
-	var clientset internalclientset.Clientset
-	if c != nil {
-		clientset.CoreClient = unversionedcore.New(c.RESTClient)
-	} else {
-		clientset.CoreClient = unversionedcore.New(nil)
-	}
-	if c != nil && c.ExtensionsClient != nil {
-		clientset.ExtensionsClient = unversionedextensions.New(c.ExtensionsClient.RESTClient)
-	} else {
-		clientset.ExtensionsClient = unversionedextensions.New(nil)
-	}
-	if c != nil && c.BatchClient != nil {
-		clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient)
-	} else {
-		clientset.BatchClient = unversionedbatch.New(nil)
-	}
-	if c != nil && c.AuthorizationClient != nil {
-		clientset.AuthorizationClient = unversionedauthorization.New(c.AuthorizationClient.RESTClient)
-	} else {
-		clientset.AuthorizationClient = unversionedauthorization.New(nil)
-	}
-	if c != nil && c.AutoscalingClient != nil {
-		clientset.AutoscalingClient = unversionedautoscaling.New(c.AutoscalingClient.RESTClient)
-	} else {
-		clientset.AutoscalingClient = unversionedautoscaling.New(nil)
-	}
-	if c != nil && c.AuthenticationClient != nil {
-		clientset.AuthenticationClient = unversionedauthentication.New(c.AuthenticationClient.RESTClient)
-	} else {
-		clientset.AuthenticationClient = unversionedauthentication.New(nil)
-	}
-	if c != nil && c.DiscoveryClient != nil {
-		clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient)
-	} else {
-		clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil)
-	}
-	if c != nil && c.StorageClient != nil {
-		clientset.StorageClient = unversionedstorage.New(c.StorageClient.RESTClient)
-	} else {
-		clientset.StorageClient = unversionedstorage.New(nil)
-	}
-	return &clientset
-}

View File

@@ -302,9 +302,9 @@ func (rc *ResourceConsumer) CleanUp() {
 	rc.stopCustomMetric <- 0
 	// Wait some time to ensure all child goroutines are finished.
 	time.Sleep(10 * time.Second)
-	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
+	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
 	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
-	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.Namespace.Name, rc.controllerName))
+	framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.Client, rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
 	framework.ExpectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.controllerName))
 }

View File

@@ -27,7 +27,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -129,7 +128,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("scale job up")
-		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -154,7 +153,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("scale job down")
-		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -177,7 +176,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("delete a job")
-		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		timeout := 1 * time.Minute
 		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))

View File

@@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		By("Creating unschedulable pod")
 		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
 
 		By("Waiting for scale up hoping it won't happen")
 		// Verfiy, that the appropreate event was generated.
@@ -124,7 +124,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
 
 		// Verify, that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
 		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
 
 		// Verify, that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		CreateHostPortPods(f, "host-port", nodeCount+2, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "host-port")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port")
 
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
 			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@@ -217,7 +217,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-		framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "node-selector"))
+		framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "node-selector"))
 	})
 
 	It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
 		ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
-		defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "memory-reservation")
+		defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "memory-reservation")
 
 		// Apparently GKE master is restarted couple minutes after the node pool is added
 		// reseting all the timers in scale down code. Adding 5 extra minutes to workaround

View File

@@ -261,7 +261,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 		// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
 		// to the same size achieves this, because the scale operation advances the RC's sequence number
 		// and awaits it to be observed and reported back in the RC's status.
-		framework.ScaleRC(f.Client, ns, rcName, numPods, true)
+		framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods, true)
 
 		// Only check the keys, the pods can be different if the kubelet updated it.
 		// TODO: Can it really?
@@ -292,9 +292,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
 		restarter.kill()
 		// This is best effort to try and create pods while the scheduler is down,
 		// since we don't know exactly when it is restarted after the kill signal.
-		framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, false))
+		framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, false))
 		restarter.waitUp()
-		framework.ExpectNoError(framework.ScaleRC(f.Client, ns, rcName, numPods+5, true))
+		framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods+5, true))
 	})
 
 	It("Kubelet should not restart containers across restart", func() {

View File

@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -116,7 +115,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
 			framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
-			dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), clientsetadapter.FromUnversionedClient(c))
+			dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), f.ClientSet)
 			Expect(err).NotTo(HaveOccurred())
 			err = dsReaper.Stop(ns, dsName, 0, nil)
 			Expect(err).NotTo(HaveOccurred())

View File

@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
@@ -54,6 +55,7 @@ var MaxContainerFailures = 0
 type DensityTestConfig struct {
 	Configs      []framework.RCConfig
 	Client       *client.Client
+	ClientSet    internalclientset.Interface
 	Namespace    string
 	PollInterval time.Duration
 	PodCount     int
@@ -328,7 +330,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 		framework.ExpectNoError(err)
 	} else {
 		By("Cleaning up the replication controller and pods")
-		err := framework.DeleteRCAndPods(dtc.Client, dtc.Namespace, rcName)
+		err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
 		framework.ExpectNoError(err)
 	}
 }
@@ -487,7 +489,9 @@ var _ = framework.KubeDescribe("Density", func() {
 			}
 		}
 
-		dConfig := DensityTestConfig{Client: c,
+		dConfig := DensityTestConfig{
+			Client:    c,
+			ClientSet: f.ClientSet,
 			Configs:   RCConfigs,
 			PodCount:  totalPods,
 			Namespace: ns,
@@ -705,7 +709,9 @@ var _ = framework.KubeDescribe("Density", func() {
 				Silent: true,
 			}
 		}
-		dConfig := DensityTestConfig{Client: c,
+		dConfig := DensityTestConfig{
+			Client:    c,
+			ClientSet: f.ClientSet,
 			Configs:   RCConfigs,
 			PodCount:  totalPods,
 			Namespace: ns,

View File

@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
@@ -160,7 +159,7 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
 }
 
 // checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
-func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
+func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 	// Check revision of the new replica set of this deployment
@@ -182,15 +181,15 @@ func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revisio
 	return deployment, newRS
 }
 
-func stopDeploymentOverlap(c *clientset.Clientset, oldC client.Interface, ns, deploymentName, overlapWith string) {
+func stopDeploymentOverlap(c clientset.Interface, oldC client.Interface, ns, deploymentName, overlapWith string) {
 	stopDeploymentMaybeOverlap(c, oldC, ns, deploymentName, overlapWith)
 }
 
-func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
+func stopDeployment(c clientset.Interface, oldC client.Interface, ns, deploymentName string) {
 	stopDeploymentMaybeOverlap(c, oldC, ns, deploymentName, "")
 }
 
-func stopDeploymentMaybeOverlap(c *clientset.Clientset, oldC client.Interface, ns, deploymentName, overlapWith string) {
+func stopDeploymentMaybeOverlap(c clientset.Interface, oldC client.Interface, ns, deploymentName, overlapWith string) {
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
@@ -254,7 +253,7 @@ func testNewDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
-	c := adapter.FromUnversionedClient(f.Client)
+	c := f.ClientSet
 	deploymentName := "test-new-deployment"
 	podLabels := map[string]string{"name": nginxImageName}
@@ -289,7 +288,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	// Create nginx pods.
 	deploymentPodLabels := map[string]string{"name": "sample-pod"}
 	rsPodLabels := map[string]string{
@@ -339,7 +338,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	// Create nginx pods.
 	deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
 	rsPodLabels := map[string]string{
@@ -401,7 +400,7 @@ func testRecreateDeployment(f *framework.Framework) {
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	// Create nginx pods.
 	deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
 	rsPodLabels := map[string]string{
@@ -456,7 +455,7 @@ func testRecreateDeployment(f *framework.Framework) {
 func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	ns := f.Namespace.Name
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	// Create nginx pods.
 	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
 	rsPodLabels := map[string]string{
@@ -480,7 +479,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	deploymentName := "test-cleanup-deployment"
 	framework.Logf("Creating deployment %s", deploymentName)
 
-	pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+	pods, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
 	if err != nil {
 		Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
 	}
@@ -488,7 +487,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 		ResourceVersion: pods.ListMeta.ResourceVersion,
 	}
 	stopCh := make(chan struct{})
-	w, err := c.Pods(ns).Watch(options)
+	w, err := c.Core().Pods(ns).Watch(options)
 	go func() {
 		// There should be only one pod being created, which is the pod with the redis image.
 		// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
@@ -531,7 +530,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	podName := "rollover-pod"
 	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
@@ -599,8 +598,7 @@ func testPausedDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
-	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	deploymentName := "test-paused-deployment"
 	podLabels := map[string]string{"name": nginxImageName}
 	d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
@@ -697,8 +695,7 @@ func testPausedDeployment(f *framework.Framework) {
 // and then rollback to last revision.
 func testRollbackDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
-	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 
 	podName := "nginx"
 	deploymentPodLabels := map[string]string{"name": podName}
@@ -806,7 +803,7 @@ func testRollbackDeployment(f *framework.Framework) {
 // TODO: When we finished reporting rollback status in deployment status, check the rollback status here in each case.
 func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
 	ns := f.Namespace.Name
-	c := adapter.FromUnversionedClient(f.Client)
+	c := f.ClientSet
 	podName := "nginx"
 	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
@@ -943,7 +940,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
 	unversionedClient := f.Client
-	c := adapter.FromUnversionedClient(unversionedClient)
+	c := f.ClientSet
 	// Create nginx pods.
 	podName := "nginx"
 	podLabels := map[string]string{"name": podName}
@@ -998,7 +995,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 func testScalePausedDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
-	c := adapter.FromUnversionedClient(f.Client)
+	c := f.ClientSet
 
 	podLabels := map[string]string{"name": nginxImageName}
 	replicas := int32(3)
@@ -1049,7 +1046,7 @@ func testScalePausedDeployment(f *framework.Framework) {
 func testScaledRolloutDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
-	c := adapter.FromUnversionedClient(f.Client)
+	c := f.ClientSet
 
 	podLabels := map[string]string{"name": nginxImageName}
 	replicas := int32(10)
@@ -1216,7 +1213,7 @@ func testOverlappingDeployment(f *framework.Framework) {
 	ns := f.Namespace.Name
 	// TODO: remove unversionedClient when the refactoring is done. Currently some
 	// functions like verifyPod still expects a unversioned#Client.
-	c := adapter.FromUnversionedClient(f.Client)
+	c := f.ClientSet
 
 	deploymentName := "first-deployment"
 	podLabels := map[string]string{"name": redisImageName}

View File

@@ -377,7 +377,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
 	Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
 
 	defer func() {
-		err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+		err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 		framework.ExpectNoError(err)
 	}()

View File

@@ -503,7 +503,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			Expect(err).NotTo(HaveOccurred())
 
 			By("scaling rethinkdb")
-			framework.ScaleRC(c, ns, "rethinkdb-rc", 2, true)
+			framework.ScaleRC(c, f.ClientSet, ns, "rethinkdb-rc", 2, true)
 			checkDbInstances()
 
 			By("starting admin")
@@ -546,7 +546,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			Expect(err).NotTo(HaveOccurred())
 
 			By("scaling hazelcast")
-			framework.ScaleRC(c, ns, "hazelcast", 2, true)
+			framework.ScaleRC(c, f.ClientSet, ns, "hazelcast", 2, true)
 			forEachPod("name", "hazelcast", func(pod api.Pod) {
 				_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
 				Expect(err).NotTo(HaveOccurred())

View File

@@ -35,6 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
@@ -59,8 +60,13 @@ const (
 type Framework struct {
 	BaseName string
 
+	// Client is manually created and should not be used unless absolutely necessary. Use ClientSet_1_5
+	// where possible.
 	Client *client.Client
-	Clientset_1_5 *release_1_5.Clientset
+	// ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
+	ClientSet internalclientset.Interface
+
+	ClientSet_1_5 *release_1_5.Clientset
 	StagingClient *staging.Clientset
 	ClientPool dynamic.ClientPool
@@ -193,7 +199,9 @@ func (f *Framework) BeforeEach() {
 	c, err := loadClientFromConfig(config)
 	Expect(err).NotTo(HaveOccurred())
 	f.Client = c
-	f.Clientset_1_5, err = release_1_5.NewForConfig(config)
+	f.ClientSet, err = internalclientset.NewForConfig(config)
+	Expect(err).NotTo(HaveOccurred())
+	f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
 	Expect(err).NotTo(HaveOccurred())
 	clientRepoConfig := getClientRepoConfig(config)
 	f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
@@ -358,7 +366,7 @@ func (f *Framework) AfterEach() {
 	// Print events if the test failed.
 	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 		// Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
-		DumpAllNamespaceInfo(f.Client, f.Clientset_1_5, f.Namespace.Name)
+		DumpAllNamespaceInfo(f.Client, f.ClientSet_1_5, f.Namespace.Name)
 		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
 		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
 		if f.federated {
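
With these framework fields in place, the call-site churn in the rest of the commit follows one mechanical pattern: helpers that previously built a clientset from f.Client via the adapter now receive f.ClientSet explicitly. A representative before/after pair, paraphrased from the test diffs below (the fragments are alternatives, not sequential code):

	// before
	framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, ns, rcName))
	// after: the internal clientset travels alongside the unversioned client
	framework.ExpectNoError(framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, rcName))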

View File

@@ -57,7 +57,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/typed/discovery"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -3224,9 +3223,9 @@ func RemoveTaintOffNode(c *client.Client, nodeName string, taint api.Taint) {
 	}
 }
 
-func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
+func ScaleRC(c *client.Client, clientset clientset.Interface, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
-	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(c))
+	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
 	if err != nil {
 		return err
 	}
@@ -3333,7 +3332,7 @@ func WaitForPodsWithLabel(c *client.Client, ns string, label labels.Selector) (p
 }
 
 // DeleteRCAndPods a Replication Controller and all pods it spawned
-func DeleteRCAndPods(c *client.Client, ns, name string) error {
+func DeleteRCAndPods(c *client.Client, clientset clientset.Interface, ns, name string) error {
 	By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
 	rc, err := c.ReplicationControllers(ns).Get(name)
 	if err != nil {
@@ -3343,7 +3342,7 @@ func DeleteRCAndPods(c *client.Client, ns, name string) error {
 		}
 		return err
 	}
-	reaper, err := kubectl.ReaperForReplicationController(clientsetadapter.FromUnversionedClient(c).Core(), 10*time.Minute)
+	reaper, err := kubectl.ReaperForReplicationController(clientset.Core(), 10*time.Minute)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
 			Logf("RC %s was already deleted: %v", name, err)
@@ -3481,7 +3480,7 @@ func waitForPodsGone(ps *PodStore, interval, timeout time.Duration) error {
 }
 
 // Delete a ReplicaSet and all pods it spawned
-func DeleteReplicaSet(c *client.Client, ns, name string) error {
+func DeleteReplicaSet(c *client.Client, clientset clientset.Interface, ns, name string) error {
 	By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
 	rc, err := c.Extensions().ReplicaSets(ns).Get(name)
 	if err != nil {
@@ -3491,7 +3490,7 @@ func DeleteReplicaSet(c *client.Client, ns, name string) error {
 		}
 		return err
 	}
-	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientsetadapter.FromUnversionedClient(c))
+	reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientset)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
 			Logf("ReplicaSet %s was already deleted: %v", name, err)
@@ -3770,11 +3769,11 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
 	return nil
 }
 
-func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
+func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 	options := api.ListOptions{LabelSelector: label}
 	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
-		pods, err := c.Pods(ns).List(options)
+		pods, err := c.Core().Pods(ns).List(options)
 		if err != nil {
 			return false, nil
 		}
@@ -3788,7 +3787,7 @@ func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds i
 }
 
 // Waits for the deployment to clean up old rcs.
-func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
+func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
 	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
 		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 		if err != nil {
@@ -3814,7 +3813,7 @@ func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*
 	}
 }
 
-func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
+func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
 	return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
 }
@@ -3875,7 +3874,7 @@ func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object,
 type updateDeploymentFunc func(d *extensions.Deployment)
 
-func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
+func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
 	deployments := c.Extensions().Deployments(namespace)
 	err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
 		if deployment, err = deployments.Get(name); err != nil {
@@ -4722,7 +4721,7 @@ func GetNodePortURL(client *client.Client, ns, name string, svcPort int) (string
 // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
 // none are running, otherwise it does what a synchronous scale operation would do.
-func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, replicas uint) error {
+func ScaleRCByLabels(client *client.Client, clientset clientset.Interface, ns string, l map[string]string, replicas uint) error {
 	listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
 	rcs, err := client.ReplicationControllers(ns).List(listOpts)
 	if err != nil {
@@ -4734,7 +4733,7 @@ func ScaleRCByLabels(client *client.Client, ns string, l map[string]string, repl
 	Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
 	for _, labelRC := range rcs.Items {
 		name := labelRC.Name
-		if err := ScaleRC(client, ns, name, replicas, false); err != nil {
+		if err := ScaleRC(client, clientset, ns, name, replicas, false); err != nil {
 			return err
 		}
 		rc, err := client.ReplicationControllers(ns).Get(name)

View File

@@ -118,7 +118,7 @@ func gatherMetrics(f *framework.Framework) {
 var _ = framework.KubeDescribe("Garbage collector", func() {
 	f := framework.NewDefaultFramework("gc")
 	It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"
@@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 	})
 
 	It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"
@@ -231,7 +231,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 	})
 
 	It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
-		clientSet := f.Clientset_1_5
+		clientSet := f.ClientSet_1_5
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
 		rcName := "simpletest.rc"

View File

@@ -122,7 +122,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
 var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
 	f := framework.NewDefaultFramework("clientset")
 	It("should create pods, delete pods, watch pods", func() {
-		podClient := f.Clientset_1_5.Core().Pods(f.Namespace.Name)
+		podClient := f.ClientSet_1_5.Core().Pods(f.Namespace.Name)
 		By("constructing the pod")
 		name := "pod" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())

View File

@@ -23,7 +23,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/apis/batch"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -125,7 +124,7 @@ var _ = framework.KubeDescribe("Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("scale job up")
-		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -150,7 +149,7 @@ var _ = framework.KubeDescribe("Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("scale job down")
-		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
 		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -173,7 +172,7 @@ var _ = framework.KubeDescribe("Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("delete a job")
-		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
+		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
 		Expect(err).NotTo(HaveOccurred())
 		timeout := 1 * time.Minute
 		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))

View File

@@ -47,7 +47,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/labels"
@@ -199,7 +198,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 		pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
 		if err != nil || len(pods) < atLeast {
 			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
-			framework.DumpAllNamespaceInfo(c, f.Clientset_1_5, ns)
+			framework.DumpAllNamespaceInfo(c, f.ClientSet_1_5, ns)
 			framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
 		}
 	}
@@ -429,8 +428,8 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 			WithStdinData("abcd1234\n").
 			ExecOrDie()
 		Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
-		f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
-		runTestPod, _, err := util.GetFirstPod(clientsetadapter.FromUnversionedClient(c), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, f)
+		g := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+		runTestPod, _, err := util.GetFirstPod(f.ClientSet.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
 		if err != nil {
 			os.Exit(1)
 		}

View File

@@ -205,7 +205,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 			}
 
 			By("Deleting the RC")
-			framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+			framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 			// Check that the pods really are gone by querying /runningpods on the
 			// node. The /runningpods handler checks the container runtime (or its
 			// cache) and returns a list of running pods. Some possible causes of

View File

@@ -115,7 +115,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	verifyCPULimits(expectedCPU, cpuSummary)
 
 	By("Deleting the RC")
-	framework.DeleteRCAndPods(f.Client, f.Namespace.Name, rcName)
+	framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, rcName)
 }
 
 func verifyMemoryLimits(c *client.Client, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {

View File

@@ -26,6 +26,8 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/util/intstr"
@@ -321,7 +323,7 @@ func scaleRC(wg *sync.WaitGroup, config *framework.RCConfig, scalingTime time.Du
 sleepUpTo(scalingTime)
 newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
-framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
+framework.ExpectNoError(framework.ScaleRC(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name, newSize, true),
 fmt.Sprintf("scaling rc %s for the first time", config.Name))
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
 options := api.ListOptions{
@@ -349,6 +351,17 @@ func deleteRC(wg *sync.WaitGroup, config *framework.RCConfig, deletingTime time.
 if framework.TestContext.GarbageCollectorEnabled {
 framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
 } else {
-framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
+framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, coreClientSetFromUnversioned(config.Client), config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
 }
 }
+// coreClientSetFromUnversioned adapts just enough of an unversioned.Client to work with the scale RC function
+func coreClientSetFromUnversioned(c *client.Client) internalclientset.Interface {
+var clientset internalclientset.Clientset
+if c != nil {
+clientset.CoreClient = unversionedcore.New(c.RESTClient)
+} else {
+clientset.CoreClient = unversionedcore.New(nil)
+}
+return &clientset
+}
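A hedged usage sketch for the adapter above (not part of the commit): only CoreClient is wired up, so callers should stay within the Core() group; any other group client on the returned interface would be nil.

cs := coreClientSetFromUnversioned(config.Client)
// Core() works because CoreClient was populated; e.g. list pods in the test namespace.
pods, err := cs.Core().Pods(config.Namespace).List(api.ListOptions{})
if err != nil {
	framework.Failf("listing pods through the adapted clientset: %v", err)
}
framework.Logf("adapted clientset sees %d pods", len(pods.Items))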

View File

@@ -154,7 +154,7 @@ func proxyContext(version string) {
 CreatedPods: &pods,
 }
 Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
-defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)
+defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name)
 Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())

View File

@@ -86,7 +86,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
 // Cleanup the replication controller when we are done.
 defer func() {
 // Resize the replication controller to zero to get rid of pods.
-if err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, controller.Name); err != nil {
+if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
 framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 }
 }()

View File

@@ -86,7 +86,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
 // Cleanup the ReplicaSet when we are done.
 defer func() {
 // Resize the ReplicaSet to zero to get rid of pods.
-if err := framework.DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil {
+if err := framework.DeleteReplicaSet(f.Client, f.ClientSet, f.Namespace.Name, rs.Name); err != nil {
 framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
 }
 }()

View File

@@ -48,7 +48,7 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 It("should ensure that critical pod is scheduled in case there is no resources available", func() {
 By("reserving all available cpu")
 err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
-defer framework.DeleteRCAndPods(f.Client, ns, "reserve-all-cpu")
+defer framework.DeleteRCAndPods(f.Client, f.ClientSet, ns, "reserve-all-cpu")
 framework.ExpectNoError(err)
 By("creating a new instance of DNS and waiting for DNS to be scheduled")
@@ -61,8 +61,8 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
 rc := rcs.Items[0]
 replicas := uint(rc.Spec.Replicas)
-err = framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas+1, true)
-defer framework.ExpectNoError(framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas, true))
+err = framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas+1, true)
+defer framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, api.NamespaceSystem, rc.Name, replicas, true))
 framework.ExpectNoError(err)
 })
 })
@@ -72,7 +72,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
 replicas := millicores / 100
 ReserveCpu(f, id, 1, 100)
-framework.ExpectNoError(framework.ScaleRC(f.Client, f.Namespace.Name, id, uint(replicas), false))
+framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, f.Namespace.Name, id, uint(replicas), false))
 for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
 pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels)
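framework.ScaleRC picks up the same two-client shape; a minimal sketch (the rc name and target size are placeholders, `f` is the usual e2e framework):

// Scale "my-rc" to five replicas; the trailing true waits for the resize to
// settle, matching the scale-up/scale-down calls above.
framework.ExpectNoError(framework.ScaleRC(f.Client, f.ClientSet, f.Namespace.Name, "my-rc", 5, true))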

View File

@@ -186,19 +186,18 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 var totalPodCapacity int64
 var RCName string
 var ns string
+f := framework.NewDefaultFramework("sched-pred")
 ignoreLabels := framework.ImagePullerLabels
 AfterEach(func() {
 rc, err := c.ReplicationControllers(ns).Get(RCName)
 if err == nil && rc.Spec.Replicas != 0 {
 By("Cleaning up the replication controller")
-err := framework.DeleteRCAndPods(c, ns, RCName)
+err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
 framework.ExpectNoError(err)
 }
 })
-f := framework.NewDefaultFramework("sched-pred")
 BeforeEach(func() {
 c = f.Client
 ns = f.Namespace.Name
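Note why the NewDefaultFramework line moved up: the AfterEach closure now captures f for f.ClientSet, and Go requires f to be declared before the closure that references it. Schematically (a sketch, not the real spec body):

f := framework.NewDefaultFramework("sched-pred")
AfterEach(func() {
	// Compiles only because f is declared above this closure.
	_ = framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "some-rc")
})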
@@ -957,7 +956,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 // cannot be scheduled onto it.
 By("Launching two pods on two distinct nodes to get two node names")
 CreateHostPortPods(f, "host-port", 2, true)
-defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, "host-port")
+defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, "host-port")
 podList, err := c.Pods(ns).List(api.ListOptions{})
 ExpectNoError(err)
 Expect(len(podList.Items)).To(Equal(2))

View File

@@ -33,6 +33,7 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/service"
+"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/controller/endpoint"
 "k8s.io/kubernetes/pkg/labels"
@@ -309,7 +310,7 @@ var _ = framework.KubeDescribe("Services", func() {
 // Stop service 1 and make sure it is gone.
 By("stopping service1")
-framework.ExpectNoError(stopServeHostnameService(c, ns, "service1"))
+framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1"))
 By("verifying service1 is not up")
 framework.ExpectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort))
@@ -342,11 +343,11 @@ var _ = framework.KubeDescribe("Services", func() {
 svc1 := "service1"
 svc2 := "service2"
-defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc1)) }()
+defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc1)) }()
 podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
-defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, svc2)) }()
+defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, svc2)) }()
 podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
@@ -391,7 +392,7 @@ var _ = framework.KubeDescribe("Services", func() {
 ns := f.Namespace.Name
 numPods, servicePort := 3, 80
-defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service1")) }()
+defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service1")) }()
 podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
@@ -416,7 +417,7 @@ var _ = framework.KubeDescribe("Services", func() {
 framework.ExpectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort))
 // Create a new service and check if it's not reusing IP.
-defer func() { framework.ExpectNoError(stopServeHostnameService(c, ns, "service2")) }()
+defer func() { framework.ExpectNoError(stopServeHostnameService(c, f.ClientSet, ns, "service2")) }()
 podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods)
 Expect(err).NotTo(HaveOccurred())
@@ -1647,8 +1648,8 @@ func startServeHostnameService(c *client.Client, ns, name string, port, replicas
 return podNames, serviceIP, nil
 }
-func stopServeHostnameService(c *client.Client, ns, name string) error {
-if err := framework.DeleteRCAndPods(c, ns, name); err != nil {
+func stopServeHostnameService(c *client.Client, clientset internalclientset.Interface, ns, name string) error {
+if err := framework.DeleteRCAndPods(c, clientset, ns, name); err != nil {
 return err
 }
 if err := c.Services(ns).Delete(name); err != nil {
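Call sites thread the framework's generated clientset through as the helper's new second parameter; a short sketch using the function defined above (`ns` assumed from the enclosing test):

// c was f.Client in these tests; the clientset is forwarded to DeleteRCAndPods inside.
framework.ExpectNoError(stopServeHostnameService(f.Client, f.ClientSet, ns, "service1"))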

View File

@@ -126,7 +126,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
 if err := framework.RunRC(cfg); err != nil {
 return nil, err
 }
-defer framework.DeleteRCAndPods(f.Client, f.Namespace.Name, cfg.Name)
+defer framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, cfg.Name)
 // Run a single watcher, to reduce the number of API calls we have to
 // make; this is to minimize the timing error. It's how kube-proxy

View File

@@ -219,7 +219,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 // Cleanup the replication controller when we are done.
 defer func() {
 // Resize the replication controller to zero to get rid of pods.
-if err := framework.DeleteRCAndPods(f.Client, f.Namespace.Name, controller.Name); err != nil {
+if err := framework.DeleteRCAndPods(f.Client, f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
 framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 }
 }()

View File

@@ -42,10 +42,8 @@ import (
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/client/restclient"
 client "k8s.io/kubernetes/pkg/client/unversioned"
-clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 "k8s.io/kubernetes/pkg/controller"
 replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
-"k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/genericapiserver"
 "k8s.io/kubernetes/pkg/genericapiserver/authorizer"
 "k8s.io/kubernetes/pkg/kubectl"
@@ -300,8 +298,8 @@ func RCFromManifest(fileName string) *api.ReplicationController {
 }
 // StopRC stops the rc via kubectl's stop library
-func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
-reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
+func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
+reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
 if err != nil || reaper == nil {
 return err
 }
@@ -313,8 +311,8 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
 }
 // ScaleRC scales the given rc to the given replicas.
-func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.ReplicationController, error) {
-scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
+func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
+scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
 if err != nil {
 return nil, err
 }
@@ -324,64 +322,13 @@ func ScaleRC(name, ns string, replicas int32, restClient *client.Client) (*api.R
 if err != nil {
 return nil, err
 }
-scaled, err := restClient.ReplicationControllers(ns).Get(name)
+scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
 if err != nil {
 return nil, err
 }
 return scaled, nil
 }
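With the adapter gone, integration callers hand a generated clientset straight to these helpers. A hedged sketch (assuming the file's clientset alias is the generated internalclientset package, `cfg` is a *restclient.Config, and `rc` is an *api.ReplicationController already in hand):

cs, err := clientset.NewForConfig(cfg)
if err != nil {
	glog.Fatalf("building clientset: %v", err)
}
// Scale to three replicas, then tear the rc down via kubectl's reaper.
if _, err := ScaleRC("my-rc", api.NamespaceDefault, 3, cs); err != nil {
	glog.Fatalf("scaling rc: %v", err)
}
if err := StopRC(rc, cs); err != nil {
	glog.Fatalf("stopping rc: %v", err)
}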
-// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's scaler.
-func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
-created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
-if err != nil {
-glog.Infof("Rc %v doesn't exist, creating", controller.Name)
-created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
-if err != nil {
-return nil, err
-}
-}
-// If we just created an rc, wait till it creates its replicas.
-return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
-}
-// StartPods check for numPods in namespace. If they exist, it no-ops, otherwise it starts up
-// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
-func StartPods(namespace string, numPods int, host string, restClient *client.Client) error {
-start := time.Now()
-defer func() {
-glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
-}()
-hostField := fields.OneTermEqualSelector(api.PodHostField, host)
-options := api.ListOptions{FieldSelector: hostField}
-pods, err := restClient.Pods(namespace).List(options)
-if err != nil || len(pods.Items) == numPods {
-return err
-}
-glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
-// For the sake of simplicity, assume all pods in namespace have selectors matching TestRCManifest.
-controller := RCFromManifest(TestRCManifest)
-// Overwrite namespace
-controller.ObjectMeta.Namespace = namespace
-controller.Spec.Template.ObjectMeta.Namespace = namespace
-// Make the rc unique to the given host.
-controller.Spec.Replicas = int32(numPods)
-controller.Spec.Template.Spec.NodeName = host
-controller.Name = controller.Name + host
-controller.Spec.Selector["host"] = host
-controller.Spec.Template.Labels["host"] = host
-if rc, err := StartRC(controller, restClient); err != nil {
-return err
-} else {
-// Delete the rc, otherwise when we restart master components for the next benchmark
-// the rc controller will race with the pods controller in the rc manager.
-return restClient.ReplicationControllers(namespace).Delete(rc.Name, nil)
-}
-}
 func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server) {
 if masterConfig == nil {
 masterConfig = NewMasterConfig()