remove almost all usages of clientadapter

deads2k 2016-09-22 11:00:19 -04:00
parent c19e08ebbc
commit c22f076561
8 changed files with 56 additions and 52 deletions
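
The change repeated across all eight files: call sites that used to wrap the legacy unversioned *client.Client through the adapters package now receive a generated clientset built directly from a rest config. A minimal before/after sketch, assuming a *restclient.Config named restConfig (a placeholder, not from the diff):

    import (
        "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        "k8s.io/kubernetes/pkg/client/restclient"
    )

    func newClientSet(restConfig *restclient.Config) (internalclientset.Interface, error) {
        // Before: cs := clientsetadapter.FromUnversionedClient(unversionedClient)
        // After: construct the generated internal clientset directly.
        return internalclientset.NewForConfig(restConfig)
    }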

View File

@ -20,10 +20,11 @@ import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
@ -66,7 +67,7 @@ func (c *HollowNodeConfig) addFlags(fs *pflag.FlagSet) {
fs.StringVar(&c.ContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType of requests sent to apiserver.")
}
func (c *HollowNodeConfig) createClientFromFile() (*client.Client, error) {
func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
clientConfig, err := clientcmd.LoadFromFile(c.KubeconfigPath)
if err != nil {
return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", c.KubeconfigPath, err)
@ -76,15 +77,10 @@ func (c *HollowNodeConfig) createClientFromFile() (*client.Client, error) {
return nil, fmt.Errorf("error while creating kubeconfig: %v", err)
}
config.ContentType = c.ContentType
client, err := client.New(config)
if err != nil {
return nil, fmt.Errorf("error while creating client: %v", err)
}
return client, nil
return config, nil
}
func main() {
config := HollowNodeConfig{}
config.addFlags(pflag.CommandLine)
flag.InitFlags()
@ -94,10 +90,17 @@ func main() {
}
// create a client to communicate with API server.
cl, err := config.createClientFromFile()
clientset := clientset.FromUnversionedClient(cl)
clientConfig, err := config.createClientConfigFromFile()
if err != nil {
glog.Fatal("Failed to create a Client. Exiting.")
glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
}
cl, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
}
clientset, err := internalclientset.NewForConfig(clientConfig)
if err != nil {
glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
}
if config.Morph == "kubelet" {
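
Net effect in this file: the helper now returns the rest config so main can derive both the legacy unversioned client and the generated clientset from it. Reassembled, the helper reads approximately as follows; the NewDefaultClientConfig step is an assumption filling the region the hunks skip:

    func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
        clientConfig, err := clientcmd.LoadFromFile(c.KubeconfigPath)
        if err != nil {
            return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", c.KubeconfigPath, err)
        }
        // Assumed from context; this line sits between the two hunks above.
        config, err := clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
        if err != nil {
            return nil, fmt.Errorf("error while creating kubeconfig: %v", err)
        }
        config.ContentType = c.ContentType
        return config, nil
    }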

View File

@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/batch"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
@ -129,7 +128,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job up")
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@ -154,7 +153,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job down")
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@ -177,7 +176,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("delete a job")
reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
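
With the adapter removed, kubectl.ScalerFor and kubectl.ReaperFor take the framework's generated clientset directly; the same substitution recurs in the DaemonSet and batch Job specs below. A condensed sketch of the scale-and-wait sequence these specs run (jobName is a placeholder; the Scale signature is the era's kubectl.Scaler interface, stated here as an assumption):

    scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
    Expect(err).NotTo(HaveOccurred())
    waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
    waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
    // Scale the job to two parallel pods and block until replicas match.
    err = scaler.Scale(f.Namespace.Name, jobName, 2, nil, waitForScale, waitForReplicas)
    Expect(err).NotTo(HaveOccurred())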

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@ -116,7 +115,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), clientsetadapter.FromUnversionedClient(c))
dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(ns, dsName, 0, nil)
Expect(err).NotTo(HaveOccurred())

View File

@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
@ -160,7 +159,7 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
}
// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
// Check revision of the new replica set of this deployment
@ -182,15 +181,15 @@ func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revisio
return deployment, newRS
}
func stopDeploymentOverlap(c *clientset.Clientset, oldC client.Interface, ns, deploymentName, overlapWith string) {
func stopDeploymentOverlap(c clientset.Interface, oldC client.Interface, ns, deploymentName, overlapWith string) {
stopDeploymentMaybeOverlap(c, oldC, ns, deploymentName, overlapWith)
}
func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
func stopDeployment(c clientset.Interface, oldC client.Interface, ns, deploymentName string) {
stopDeploymentMaybeOverlap(c, oldC, ns, deploymentName, "")
}
func stopDeploymentMaybeOverlap(c *clientset.Clientset, oldC client.Interface, ns, deploymentName, overlapWith string) {
func stopDeploymentMaybeOverlap(c clientset.Interface, oldC client.Interface, ns, deploymentName, overlapWith string) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
@ -254,7 +253,7 @@ func testNewDeployment(f *framework.Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
c := adapter.FromUnversionedClient(f.Client)
c := f.ClientSet
deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": nginxImageName}
@ -289,7 +288,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod"}
rsPodLabels := map[string]string{
@ -339,7 +338,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
rsPodLabels := map[string]string{
@ -401,7 +400,7 @@ func testRecreateDeployment(f *framework.Framework) {
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
rsPodLabels := map[string]string{
@ -456,7 +455,7 @@ func testRecreateDeployment(f *framework.Framework) {
func testDeploymentCleanUpPolicy(f *framework.Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
// Create nginx pods.
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
rsPodLabels := map[string]string{
@ -480,7 +479,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
pods, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil {
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
}
@ -488,7 +487,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
ResourceVersion: pods.ListMeta.ResourceVersion,
}
stopCh := make(chan struct{})
w, err := c.Pods(ns).Watch(options)
w, err := c.Core().Pods(ns).Watch(options)
go func() {
// There should be only one pod being created, which is the pod with the redis image.
// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
@ -531,7 +530,7 @@ func testRolloverDeployment(f *framework.Framework) {
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
podName := "rollover-pod"
deploymentPodLabels := map[string]string{"name": podName}
rsPodLabels := map[string]string{
@ -599,8 +598,7 @@ func testPausedDeployment(f *framework.Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
deploymentName := "test-paused-deployment"
podLabels := map[string]string{"name": nginxImageName}
d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
@ -697,8 +695,7 @@ func testPausedDeployment(f *framework.Framework) {
// and then rollback to last revision.
func testRollbackDeployment(f *framework.Framework) {
ns := f.Namespace.Name
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
@ -806,7 +803,7 @@ func testRollbackDeployment(f *framework.Framework) {
// TODO: When we finished reporting rollback status in deployment status, check the rollback status here in each case.
func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
ns := f.Namespace.Name
c := adapter.FromUnversionedClient(f.Client)
c := f.ClientSet
podName := "nginx"
deploymentPodLabels := map[string]string{"name": podName}
rsPodLabels := map[string]string{
@ -943,7 +940,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
unversionedClient := f.Client
c := adapter.FromUnversionedClient(unversionedClient)
c := f.ClientSet
// Create nginx pods.
podName := "nginx"
podLabels := map[string]string{"name": podName}
@ -998,7 +995,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
func testScalePausedDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := adapter.FromUnversionedClient(f.Client)
c := f.ClientSet
podLabels := map[string]string{"name": nginxImageName}
replicas := int32(3)
@ -1049,7 +1046,7 @@ func testScalePausedDeployment(f *framework.Framework) {
func testScaledRolloutDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := adapter.FromUnversionedClient(f.Client)
c := f.ClientSet
podLabels := map[string]string{"name": nginxImageName}
replicas := int32(10)
@ -1216,7 +1213,7 @@ func testOverlappingDeployment(f *framework.Framework) {
ns := f.Namespace.Name
// TODO: remove unversionedClient when the refactoring is done. Currently some
// functions like verifyPod still expects a unversioned#Client.
c := adapter.FromUnversionedClient(f.Client)
c := f.ClientSet
deploymentName := "first-deployment"
podLabels := map[string]string{"name": redisImageName}
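
Two mechanical consequences of moving from *clientset.Clientset to clientset.Interface run through this file: helpers accept the interface (so either f.ClientSet or a fake clientset satisfies them), and core-group resources are reached through the Core() accessor rather than hanging off the client directly. A minimal sketch using the clientset alias imported above:

    func listDeploymentPods(c clientset.Interface, ns string) (*api.PodList, error) {
        // The unversioned client exposed c.Pods(ns); the generated
        // clientset groups core resources under Core().
        return c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
    }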

View File

@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
@ -59,7 +60,12 @@ const (
type Framework struct {
BaseName string
Client *client.Client
// Client is manually created and should not be used unless absolutely necessary. Use Clientset_1_5
// where possible.
Client *client.Client
// ClientSet uses internal objects, you should use Clientset_1_5 where possible.
ClientSet internalclientset.Interface
Clientset_1_5 *release_1_5.Clientset
StagingClient *staging.Clientset
ClientPool dynamic.ClientPool
@ -193,6 +199,8 @@ func (f *Framework) BeforeEach() {
c, err := loadClientFromConfig(config)
Expect(err).NotTo(HaveOccurred())
f.Client = c
f.ClientSet, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.Clientset_1_5, err = release_1_5.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
clientRepoConfig := getClientRepoConfig(config)
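
Since BeforeEach now builds f.ClientSet from the same rest config as the other clients, specs can pass it anywhere an internalclientset.Interface is expected, or call it directly. A hedged usage sketch (podName is a placeholder; Get took only a name in this era of the client):

    pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podName)
    Expect(err).NotTo(HaveOccurred())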

View File

@ -3770,11 +3770,11 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
return nil
}
func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds int) error {
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := api.ListOptions{LabelSelector: label}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Pods(ns).List(options)
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, nil
}
@ -3788,7 +3788,7 @@ func WaitForPodsReady(c *clientset.Clientset, ns, name string, minReadySeconds i
}
// Waits for the deployment to clean up old rcs.
func WaitForDeploymentOldRSsNum(c *clientset.Clientset, ns, deploymentName string, desiredRSNum int) error {
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
@ -3814,7 +3814,7 @@ func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*
}
}
func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string, desiredGeneration int64) error {
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
}
@ -3875,7 +3875,7 @@ func WaitForPartialEvents(c *client.Client, ns string, objOrRef runtime.Object,
type updateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c *clientset.Clientset, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
deployments := c.Extensions().Deployments(namespace)
err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if deployment, err = deployments.Get(name); err != nil {
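
The same interface substitution applies to the retry helper; for context, its poll-get-mutate-update shape is sketched below. The Update call and the swallow-and-retry error handling are assumptions based on the helper's name and the visible Poll/Get skeleton, since the diff truncates the body:

    func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
        deployments := c.Extensions().Deployments(namespace)
        err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
            if deployment, err = deployments.Get(name); err != nil {
                return false, err
            }
            applyUpdate(deployment) // caller mutates the freshly fetched object
            if deployment, err = deployments.Update(deployment); err == nil {
                return true, nil
            }
            return false, nil // assumed: retry on update conflict
        })
        return deployment, err
    }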

View File

@ -23,7 +23,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/batch"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
@ -125,7 +124,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job up")
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@ -150,7 +149,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job down")
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@ -173,7 +172,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("delete a job")
reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))

View File

@ -47,7 +47,6 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
clientsetadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/labels"
@ -429,8 +428,8 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
WithStdinData("abcd1234\n").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := util.GetFirstPod(clientsetadapter.FromUnversionedClient(c), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, f)
g := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := util.GetFirstPod(f.ClientSet.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
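
Worth noting in this last hunk: the sort-order closure is renamed from f to g so that f unambiguously refers to the enclosing framework.Framework, whose ClientSet exposes the Core() group that util.GetFirstPod now expects in place of the adapter-wrapped client.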