Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones.
Follow-up commits will switch the use of one-off informers to shared
informers.
Andy Goldstein
2017-02-06 13:35:50 -05:00
parent cb758738f9
commit 70c6087600
55 changed files with 936 additions and 823 deletions
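
Every file below follows the same migration pattern: the hand-written one-off informers in pkg/controller/informers and the StoreTo* listers in pkg/client/legacylisters give way to the generated shared-informer factory and its typed informers and listers. A minimal sketch of the new wiring, assuming the in-tree import paths this commit introduces; startDeploymentController is a hypothetical helper, not part of the commit:

```go
package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller/deployment"
)

// startDeploymentController (hypothetical) shows the new wiring: one generated
// factory hands typed informers to the controller.
func startDeploymentController(client clientset.Interface, stopCh <-chan struct{}) {
	// First argument is the internal clientset (unused by this controller);
	// a resync period of 0 disables periodic resyncs.
	factory := informers.NewSharedInformerFactory(nil, client, 0)

	dc := deployment.NewDeploymentController(
		factory.Extensions().V1beta1().Deployments(),
		factory.Extensions().V1beta1().ReplicaSets(),
		factory.Core().V1().Pods(),
		client,
	)

	factory.Start(stopCh) // launch the underlying shared informers
	dc.Run(5, stopCh)     // blocks; Run waits for the caches to sync first
}
```

Because the factory hands out one shared informer per resource, every controller wired this way reads from the same in-memory cache, which is what the follow-up commits mentioned above build on.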

View File

@@ -24,10 +24,12 @@ go_library(
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/legacylisters:go_default_library",
"//pkg/client/informers/informers_generated/core/v1:go_default_library",
"//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
"//pkg/client/listers/core/v1:go_default_library",
"//pkg/client/listers/extensions/v1beta1:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor:github.com/golang/glog",
@@ -63,9 +65,9 @@ go_test(
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/informers/informers_generated:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/informers:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",

View File

@@ -44,10 +44,12 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/legacylisters"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/metrics"
)
@@ -79,12 +81,12 @@ type DeploymentController struct {
     // used for unit testing
     enqueueDeployment func(deployment *extensions.Deployment)
-    // A store of deployments, populated by the dController
-    dLister *listers.StoreToDeploymentLister
-    // A store of ReplicaSets, populated by the rsController
-    rsLister *listers.StoreToReplicaSetLister
-    // A store of pods, populated by the podController
-    podLister *listers.StoreToPodLister
+    // dLister can list/get deployments from the shared informer's store
+    dLister extensionslisters.DeploymentLister
+    // rsLister can list/get replica sets from the shared informer's store
+    rsLister extensionslisters.ReplicaSetLister
+    // podLister can list/get pods from the shared informer's store
+    podLister corelisters.PodLister
     // dListerSynced returns true if the Deployment store has been synced at least once.
     // Added as a member to the struct to allow injection for testing.
@@ -103,7 +105,7 @@ type DeploymentController struct {
 }
 // NewDeploymentController creates a new DeploymentController.
-func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController {
+func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *DeploymentController {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
     // TODO: remove the wrapper when every clients have moved to use the clientset.
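
The constructor body that populates the struct fields from these informers falls outside the hunk. A hedged sketch of that wiring, assuming the generated informer interfaces imported above; wireListers is a hypothetical stand-in for assignments the real constructor makes inline:

```go
// wireListers (hypothetical) shows how each generated informer yields a typed
// lister plus a HasSynced func for the WaitForCacheSync gate in Run.
func wireListers(dc *DeploymentController,
	dInformer extensionsinformers.DeploymentInformer,
	rsInformer extensionsinformers.ReplicaSetInformer,
	podInformer coreinformers.PodInformer,
) {
	dc.dLister = dInformer.Lister()
	dc.dListerSynced = dInformer.Informer().HasSynced
	dc.rsLister = rsInformer.Lister()
	dc.rsListerSynced = rsInformer.Informer().HasSynced
	dc.podLister = podInformer.Lister()
	dc.podListerSynced = podInformer.Informer().HasSynced
}
```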
@@ -159,6 +161,7 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
glog.Infof("Starting deployment controller")
if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
@@ -497,17 +500,20 @@ func (dc *DeploymentController) syncDeployment(key string) error {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dc.dLister.Indexer.GetByKey(key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
deployment, err := dc.dLister.Deployments(namespace).Get(name)
if errors.IsNotFound(err) {
glog.Infof("Deployment has been deleted %v", key)
return nil
}
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to retrieve deployment %v from store: %v", key, err))
return err
}
if !exists {
glog.Infof("Deployment has been deleted %v", key)
return nil
}
deployment := obj.(*extensions.Deployment)
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
d, err := util.DeploymentDeepCopy(deployment)
@@ -766,15 +772,18 @@ func (dc *DeploymentController) checkNextItemForProgress() bool {
 // checkForProgress checks the progress for the provided deployment. Meant to be called
 // by the progressWorker and work on items synced in a secondary queue.
 func (dc *DeploymentController) checkForProgress(key string) (bool, error) {
-    obj, exists, err := dc.dLister.Indexer.GetByKey(key)
+    namespace, name, err := cache.SplitMetaNamespaceKey(key)
     if err != nil {
         return false, err
     }
+    deployment, err := dc.dLister.Deployments(namespace).Get(name)
+    if errors.IsNotFound(err) {
+        return false, nil
+    }
+    if err != nil {
+        glog.V(2).Infof("Cannot retrieve deployment %q found in the secondary queue: %#v", key, err)
+        return false, err
+    }
-    if !exists {
-        return false, nil
-    }
-    deployment := obj.(*extensions.Deployment)
     cond := util.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
     // Already marked with a terminal reason - no need to add it back to the main queue.
     if cond != nil && (cond.Reason == util.TimedOutReason || cond.Reason == util.NewRSAvailableReason) {

View File

@@ -32,9 +32,9 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
)
var (
@@ -166,20 +166,20 @@ func newFixture(t *testing.T) *fixture {
 func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) {
     f.client = fake.NewSimpleClientset(f.objects...)
-    informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
-    c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
+    informers := informers.NewSharedInformerFactory(nil, f.client, controller.NoResyncPeriodFunc())
+    c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
     c.eventRecorder = &record.FakeRecorder{}
     c.dListerSynced = alwaysReady
     c.rsListerSynced = alwaysReady
     c.podListerSynced = alwaysReady
     for _, d := range f.dLister {
-        c.dLister.Indexer.Add(d)
+        informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
     }
     for _, rs := range f.rsLister {
-        c.rsLister.Indexer.Add(rs)
+        informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
     }
     for _, pod := range f.podLister {
-        c.podLister.Indexer.Add(pod)
+        informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
     }
     return c, informers
 }
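
Because the controller's listers now read from the shared informer's store, the fixture seeds that store directly instead of assigning to controller-owned fields. The idea in isolation, as a hedged sketch using the imports already in this test file; seedDeployment is a hypothetical helper:

```go
// seedDeployment (hypothetical): an object added straight to the informer's
// indexer is immediately visible to the controller's lister, without ever
// starting the informer machinery.
func seedDeployment(f informers.SharedInformerFactory, d *extensions.Deployment) {
	f.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
}
```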
@@ -246,8 +246,8 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
 func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
     fake := &fake.Clientset{}
-    informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
-    controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
+    informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc())
+    controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
     controller.eventRecorder = &record.FakeRecorder{}
     controller.dListerSynced = alwaysReady
     controller.rsListerSynced = alwaysReady
@@ -260,7 +260,7 @@ func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing
     d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
     empty := metav1.LabelSelector{}
     d.Spec.Selector = &empty
-    controller.dLister.Indexer.Add(d)
+    informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
     // We expect the deployment controller to not take action here since it's configuration
     // is invalid, even though no replicasets exist that match it's selector.
     controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))

View File

@@ -25,8 +25,8 @@ import (
"k8s.io/kubernetes/pkg/api"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
)
func TestScaleDownOldReplicaSets(t *testing.T) {
@@ -68,8 +68,8 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
     }
     kc := fake.NewSimpleClientset(expected...)
-    informers := informers.NewSharedInformerFactory(kc, nil, controller.NoResyncPeriodFunc())
-    c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), kc)
+    informers := informers.NewSharedInformerFactory(nil, kc, controller.NoResyncPeriodFunc())
+    c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc)
     c.eventRecorder = &record.FakeRecorder{}
     c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)

View File

@@ -26,9 +26,9 @@ import (
"k8s.io/client-go/tools/record"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/informers"
)
func maxSurge(val int) *intstr.IntOrString {
@@ -371,15 +371,15 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
     for i := range tests {
         test := tests[i]
         fake := &fake.Clientset{}
-        informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
-        controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
+        informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc())
+        controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
         controller.eventRecorder = &record.FakeRecorder{}
         controller.dListerSynced = alwaysReady
         controller.rsListerSynced = alwaysReady
         controller.podListerSynced = alwaysReady
         for _, rs := range test.oldRSs {
-            controller.rsLister.Indexer.Add(rs)
+            informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
         }
         stopCh := make(chan struct{})

View File

@@ -25,7 +25,8 @@ go_library(
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/legacylisters:go_default_library",
"//pkg/client/listers/core/v1:go_default_library",
"//pkg/client/listers/extensions/v1beta1:go_default_library",
"//pkg/client/retry:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/hash:go_default_library",

View File

@@ -40,7 +40,7 @@ import (
     internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-    "k8s.io/kubernetes/pkg/client/legacylisters"
+    corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
     "k8s.io/kubernetes/pkg/controller"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -685,7 +685,7 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na
 // LabelPodsWithHash labels all pods in the given podList with the new hash label.
 // The returned bool value can be used to tell if all pods are actually labeled.
-func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister *listers.StoreToPodLister, namespace, name, hash string) error {
+func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister corelisters.PodLister, namespace, name, hash string) error {
     for _, pod := range podList.Items {
         // Only label the pod that doesn't already have the new hash
         if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
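
The new parameter type is a generated, compile-time-typed lister whose reads are namespace-scoped and served from the informer cache rather than the API server. A hedged sketch with today's client-go paths; printPodsWithHash is hypothetical, and pod-template-hash is the value of the DefaultDeploymentUniqueLabelKey used just above:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// printPodsWithHash (hypothetical) lists pods carrying a given template hash
// straight from the lister's cache.
func printPodsWithHash(podLister corelisters.PodLister, namespace, hash string) error {
	selector := labels.SelectorFromSet(labels.Set{"pod-template-hash": hash})
	pods, err := podLister.Pods(namespace).List(selector)
	if err != nil {
		return err
	}
	for _, pod := range pods {
		fmt.Println(pod.Name)
	}
	return nil
}
```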

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/client/legacylisters"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
"k8s.io/kubernetes/pkg/client/retry"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
@@ -56,7 +56,7 @@ type updatePodFunc func(pod *v1.Pod) error
 // UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
 // The returned bool value can be used to tell if the pod is actually updated.
-func UpdatePodWithRetries(podClient v1core.PodInterface, podLister *listers.StoreToPodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
+func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
     var pod *v1.Pod
     retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
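
The pattern these helpers wrap: read the freshest cached copy, mutate a copy of it, and let RetryOnConflict re-run the closure whenever the apiserver answers with a 409 Conflict. A hedged sketch using only the signatures visible in this diff; addPodLabel is a hypothetical example of an applyUpdate-style mutation:

```go
// addPodLabel (hypothetical) sets one label with conflict retries.
func addPodLabel(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name, key, value string) (*v1.Pod, error) {
	var pod *v1.Pod
	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		cached, err := podLister.Pods(namespace).Get(name)
		if err != nil {
			return err
		}
		// Copy before mutating: lister objects are shared with the informer
		// cache, so we replace the Labels map instead of editing it in place.
		updated := *cached
		updated.Labels = make(map[string]string, len(cached.Labels)+1)
		for k, v := range cached.Labels {
			updated.Labels[k] = v
		}
		updated.Labels[key] = value
		pod, err = podClient.Update(&updated)
		return err
	})
	return pod, retryErr
}
```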

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/legacylisters"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/retry"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
@@ -37,7 +37,7 @@ type updateRSFunc func(rs *extensions.ReplicaSet) error
 // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
 // The returned bool value can be used to tell if the RS is actually updated.
-func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister *listers.StoreToReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
+func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
     var rs *extensions.ReplicaSet
     retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {