Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones.
Follow-up commits will switch the use of one-off informers to shared
informers.
This commit is contained in:
Andy Goldstein
2017-02-06 13:35:50 -05:00
parent cb758738f9
commit 70c6087600
55 changed files with 936 additions and 823 deletions

View File

@@ -18,9 +18,8 @@ go_library(
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/legacylisters:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/client/informers/informers_generated/core/v1:go_default_library",
"//pkg/client/listers/core/v1:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -39,7 +38,11 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/fake:go_default_library",
"//pkg/client/informers/informers_generated:go_default_library",
"//pkg/client/informers/informers_generated/core/v1:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/node/testutil:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",

View File

@@ -17,6 +17,7 @@ limitations under the License.
package podgc
import (
"fmt"
"sort"
"sync"
"time"
@@ -29,9 +30,8 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/legacylisters"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
"k8s.io/kubernetes/pkg/util/metrics"
"github.com/golang/glog"
@@ -44,21 +44,14 @@ const (
type PodGCController struct {
kubeClient clientset.Interface
// internalPodInformer is used to hold a personal informer. If we're using
// a normal shared informer, then the informer will be started for us. If
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewPodGC(..., passing SharedInformer, ...), this
// will be null
internalPodInformer cache.SharedIndexInformer
podStore listers.StoreToPodLister
podController cache.Controller
podLister corelisters.PodLister
podListerSynced cache.InformerSynced
deletePod func(namespace, name string) error
terminatedPodThreshold int
}
func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController {
func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
@@ -71,36 +64,24 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
},
}
gcc.podStore.Indexer = podInformer.GetIndexer()
gcc.podController = podInformer.GetController()
gcc.podLister = podInformer.Lister()
gcc.podListerSynced = podInformer.Informer().HasSynced
return gcc
}
func NewFromClient(
kubeClient clientset.Interface,
terminatedPodThreshold int,
) *PodGCController {
podInformer := informers.NewPodInformer(kubeClient, controller.NoResyncPeriodFunc())
controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
controller.internalPodInformer = podInformer
return controller
}
func (gcc *PodGCController) Run(stop <-chan struct{}) {
if gcc.internalPodInformer != nil {
go gcc.podController.Run(stop)
if !cache.WaitForCacheSync(stop, gcc.podListerSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
go wait.Until(gcc.gc, gcCheckPeriod, stop)
<-stop
}
func (gcc *PodGCController) gc() {
if !gcc.podController.HasSynced() {
glog.V(2).Infof("PodGCController is waiting for informer sync...")
return
}
pods, err := gcc.podStore.List(labels.Everything())
pods, err := gcc.podLister.List(labels.Everything())
if err != nil {
glog.Errorf("Error while listing all Pods: %v", err)
return

View File

@@ -25,7 +25,11 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/node/testutil"
)
@@ -41,6 +45,16 @@ func (*FakeController) LastSyncResourceVersion() string {
return ""
}
func alwaysReady() bool { return true }
func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (*PodGCController, coreinformers.PodInformer) {
informerFactory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
controller.podListerSynced = alwaysReady
return controller, podInformer
}
func TestGCTerminated(t *testing.T) {
type nameToPhase struct {
name string
@@ -99,7 +113,7 @@ func TestGCTerminated(t *testing.T) {
for i, test := range testCases {
client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*testutil.NewNode("node")}})
gcc := NewFromClient(client, test.threshold)
gcc, podInformer := NewFromClient(client, test.threshold)
deletedPodNames := make([]string, 0)
var lock sync.Mutex
gcc.deletePod = func(_, name string) error {
@@ -112,15 +126,13 @@ func TestGCTerminated(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&v1.Pod{
podInformer.Informer().GetStore().Add(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
Status: v1.PodStatus{Phase: pod.phase},
Spec: v1.PodSpec{NodeName: "node"},
})
}
gcc.podController = &FakeController{}
gcc.gc()
pass := true
@@ -168,7 +180,7 @@ func TestGCOrphaned(t *testing.T) {
for i, test := range testCases {
client := fake.NewSimpleClientset()
gcc := NewFromClient(client, test.threshold)
gcc, podInformer := NewFromClient(client, test.threshold)
deletedPodNames := make([]string, 0)
var lock sync.Mutex
gcc.deletePod = func(_, name string) error {
@@ -181,16 +193,14 @@ func TestGCOrphaned(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&v1.Pod{
podInformer.Informer().GetStore().Add(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
Status: v1.PodStatus{Phase: pod.phase},
Spec: v1.PodSpec{NodeName: "node"},
})
}
gcc.podController = &FakeController{}
pods, err := gcc.podStore.List(labels.Everything())
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
t.Errorf("Error while listing all Pods: %v", err)
return
@@ -247,7 +257,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
for i, test := range testCases {
client := fake.NewSimpleClientset()
gcc := NewFromClient(client, -1)
gcc, podInformer := NewFromClient(client, -1)
deletedPodNames := make([]string, 0)
var lock sync.Mutex
gcc.deletePod = func(_, name string) error {
@@ -260,7 +270,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&v1.Pod{
podInformer.Informer().GetStore().Add(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime},
DeletionTimestamp: pod.deletionTimeStamp},
Status: v1.PodStatus{Phase: pod.phase},
@@ -268,9 +278,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
})
}
gcc.podController = &FakeController{}
pods, err := gcc.podStore.List(labels.Everything())
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
t.Errorf("Error while listing all Pods: %v", err)
return