Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones.
Follow-up commits will switch the use of one-off informers to shared
informers.
Andy Goldstein
2017-02-06 13:35:50 -05:00
parent cb758738f9
commit 70c6087600
55 changed files with 936 additions and 823 deletions
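
For readers skimming the diff, here is a minimal sketch (not part of the commit) of the generated-informer pattern this change adopts. It uses only constructs that appear in the diff below: the generated shared informer factory and its typed, group/version-scoped accessors. The import paths reflect the 2017 Kubernetes tree shown here.

```go
// Sketch only: mirrors the calls used in the updated test below.
package main

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	client := fake.NewSimpleClientset()

	// The generated factory hands out one shared informer per resource type,
	// addressed by API group and version rather than by ad hoc constructors.
	factory := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())

	nodeInformer := factory.Core().V1().Nodes()

	// Each typed handle exposes the underlying shared informer (store, event
	// handlers) and a typed lister that reads from the same cache.
	_ = nodeInformer.Informer()
	_ = nodeInformer.Lister()
}
```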

@@ -29,15 +29,16 @@ import (
     "k8s.io/apimachinery/pkg/util/diff"
     "k8s.io/apimachinery/pkg/util/wait"
     testcore "k8s.io/client-go/testing"
-    "k8s.io/client-go/tools/cache"
     "k8s.io/kubernetes/pkg/api/v1"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+    coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+    extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
     "k8s.io/kubernetes/pkg/cloudprovider"
     fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
     "k8s.io/kubernetes/pkg/controller"
-    "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/controller/node/testutil"
     "k8s.io/kubernetes/pkg/util/node"
 )
@@ -51,6 +52,14 @@ const (
     testUnhealtyThreshold = float32(0.55)
 )
 
+func alwaysReady() bool { return true }
+
+type nodeController struct {
+    *NodeController
+    nodeInformer      coreinformers.NodeInformer
+    daemonSetInformer extensionsinformers.DaemonSetInformer
+}
+
 func NewNodeControllerFromClient(
     cloud cloudprovider.Interface,
     kubeClient clientset.Interface,
@@ -65,21 +74,44 @@ func NewNodeControllerFromClient(
     clusterCIDR *net.IPNet,
     serviceCIDR *net.IPNet,
     nodeCIDRMaskSize int,
-    allocateNodeCIDRs bool) (*NodeController, error) {
-    factory := informers.NewSharedInformerFactory(kubeClient, nil, controller.NoResyncPeriodFunc())
-    nc, err := NewNodeController(factory.Pods(), factory.Nodes(), factory.DaemonSets(), cloud, kubeClient, podEvictionTimeout, evictionLimiterQPS, secondaryEvictionLimiterQPS,
-        largeClusterThreshold, unhealthyZoneThreshold, nodeMonitorGracePeriod, nodeStartupGracePeriod, nodeMonitorPeriod, clusterCIDR,
-        serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs)
+    allocateNodeCIDRs bool) (*nodeController, error) {
+    factory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())
+
+    nodeInformer := factory.Core().V1().Nodes()
+    daemonSetInformer := factory.Extensions().V1beta1().DaemonSets()
+
+    nc, err := NewNodeController(
+        factory.Core().V1().Pods(),
+        nodeInformer,
+        daemonSetInformer,
+        cloud,
+        kubeClient,
+        podEvictionTimeout,
+        evictionLimiterQPS,
+        secondaryEvictionLimiterQPS,
+        largeClusterThreshold,
+        unhealthyZoneThreshold,
+        nodeMonitorGracePeriod,
+        nodeStartupGracePeriod,
+        nodeMonitorPeriod,
+        clusterCIDR,
+        serviceCIDR,
+        nodeCIDRMaskSize,
+        allocateNodeCIDRs,
+    )
     if err != nil {
         return nil, err
     }
-    return nc, nil
+
+    nc.podInformerSynced = alwaysReady
+    nc.nodeInformerSynced = alwaysReady
+    nc.daemonSetInformerSynced = alwaysReady
+
+    return &nodeController{nc, nodeInformer, daemonSetInformer}, nil
 }
 
-func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
+func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
     nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
     if err != nil {
         return err
@@ -88,7 +120,7 @@ func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler
     for i := range nodes.Items {
         newElems = append(newElems, &nodes.Items[i])
     }
-    return nc.nodeStore.Replace(newElems, "newRV")
+    return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
 }
 
 func TestMonitorNodeStatusEvictPods(t *testing.T) {
@@ -518,7 +550,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
             testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
         nodeController.now = func() metav1.Time { return fakeNow }
         for _, ds := range item.daemonSets {
-            nodeController.daemonSetStore.Add(&ds)
+            nodeController.daemonSetInformer.Informer().GetStore().Add(&ds)
         }
         if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -541,7 +573,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
         for _, zone := range zones {
             nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
                 nodeUid, _ := value.UID.(string)
-                deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetStore)
+                deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetInformer.Lister())
                 return true, 0
             })
         }
@@ -1910,8 +1942,7 @@ func TestCheckPod(t *testing.T) {
     }
 
     nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
-    nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
-    nc.nodeStore.Store.Add(&v1.Node{
+    nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: "new",
         },
@@ -1921,7 +1952,7 @@ func TestCheckPod(t *testing.T) {
             },
         },
     })
-    nc.nodeStore.Store.Add(&v1.Node{
+    nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: "old",
         },
@@ -1931,7 +1962,7 @@ func TestCheckPod(t *testing.T) {
             },
         },
     })
-    nc.nodeStore.Store.Add(&v1.Node{
+    nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: "older",
         },
@@ -1941,7 +1972,7 @@ func TestCheckPod(t *testing.T) {
             },
         },
     })
-    nc.nodeStore.Store.Add(&v1.Node{
+    nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: "oldest",
         },
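
A hedged usage sketch of the new test wrapper, assembled from the pieces above (the node name "node0" and the `t` variable are hypothetical, not from the commit): the embedded *NodeController keeps the production behavior, while the retained informer handles let a test seed the shared caches directly instead of reaching into hand-written stores.

```go
// Hypothetical test body; the types and calls are the ones shown in this diff.
nc, err := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(),
    0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
if err != nil {
    t.Fatalf("unexpected error: %v", err)
}

// Seed the shared node cache directly; alwaysReady already reports the
// informers as synced, so no reflector needs to run.
nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
    ObjectMeta: metav1.ObjectMeta{Name: "node0"},
})

// The typed lister reads from the same underlying store.
if _, err := nc.nodeInformer.Lister().Get("node0"); err != nil {
    t.Fatalf("expected node0 in the shared cache: %v", err)
}
```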