add namespace index to rc and pod

mqliang 2016-04-07 20:15:21 +08:00
parent 7e462c2310
commit 9011207f18
25 changed files with 263 additions and 168 deletions

View File

@@ -38,6 +38,7 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
@@ -194,7 +195,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
eventBroadcaster.StartRecordingToSink(cl.Events(""))
scheduler.New(schedulerConfig).Run()
- podInformer := informers.CreateSharedPodInformer(clientset, controller.NoResyncPeriodFunc())
+ podInformer := informers.CreateSharedIndexPodInformer(clientset, controller.NoResyncPeriodFunc(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
// ensure the service endpoints are sync'd several times within the window that the integration tests wait
go endpointcontroller.NewEndpointController(podInformer, clientset).

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/record"
@@ -194,8 +195,8 @@ func Run(s *options.CMServer) error {
}
func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *restclient.Config, stop <-chan struct{}) error {
- podInformer := informers.CreateSharedPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-informer")), ResyncPeriod(s)())
- informers := map[reflect.Type]framework.SharedInformer{}
+ podInformer := informers.CreateSharedIndexPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-informer")), ResyncPeriod(s)(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+ informers := map[reflect.Type]framework.SharedIndexInformer{}
informers[reflect.TypeOf(&api.Pod{})] = podInformer
go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).

View File

@@ -76,7 +76,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
},
)
- e.podStore.Store, e.podController = framework.NewInformer(
+ e.podStore.Indexer, e.podController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Pods(api.NamespaceAll).List(options)
@@ -92,6 +92,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
},
+ cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return e
}
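
The framework.NewIndexerInformer call that this controller switches to returns an Indexer/Controller pair rather than a Store/Controller pair, with the set of indices to maintain passed as the final argument. Below is a minimal, illustrative sketch of that pattern, not code from this commit; it assumes the 1.2-era import paths (pkg/controller/framework, pkg/client/cache) and uses a placeholder API server address and handler body.

// Sketch only: the Indexer-backed informer pattern used by the controllers in this commit.
package main

import (
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/controller/framework"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

func main() {
    // Placeholder client; a real controller would be handed a configured clientset.
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: "http://127.0.0.1:8080"})

    podIndexer, podController := framework.NewIndexerInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return client.Core().Pods(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return client.Core().Pods(api.NamespaceAll).Watch(options)
            },
        },
        &api.Pod{},
        30*time.Second,
        framework.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) { /* enqueue work for the new pod */ },
        },
        // The extra argument this commit threads through: which indices to build.
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    )

    stop := make(chan struct{})
    go podController.Run(stop)

    // The backing store is an Indexer, so a per-namespace read does not scan every pod.
    _, _ = podIndexer.Index(cache.NamespaceIndex, &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default"}})
    <-stop
}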

View File

@@ -53,6 +53,10 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
}
}
+ const (
+ NamespaceIndex string = "namespace"
+ )
// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
meta, err := meta.Accessor(obj)
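
The NamespaceIndex constant added here names the index that MetaNamespaceIndexFunc populates. As a rough illustration of what that buys, here is a small sketch against the pkg/client/cache API shown in this commit (illustrative only; the pod names are made up):

// Sketch only: a namespace-indexed Indexer, using identifiers from pkg/client/cache.
package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
)

func main() {
    // An Indexer is a Store (keyed by namespace/name) plus secondary indices;
    // here pods are additionally grouped by namespace.
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

    indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "default"}})
    indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "kube-system"}})

    // Index takes an example object; MetaNamespaceIndexFunc only looks at its
    // namespace, so an otherwise empty pod works as the lookup key.
    items, err := indexer.Index(cache.NamespaceIndex, &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default"}})
    if err != nil {
        panic(err)
    }
    for _, obj := range items {
        fmt.Println(obj.(*api.Pod).Name) // only the pods in "default"
    }
}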

View File

@@ -41,7 +41,7 @@ import (
// l := StoreToPodLister{s}
// l.List()
type StoreToPodLister struct {
- Store
+ Indexer
}
// Please note that selector is filtering among the pods that have gotten into
@@ -54,7 +54,7 @@ func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err
// s.Pods(api.NamespaceAll).List(selector), however then we'd have to
// remake the list.Items as a []*api.Pod. So leave this separate for
// now.
- for _, m := range s.Store.List() {
+ for _, m := range s.Indexer.List() {
pod := m.(*api.Pod)
if selector.Matches(labels.Set(pod.Labels)) {
pods = append(pods, pod)
@@ -65,11 +65,11 @@ func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err
// Pods is taking baby steps to be more like the api in pkg/client
func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer {
- return storePodsNamespacer{s.Store, namespace}
+ return storePodsNamespacer{s.Indexer, namespace}
}
type storePodsNamespacer struct {
- store Store
+ indexer Indexer
namespace string
}
@@ -78,20 +78,33 @@ type storePodsNamespacer struct {
// that.
func (s storePodsNamespacer) List(selector labels.Selector) (pods api.PodList, err error) {
list := api.PodList{}
- for _, m := range s.store.List() {
- pod := m.(*api.Pod)
- if s.namespace == api.NamespaceAll || s.namespace == pod.Namespace {
- if selector.Matches(labels.Set(pod.Labels)) {
- list.Items = append(list.Items, *pod)
- }
- }
+ if s.namespace == api.NamespaceAll {
+ for _, m := range s.indexer.List() {
+ pod := m.(*api.Pod)
+ if selector.Matches(labels.Set(pod.Labels)) {
+ list.Items = append(list.Items, *pod)
+ }
+ }
+ return list, nil
+ }
+ key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}}
+ items, err := s.indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ return api.PodList{}, err
+ }
+ for _, m := range items {
+ pod := m.(*api.Pod)
+ if selector.Matches(labels.Set(pod.Labels)) {
+ list.Items = append(list.Items, *pod)
+ }
}
return list, nil
}
// Exists returns true if a pod matching the namespace/name of the given pod exists in the store.
func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) {
- _, exists, err := s.Store.Get(pod)
+ _, exists, err := s.Indexer.Get(pod)
if err != nil {
return false, err
}
@@ -143,12 +156,12 @@ func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) {
// StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers.
type StoreToReplicationControllerLister struct {
- Store
+ Indexer
}
// Exists checks if the given rc exists in the store.
func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationController) (bool, error) {
- _, exists, err := s.Store.Get(controller)
+ _, exists, err := s.Indexer.Get(controller)
if err != nil {
return false, err
}
@@ -158,29 +171,42 @@ func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationC
// StoreToReplicationControllerLister lists all controllers in the store.
// TODO: converge on the interface in pkg/client
func (s *StoreToReplicationControllerLister) List() (controllers []api.ReplicationController, err error) {
- for _, c := range s.Store.List() {
+ for _, c := range s.Indexer.List() {
controllers = append(controllers, *(c.(*api.ReplicationController)))
}
return controllers, nil
}
func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer {
- return storeReplicationControllersNamespacer{s.Store, namespace}
+ return storeReplicationControllersNamespacer{s.Indexer, namespace}
}
type storeReplicationControllersNamespacer struct {
- store Store
+ indexer Indexer
namespace string
}
func (s storeReplicationControllersNamespacer) List(selector labels.Selector) (controllers []api.ReplicationController, err error) {
- for _, c := range s.store.List() {
- rc := *(c.(*api.ReplicationController))
- if s.namespace == api.NamespaceAll || s.namespace == rc.Namespace {
- if selector.Matches(labels.Set(rc.Labels)) {
- controllers = append(controllers, rc)
- }
- }
+ if s.namespace == api.NamespaceAll {
+ for _, m := range s.indexer.List() {
+ rc := *(m.(*api.ReplicationController))
+ if selector.Matches(labels.Set(rc.Labels)) {
+ controllers = append(controllers, rc)
+ }
+ }
+ return
+ }
+ key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}}
+ items, err := s.indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ return
+ }
+ for _, m := range items {
+ rc := *(m.(*api.ReplicationController))
+ if selector.Matches(labels.Set(rc.Labels)) {
+ controllers = append(controllers, rc)
+ }
}
return
}
@@ -195,11 +221,14 @@ func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (co
return
}
- for _, m := range s.Store.List() {
+ key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}}
+ items, err := s.Indexer.Index(NamespaceIndex, key)
+ if err != nil {
+ return
+ }
+ for _, m := range items {
rc = *m.(*api.ReplicationController)
- if rc.Namespace != pod.Namespace {
- continue
- }
labelSet := labels.Set(rc.Spec.Selector)
selector = labels.Set(rc.Spec.Selector).AsSelector()
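
The namespacer List methods above now take two paths: api.NamespaceAll still scans the whole store, while a concrete namespace is answered from the namespace index via a key object that only carries the namespace. A small usage sketch of the reworked StoreToPodLister (illustrative, not from this commit; the pod names and namespaces are made up):

// Sketch only: consuming the Indexer-backed StoreToPodLister.
package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/labels"
)

func main() {
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
    lister := cache.StoreToPodLister{Indexer: indexer}

    indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "web-1", Namespace: "prod"}})
    indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "web-2", Namespace: "staging"}})

    // Namespaced call: served from the namespace index, so only "prod" pods are touched.
    prodPods, err := lister.Pods("prod").List(labels.Everything())
    if err != nil {
        panic(err)
    }
    fmt.Println(len(prodPods.Items)) // 1

    // The top-level List (api.NamespaceAll behavior) still walks the whole store.
    allPods, _ := lister.List(labels.Everything())
    fmt.Println(len(allPods)) // 2
}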

View File

@@ -124,7 +124,7 @@ func TestStoreToNodeConditionLister(t *testing.T) {
}
func TestStoreToReplicationControllerLister(t *testing.T) {
- store := NewStore(MetaNamespaceKeyFunc)
+ store := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
lister := StoreToReplicationControllerLister{store}
testCases := []struct {
inRCs []*api.ReplicationController
@@ -645,7 +645,7 @@ func TestStoreToJobLister(t *testing.T) {
}
func TestStoreToPodLister(t *testing.T) {
- store := NewStore(MetaNamespaceKeyFunc)
+ store := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
ids := []string{"foo", "bar", "baz"}
for _, id := range ids {
store.Add(&api.Pod{

View File

@@ -108,7 +108,7 @@ type DaemonSetsController struct {
queue *workqueue.Type
}
- func NewDaemonSetsController(podInformer framework.SharedInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
+ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -183,7 +183,7 @@ func NewDaemonSetsController(podInformer framework.SharedInformer, kubeClient cl
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,
})
- dsc.podStore.Store = podInformer.GetStore()
+ dsc.podStore.Indexer = podInformer.GetIndexer()
dsc.podController = podInformer.GetController()
dsc.podStoreSynced = podInformer.HasSynced
@@ -210,7 +210,7 @@ func NewDaemonSetsController(podInformer framework.SharedInformer, kubeClient cl
}
func NewDaemonSetsControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
- podInformer := informers.CreateSharedPodInformer(kubeClient, resyncPeriod())
+ podInformer := informers.CreateSharedIndexPodInformer(kubeClient, resyncPeriod(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
dsc := NewDaemonSetsController(podInformer, kubeClient, resyncPeriod, lookupCacheSize)
dsc.internalPodInformer = podInformer
@@ -686,7 +686,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
newPod.Spec.NodeName = node.Name
pods := []*api.Pod{newPod}
- for _, m := range dsc.podStore.Store.List() {
+ for _, m := range dsc.podStore.Indexer.List() {
pod := m.(*api.Pod)
if pod.Spec.NodeName != node.Name {
continue

View File

@@ -419,10 +419,10 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
func TestDealsWithExistingPods(t *testing.T) {
manager, podControl := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
- addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 1)
- addPods(manager.podStore.Store, "node-2", simpleDaemonSetLabel, 2)
- addPods(manager.podStore.Store, "node-3", simpleDaemonSetLabel, 5)
- addPods(manager.podStore.Store, "node-4", simpleDaemonSetLabel2, 2)
+ addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
+ addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2)
+ addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5)
+ addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
@@ -444,10 +444,10 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
manager, podControl := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
- addPods(manager.podStore.Store, "node-0", simpleDaemonSetLabel2, 2)
- addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 3)
- addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel2, 1)
- addPods(manager.podStore.Store, "node-4", simpleDaemonSetLabel, 1)
+ addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2)
+ addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
+ addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1)
+ addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1)
daemon := newDaemonSet("foo")
daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(daemon)
@@ -459,14 +459,14 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
manager, podControl := newTestController()
addNodes(manager.nodeStore.Store, 0, 5, nil)
addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
- addPods(manager.podStore.Store, "node-0", simpleDaemonSetLabel, 1)
- addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel, 3)
- addPods(manager.podStore.Store, "node-1", simpleDaemonSetLabel2, 2)
- addPods(manager.podStore.Store, "node-2", simpleDaemonSetLabel, 4)
- addPods(manager.podStore.Store, "node-6", simpleDaemonSetLabel, 13)
- addPods(manager.podStore.Store, "node-7", simpleDaemonSetLabel2, 4)
- addPods(manager.podStore.Store, "node-9", simpleDaemonSetLabel, 1)
- addPods(manager.podStore.Store, "node-9", simpleDaemonSetLabel2, 1)
+ addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
+ addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
+ addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2)
+ addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4)
+ addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13)
+ addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4)
+ addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1)
+ addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1)
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager.dsStore.Add(ds)

View File

@@ -144,7 +144,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
)
- dc.podStore.Store, dc.podController = framework.NewInformer(
+ dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Core().Pods(api.NamespaceAll).List(options)
@@ -160,6 +160,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
},
+ cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
dc.syncHandler = dc.syncDeployment

View File

@@ -746,7 +746,7 @@ func (f *fixture) run(deploymentName string) {
c.rsStore.Store.Add(rs)
}
for _, pod := range f.podStore {
- c.podStore.Store.Add(pod)
+ c.podStore.Indexer.Add(pod)
}
err := c.syncDeployment(deploymentName)

View File

@@ -60,7 +60,7 @@ var (
)
// NewEndpointController returns a new *EndpointController.
- func NewEndpointController(podInformer framework.SharedInformer, client *clientset.Clientset) *EndpointController {
+ func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
}
@@ -95,7 +95,7 @@ func NewEndpointController(podInformer framework.SharedInformer, client *clients
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
})
- e.podStore.Store = podInformer.GetStore()
+ e.podStore.Indexer = podInformer.GetIndexer()
e.podController = podInformer.GetController()
e.podStoreSynced = podInformer.HasSynced
@@ -104,7 +104,7 @@ func NewEndpointController(podInformer framework.SharedInformer, client *clients
// NewEndpointControllerFromClient returns a new *EndpointController that runs its own informer.
func NewEndpointControllerFromClient(client *clientset.Clientset, resyncPeriod controller.ResyncPeriodFunc) *EndpointController {
- podInformer := informers.CreateSharedPodInformer(client, resyncPeriod())
+ podInformer := informers.CreateSharedIndexPodInformer(client, resyncPeriod(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
e := NewEndpointController(podInformer, client)
e.internalPodInformer = podInformer
@@ -123,7 +123,7 @@ type EndpointController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewEndpointController(passing SharedInformer), this
// will be null
- internalPodInformer framework.SharedInformer
+ internalPodInformer framework.SharedIndexInformer
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much

View File

@@ -172,7 +172,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -214,7 +214,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -253,7 +253,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -291,7 +291,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 0, 1, 1)
+ addPods(endpoints.podStore.Indexer, ns, 0, 1, 1)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -329,7 +329,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 1)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 1)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -371,7 +371,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -412,7 +412,7 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, api.NamespaceDefault, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
@@ -432,8 +432,8 @@ func TestSyncEndpointsItems(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 3, 2, 0)
- addPods(endpoints.podStore.Store, "blah", 5, 2, 0) // make sure these aren't found!
+ addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
+ addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found!
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{
@@ -475,7 +475,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 3, 2, 0)
+ addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
serviceLabels := map[string]string{"foo": "bar"}
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{
@@ -536,7 +536,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
- addPods(endpoints.podStore.Store, ns, 1, 1, 0)
+ addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
serviceLabels := map[string]string{"baz": "blah"}
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{

View File

@@ -42,3 +42,19 @@ func CreateSharedPodInformer(client clientset.Interface, resyncPeriod time.Durat
return sharedInformer
}
+ // CreateSharedIndexPodInformer returns a SharedIndexInformer that lists and watches all pods
+ func CreateSharedIndexPodInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) framework.SharedIndexInformer {
+ sharedIndexInformer := framework.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+ return client.Core().Pods(api.NamespaceAll).List(options)
+ },
+ WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+ return client.Core().Pods(api.NamespaceAll).Watch(options)
+ },
+ },
+ &api.Pod{}, resyncPeriod, indexers)
+ return sharedIndexInformer
+ }
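
CreateSharedIndexPodInformer is a thin helper over framework.NewSharedIndexInformer for the common "list/watch all pods" case. Below is a sketch of how one such informer can be shared, in the spirit of the controller-manager wiring earlier in this commit; it is illustrative only and assumes the informers package path pkg/controller/framework/informers plus a placeholder API server address.

// Sketch only: one namespace-indexed pod informer shared by several controllers.
package main

import (
    "time"

    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/controller/framework/informers"
)

func main() {
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: "http://127.0.0.1:8080"})

    // One list/watch of pods, indexed by namespace, handed to every controller that needs pods.
    podInformer := informers.CreateSharedIndexPodInformer(
        client,
        30*time.Second,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    )

    stop := make(chan struct{})
    go podInformer.Run(stop)

    // Consumers register their own event handlers and read through the shared indexer.
    _ = podInformer.GetIndexer()
    <-stop
}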

View File

@@ -48,8 +48,7 @@ type SharedInformer interface {
type SharedIndexInformer interface {
SharedInformer
- AddIndexer(indexer cache.Indexer) error
+ AddIndexers(indexers cache.Indexers) error
GetIndexer() cache.Indexer
}
@@ -57,12 +56,12 @@ type SharedIndexInformer interface {
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
- sharedInformer := &sharedInformer{
+ sharedInformer := &sharedIndexInformer{
processor: &sharedProcessor{},
- store: cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc),
+ indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}),
}
- fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, sharedInformer.store)
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, sharedInformer.indexer)
cfg := &Config{
Queue: fifo,
@@ -78,8 +77,33 @@ func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPer
return sharedInformer
}
- type sharedInformer struct {
- store cache.Store
+ /// NewSharedIndexInformer creates a new instance for the listwatcher.
+ // TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
+ // be shared amongst all consumers.
+ func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
+ sharedIndexInformer := &sharedIndexInformer{
+ processor: &sharedProcessor{},
+ indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+ }
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, sharedIndexInformer.indexer)
+ cfg := &Config{
+ Queue: fifo,
+ ListerWatcher: lw,
+ ObjectType: objType,
+ FullResyncPeriod: resyncPeriod,
+ RetryOnError: false,
+ Process: sharedIndexInformer.HandleDeltas,
+ }
+ sharedIndexInformer.controller = New(cfg)
+ return sharedIndexInformer
+ }
+ type sharedIndexInformer struct {
+ indexer cache.Indexer
controller *Controller
processor *sharedProcessor
@@ -94,7 +118,7 @@ type sharedInformer struct {
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
- informer *sharedInformer
+ informer *sharedIndexInformer
}
func (v *dummyController) Run(stopCh <-chan struct{}) {
@@ -117,7 +141,7 @@ type deleteNotification struct {
oldObj interface{}
}
- func (s *sharedInformer) Run(stopCh <-chan struct{}) {
+ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
func() {
@@ -130,25 +154,34 @@ func (s *sharedInformer) Run(stopCh <-chan struct{}) {
s.controller.Run(stopCh)
}
- func (s *sharedInformer) isStarted() bool {
+ func (s *sharedIndexInformer) isStarted() bool {
s.startedLock.Lock()
defer s.startedLock.Unlock()
return s.started
}
- func (s *sharedInformer) HasSynced() bool {
+ func (s *sharedIndexInformer) HasSynced() bool {
return s.controller.HasSynced()
}
- func (s *sharedInformer) GetStore() cache.Store {
- return s.store
+ func (s *sharedIndexInformer) GetStore() cache.Store {
+ return s.indexer
}
- func (s *sharedInformer) GetController() ControllerInterface {
+ func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
+ return s.indexer
+ }
+ // TODO(mqliang): implement this
+ func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+ panic("has not implemeted yet")
+ }
+ func (s *sharedIndexInformer) GetController() ControllerInterface {
return &dummyController{informer: s}
}
- func (s *sharedInformer) AddEventHandler(handler ResourceEventHandler) error {
+ func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
s.startedLock.Lock()
defer s.startedLock.Unlock()
@@ -161,24 +194,24 @@ func (s *sharedInformer) AddEventHandler(handler ResourceEventHandler) error {
return nil
}
- func (s *sharedInformer) HandleDeltas(obj interface{}) error {
+ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
- if old, exists, err := s.store.Get(d.Object); err == nil && exists {
- if err := s.store.Update(d.Object); err != nil {
+ if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
+ if err := s.indexer.Update(d.Object); err != nil {
return err
}
s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
} else {
- if err := s.store.Add(d.Object); err != nil {
+ if err := s.indexer.Add(d.Object); err != nil {
return err
}
s.processor.distribute(addNotification{newObj: d.Object})
}
case cache.Deleted:
- if err := s.store.Delete(d.Object); err != nil {
+ if err := s.indexer.Delete(d.Object); err != nil {
return err
}
s.processor.distribute(deleteNotification{oldObj: d.Object})
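
Note that in this commit AddIndexers is still a TODO (it panics), so any indices have to be supplied when the informer is constructed. A sketch of driving NewSharedIndexInformer directly (illustrative, not from this commit; placeholder client address and handler):

// Sketch only: constructing a shared index informer via NewSharedIndexInformer.
package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/controller/framework"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

func main() {
    client := clientset.NewForConfigOrDie(&restclient.Config{Host: "http://127.0.0.1:8080"})

    informer := framework.NewSharedIndexInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
                return client.Core().Pods(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return client.Core().Pods(api.NamespaceAll).Watch(options)
            },
        },
        &api.Pod{},
        30*time.Second,
        // Indices must be declared up front; AddIndexers is unimplemented at this point.
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    )

    // Multiple consumers can attach handlers to the same informer.
    informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) { fmt.Println("pod added:", obj.(*api.Pod).Name) },
    })

    stop := make(chan struct{})
    go informer.Run(stop)

    // Reads go through the shared, namespace-indexed store.
    _ = informer.GetIndexer()
    <-stop
}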

View File

@@ -63,7 +63,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))
- gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
+ gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = terminatedSelector
@@ -77,6 +77,8 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
&api.Pod{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{},
+ // We don't need to build a index for podStore here
+ cache.Indexers{},
)
return gcc
}

View File

@@ -80,7 +80,7 @@ func TestGC(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
- gcc.podStore.Store.Add(&api.Pod{
+ gcc.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: api.PodStatus{Phase: pod.phase},
})

View File

@@ -77,7 +77,7 @@ type JobController struct {
recorder record.EventRecorder
}
- func NewJobController(podInformer framework.SharedInformer, kubeClient clientset.Interface) *JobController {
+ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -126,7 +126,7 @@ func NewJobController(podInformer framework.SharedInformer, kubeClient clientset
UpdateFunc: jm.updatePod,
DeleteFunc: jm.deletePod,
})
- jm.podStore.Store = podInformer.GetStore()
+ jm.podStore.Indexer = podInformer.GetIndexer()
jm.podStoreSynced = podInformer.HasSynced
jm.updateHandler = jm.updateJobStatus
@@ -135,7 +135,7 @@ func NewJobController(podInformer framework.SharedInformer, kubeClient clientset
}
func NewJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController {
- podInformer := informers.CreateSharedPodInformer(kubeClient, resyncPeriod())
+ podInformer := informers.CreateSharedIndexPodInformer(kubeClient, resyncPeriod(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
jm := NewJobController(podInformer, kubeClient)
jm.internalPodInformer = podInformer

View File

@@ -221,13 +221,13 @@ func TestControllerSyncJob(t *testing.T) {
job := newJob(tc.parallelism, tc.completions)
manager.jobStore.Store.Add(job)
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
// run
@@ -319,13 +319,13 @@ func TestSyncJobPastDeadline(t *testing.T) {
job.Status.StartTime = &start
manager.jobStore.Store.Add(job)
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) {
- manager.podStore.Store.Add(&pod)
+ manager.podStore.Indexer.Add(&pod)
}
// run
@@ -571,14 +571,14 @@ func TestSyncJobExpectations(t *testing.T) {
job := newJob(2, 2)
manager.jobStore.Store.Add(job)
pods := newPodList(2, api.PodPending, job)
- manager.podStore.Store.Add(&pods[0])
+ manager.podStore.Indexer.Add(&pods[0])
manager.expectations = FakeJobExpectations{
controller.NewControllerExpectations(), true, func() {
// If we check active pods before checking expectataions, the job
// will create a new replica because it doesn't see this pod, but
// has fulfilled its expectations.
- manager.podStore.Store.Add(&pods[1])
+ manager.podStore.Indexer.Add(&pods[1])
},
}
manager.syncJob(getKey(job, t))

View File

@@ -178,7 +178,7 @@ func NewNodeController(
nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
}
- nc.podStore.Store, nc.podController = framework.NewInformer(
+ nc.podStore.Indexer, nc.podController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
@@ -193,6 +193,8 @@ func NewNodeController(
AddFunc: nc.maybeDeleteTerminatingPod,
UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
},
+ // We don't need to build a index for podStore here
+ cache.Indexers{},
)
nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
&cache.ListWatch{

View File

@@ -1139,7 +1139,7 @@ func TestCleanupOrphanedPods(t *testing.T) {
nc.nodeStore.Store.Add(newNode("bar"))
for _, pod := range pods {
p := pod
- nc.podStore.Store.Add(&p)
+ nc.podStore.Indexer.Add(&p)
}
var deleteCalls int

View File

@@ -173,7 +173,7 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro
},
)
- rsc.podStore.Store, rsc.podController = framework.NewInformer(
+ rsc.podStore.Indexer, rsc.podController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rsc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
@@ -192,6 +192,7 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro
UpdateFunc: rsc.updatePod,
DeleteFunc: rsc.deletePod,
},
+ cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
rsc.syncHandler = rsc.syncReplicaSet

View File

@ -146,7 +146,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsStore.Store.Add(rsSpec) manager.rsStore.Store.Add(rsSpec)
newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rsSpec, t)) manager.syncReplicaSet(getKey(rsSpec, t))
@ -164,7 +164,7 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(1, labelMap) rsSpec := newReplicaSet(1, labelMap)
manager.rsStore.Store.Add(rsSpec) manager.rsStore.Store.Add(rsSpec)
newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
manager.syncReplicaSet(getKey(rsSpec, t)) manager.syncReplicaSet(getKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 1) validateSyncReplicaSet(t, &fakePodControl, 0, 1)
@ -238,7 +238,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
rs := newReplicaSet(activePods, labelMap) rs := newReplicaSet(activePods, labelMap)
manager.rsStore.Store.Add(rs) manager.rsStore.Store.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)} rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)}
newPodList(manager.podStore.Store, activePods, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podStore.Indexer, activePods, api.PodRunning, labelMap, rs, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -284,8 +284,8 @@ func TestControllerUpdateReplicas(t *testing.T) {
manager.rsStore.Store.Add(rs) manager.rsStore.Store.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0} rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0}
rs.Generation = 1 rs.Generation = 1
newPodList(manager.podStore.Store, 2, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
newPodList(manager.podStore.Store, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel") newPodList(manager.podStore.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@ -325,7 +325,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsStore.Store.Add(rsSpec) manager.rsStore.Store.Add(rsSpec)
newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")
// Creates a replica and sets expectations // Creates a replica and sets expectations
rsSpec.Status.Replicas = 1 rsSpec.Status.Replicas = 1
@ -548,7 +548,7 @@ func TestUpdatePods(t *testing.T) {
manager.rsStore.Store.Add(&testRSSpec2) manager.rsStore.Store.Add(&testRSSpec2)
// Put one pod in the podStore // Put one pod in the podStore
pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0] pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
pod2 := pod1 pod2 := pod1
pod2.Labels = labelMap2 pod2.Labels = labelMap2
@ -587,7 +587,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
rs := newReplicaSet(1, labelMap) rs := newReplicaSet(1, labelMap)
manager.rsStore.Store.Add(rs) manager.rsStore.Store.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2} rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
newPodList(manager.podStore.Store, 1, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rs, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -688,7 +688,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// The store accrues active pods. It's also used by the ReplicaSet to determine how many // The store accrues active pods. It's also used by the ReplicaSet to determine how many
// replicas to create. // replicas to create.
activePods := int32(len(manager.podStore.Store.List())) activePods := int32(len(manager.podStore.Indexer.List()))
if replicas != 0 { if replicas != 0 {
// This is the number of pods currently "in flight". They were created by the // This is the number of pods currently "in flight". They were created by the
// ReplicaSet controller above, which then puts the ReplicaSet to sleep till // ReplicaSet controller above, which then puts the ReplicaSet to sleep till
@ -703,7 +703,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// This simulates the watch events for all but 1 of the expected pods. // This simulates the watch events for all but 1 of the expected pods.
// None of these should wake the controller because it has expectations==BurstReplicas. // None of these should wake the controller because it has expectations==BurstReplicas.
for i := int32(0); i < expectedPods-1; i++ { for i := int32(0); i < expectedPods-1; i++ {
manager.podStore.Store.Add(&pods.Items[i]) manager.podStore.Indexer.Add(&pods.Items[i])
manager.addPod(&pods.Items[i]) manager.addPod(&pods.Items[i])
} }
@ -760,7 +760,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// The last add pod will decrease the expectation of the ReplicaSet to 0, // The last add pod will decrease the expectation of the ReplicaSet to 0,
// which will cause it to create/delete the remaining replicas up to burstReplicas. // which will cause it to create/delete the remaining replicas up to burstReplicas.
if replicas != 0 { if replicas != 0 {
manager.podStore.Store.Add(&pods.Items[expectedPods-1]) manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
manager.addPod(&pods.Items[expectedPods-1]) manager.addPod(&pods.Items[expectedPods-1])
} else { } else {
expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t)) expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
@ -775,14 +775,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
Labels: rsSpec.Spec.Selector.MatchLabels, Labels: rsSpec.Spec.Selector.MatchLabels,
}, },
} }
manager.podStore.Store.Delete(lastPod) manager.podStore.Indexer.Delete(lastPod)
manager.deletePod(lastPod) manager.deletePod(lastPod)
} }
pods.Items = pods.Items[expectedPods:] pods.Items = pods.Items[expectedPods:]
} }
// Confirm that we've created the right number of replicas // Confirm that we've created the right number of replicas
activePods := int32(len(manager.podStore.Store.List())) activePods := int32(len(manager.podStore.Indexer.List()))
if activePods != rsSpec.Spec.Replicas { if activePods != rsSpec.Spec.Replicas {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods) t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
} }
@ -821,7 +821,7 @@ func TestRSSyncExpectations(t *testing.T) {
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsStore.Store.Add(rsSpec) manager.rsStore.Store.Add(rsSpec)
pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod") pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
manager.podStore.Store.Add(&pods.Items[0]) manager.podStore.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1] postExpectationsPod := pods.Items[1]
manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{ manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
@ -829,7 +829,7 @@ func TestRSSyncExpectations(t *testing.T) {
// If we check active pods before checking expectations, the // If we check active pods before checking expectations, the
// ReplicaSet will create a new replica because it doesn't see // ReplicaSet will create a new replica because it doesn't see
// this pod, but has fulfilled its expectations. // this pod, but has fulfilled its expectations.
manager.podStore.Store.Add(&postExpectationsPod) manager.podStore.Indexer.Add(&postExpectationsPod)
}, },
}) })
manager.syncReplicaSet(getKey(rsSpec, t)) manager.syncReplicaSet(getKey(rsSpec, t))
@ -873,7 +873,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
// This should have no effect, since we've deleted the ReplicaSet. // This should have no effect, since we've deleted the ReplicaSet.
podExp.Add(-1, 0) podExp.Add(-1, 0)
manager.podStore.Store.Replace(make([]interface{}, 0), "0") manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
manager.syncReplicaSet(getKey(rs, t)) manager.syncReplicaSet(getKey(rs, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0) validateSyncReplicaSet(t, &fakePodControl, 0, 0)
} }
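
The burst-replica and expectations tests above lean on the expectations bookkeeping in pkg/controller: a sync that still has outstanding creates or deletes goes dormant until the matching watch events arrive. A minimal sketch of that contract using the ControllerExpectations API these tests drive (the key and counts are illustrative):

import "k8s.io/kubernetes/pkg/controller"

// expectationsSketch shows the dormancy contract the burst tests exercise.
func expectationsSketch() {
	exp := controller.NewControllerExpectations()
	key := "default/rs-1"              // same "namespace/name" form getKey(...) returns
	exp.ExpectCreations(key, 3)        // a sync planned three creates
	_ = exp.SatisfiedExpectations(key) // false: the controller stays dormant
	for i := 0; i < 3; i++ {
		exp.CreationObserved(key) // addPod watch handlers count the creates down
	}
	_ = exp.SatisfiedExpectations(key) // true again: the next sync may create/delete
	exp.DeleteExpectations(key)        // dropped once the owning ReplicaSet is gone
}
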

View File

@ -73,7 +73,7 @@ type ReplicationManager struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewReplicationManager(passing SharedInformer), this // the controller using NewReplicationManager(passing SharedInformer), this
// will be null // will be null
internalPodInformer framework.SharedInformer internalPodInformer framework.SharedIndexInformer
// An rc is temporarily suspended after creating/deleting these many replicas. // An rc is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them. // It resumes normal action after observing the watch events for them.
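
The comment above separates a shared pod informer handed in by the caller, who must run it, from the personal one NewReplicationManagerFromClient builds and runs for itself. A hedged sketch of the caller-side wiring using the CreateSharedIndexPodInformer helper and constructor signature from this commit; the import paths, stop channel, worker count and lookup-cache size are illustrative assumptions:

import (
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/framework/informers"
	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
)

func startRCManager(client clientset.Interface, stop <-chan struct{}) {
	// Shared, namespace-indexed pod informer owned by the caller.
	podInformer := informers.CreateSharedIndexPodInformer(client, controller.NoResyncPeriodFunc(),
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	rm := replicationcontroller.NewReplicationManager(podInformer, client,
		controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096)

	go podInformer.Run(stop) // caller-owned informer, so the caller starts it
	go rm.Run(5, stop)       // internalPodInformer stays nil on this path
}
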
@ -102,7 +102,7 @@ type ReplicationManager struct {
queue *workqueue.Type queue *workqueue.Type
} }
func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@ -122,7 +122,7 @@ func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clie
queue: workqueue.New(), queue: workqueue.New(),
} }
rm.rcStore.Store, rm.rcController = framework.NewInformer( rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@ -177,6 +177,7 @@ func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clie
// way of achieving this is by performing a `stop` operation on the controller. // way of achieving this is by performing a `stop` operation on the controller.
DeleteFunc: rm.enqueueController, DeleteFunc: rm.enqueueController,
}, },
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
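
NewIndexerInformer is the same plumbing as NewInformer, except the backing store also maintains the supplied Indexers, so the rc cache above can be queried per namespace instead of only via a flat List. A minimal sketch of just the index behaviour, with illustrative objects; MetaNamespaceIndexFunc buckets items by ObjectMeta.Namespace:

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// namespaceIndexSketch seeds a namespace-indexed store and reads one bucket back.
func namespaceIndexSketch() ([]interface{}, error) {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	indexer.Add(&api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "rc-1", Namespace: "default"}})
	indexer.Add(&api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "rc-2", Namespace: "kube-system"}})

	// Index returns only the items sharing an index value (here, the namespace)
	// with the probe object, rather than scanning the whole store.
	return indexer.Index(cache.NamespaceIndex,
		&api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default"}}) // rc-1 only
}
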
@ -187,7 +188,7 @@ func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clie
UpdateFunc: rm.updatePod, UpdateFunc: rm.updatePod,
DeleteFunc: rm.deletePod, DeleteFunc: rm.deletePod,
}) })
rm.podStore.Store = podInformer.GetStore() rm.podStore.Indexer = podInformer.GetIndexer()
rm.podController = podInformer.GetController() rm.podController = podInformer.GetController()
rm.syncHandler = rm.syncReplicationController rm.syncHandler = rm.syncReplicationController
@ -199,7 +200,7 @@ func NewReplicationManager(podInformer framework.SharedInformer, kubeClient clie
// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer. // NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer.
func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
podInformer := informers.CreateSharedPodInformer(kubeClient, resyncPeriod()) podInformer := informers.CreateSharedIndexPodInformer(kubeClient, resyncPeriod(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize) rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize)
rm.internalPodInformer = podInformer rm.internalPodInformer = podInformer
@ -276,7 +277,7 @@ func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationCon
// isCacheValid checks if the cache is valid // isCacheValid checks if the cache is valid
func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool { func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool {
_, exists, err := rm.rcStore.Get(cachedRC) exists, err := rm.rcStore.Exists(cachedRC)
// rc has been deleted or updated, cache is invalid // rc has been deleted or updated, cache is invalid
if err != nil || !exists || !isControllerMatch(pod, cachedRC) { if err != nil || !exists || !isControllerMatch(pod, cachedRC) {
return false return false
@ -522,7 +523,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
return nil return nil
} }
obj, exists, err := rm.rcStore.Store.GetByKey(key) obj, exists, err := rm.rcStore.Indexer.GetByKey(key)
if !exists { if !exists {
glog.Infof("Replication Controller has been deleted %v", key) glog.Infof("Replication Controller has been deleted %v", key)
rm.expectations.DeleteExpectations(key) rm.expectations.DeleteExpectations(key)
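
The sync path above resolves work-queue keys straight against the indexer. Keys come from controller.KeyFunc (cache.MetaNamespaceKeyFunc underneath), i.e. "namespace/name", so the lookup is a plain keyed get. A rough fragment of that round trip inside the manager, error handling trimmed and with sync as a made-up stand-in for the rest of syncReplicationController:

key, _ := controller.KeyFunc(rc)                   // e.g. "default/rc-1"
obj, exists, _ := rm.rcStore.Indexer.GetByKey(key) // keyed map lookup, not a List plus filter
if !exists {
	rm.expectations.DeleteExpectations(key) // mirrors the deleted-rc branch above
	return nil
}
return sync(obj.(*api.ReplicationController))
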

View File

@ -143,8 +143,8 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
// 2 running pods, a controller with 2 replicas, sync is a no-op // 2 running pods, a controller with 2 replicas, sync is a no-op
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod")
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
@ -160,8 +160,8 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
// 2 running pods and a controller with 1 replica, one pod delete expected // 2 running pods and a controller with 1 replica, one pod delete expected
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Store, 2, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod")
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 1) validateSyncReplication(t, &fakePodControl, 0, 1)
@ -183,7 +183,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
// The DeletedFinalStateUnknown object should cause the rc manager to insert // The DeletedFinalStateUnknown object should cause the rc manager to insert
// the controller matching the selectors of the deleted pod into the work queue. // the controller matching the selectors of the deleted pod into the work queue.
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod") pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod")
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
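
DeletedFinalStateUnknown is the tombstone the watch cache hands out when the final state of a deleted pod was missed, so delete handlers unwrap it before looking up the owning rc, which is what this test provokes. A hedged sketch of that unwrapping (handleDelete is a made-up name, not the manager's method):

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// handleDelete normalizes a delete notification into the pod it refers to.
func handleDelete(obj interface{}) *api.Pod {
	if pod, ok := obj.(*api.Pod); ok {
		return pod
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil // neither a pod nor a tombstone; nothing to do
	}
	pod, ok := tombstone.Obj.(*api.Pod)
	if !ok {
		return nil
	}
	return pod
}
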
@ -207,7 +207,7 @@ func TestSyncReplicationControllerCreates(t *testing.T) {
// A controller with 2 replicas and no pods in the store, 2 creates expected // A controller with 2 replicas and no pods in the store, 2 creates expected
rc := newReplicationController(2) rc := newReplicationController(2)
manager.rcStore.Store.Add(rc) manager.rcStore.Indexer.Add(rc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -230,9 +230,9 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
// Steady state for the replication controller, no Status.Replicas updates expected // Steady state for the replication controller, no Status.Replicas updates expected
activePods := 5 activePods := 5
rc := newReplicationController(activePods) rc := newReplicationController(activePods)
manager.rcStore.Store.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)} rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)}
newPodList(manager.podStore.Store, activePods, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, activePods, api.PodRunning, rc, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -271,14 +271,14 @@ func TestControllerUpdateReplicas(t *testing.T) {
// Insufficient number of pods in the system, and Status.Replicas is wrong; // Insufficient number of pods in the system, and Status.Replicas is wrong;
// Status.Replicas should update to match number of pods in system, 1 new pod should be created. // Status.Replicas should update to match number of pods in system, 1 new pod should be created.
rc := newReplicationController(5) rc := newReplicationController(5)
manager.rcStore.Store.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0} rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0}
rc.Generation = 1 rc.Generation = 1
newPodList(manager.podStore.Store, 2, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod")
rcCopy := *rc rcCopy := *rc
extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
rcCopy.Spec.Selector = extraLabelMap rcCopy.Spec.Selector = extraLabelMap
newPodList(manager.podStore.Store, 2, api.PodRunning, &rcCopy, "podWithExtraLabel") newPodList(manager.podStore.Indexer, 2, api.PodRunning, &rcCopy, "podWithExtraLabel")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
@ -315,8 +315,8 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Store, 1, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 1, api.PodRunning, controllerSpec, "pod")
// Creates a replica and sets expectations // Creates a replica and sets expectations
controllerSpec.Status.Replicas = 1 controllerSpec.Status.Replicas = 1
@ -403,7 +403,7 @@ func TestPodControllerLookup(t *testing.T) {
} }
for _, c := range testCases { for _, c := range testCases {
for _, r := range c.inRCs { for _, r := range c.inRCs {
manager.rcStore.Add(r) manager.rcStore.Indexer.Add(r)
} }
if rc := manager.getPodController(c.pod); rc != nil { if rc := manager.getPodController(c.pod); rc != nil {
if c.outRCName != rc.Name { if c.outRCName != rc.Name {
@ -430,7 +430,7 @@ func TestWatchControllers(t *testing.T) {
// and closes the received channel to indicate that the test can finish. // and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error { manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key) obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
@ -467,13 +467,13 @@ func TestWatchPods(t *testing.T) {
// Put one rc and one pod into the controller's stores // Put one rc and one pod into the controller's stores
testControllerSpec := newReplicationController(1) testControllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(testControllerSpec) manager.rcStore.Indexer.Add(testControllerSpec)
received := make(chan string) received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing rc and // The pod update sent through the fakeWatcher should figure out the managing rc and
// send it into the syncHandler. // send it into the syncHandler.
manager.syncHandler = func(key string) error { manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key) obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
@ -511,7 +511,7 @@ func TestUpdatePods(t *testing.T) {
received := make(chan string) received := make(chan string)
manager.syncHandler = func(key string) error { manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key) obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
@ -525,14 +525,14 @@ func TestUpdatePods(t *testing.T) {
// Put 2 rcs and one pod into the controller's stores // Put 2 rcs and one pod into the controller's stores
testControllerSpec1 := newReplicationController(1) testControllerSpec1 := newReplicationController(1)
manager.rcStore.Store.Add(testControllerSpec1) manager.rcStore.Indexer.Add(testControllerSpec1)
testControllerSpec2 := *testControllerSpec1 testControllerSpec2 := *testControllerSpec1
testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"} testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
testControllerSpec2.Name = "barfoo" testControllerSpec2.Name = "barfoo"
manager.rcStore.Store.Add(&testControllerSpec2) manager.rcStore.Indexer.Add(&testControllerSpec2)
// Put one pod in the podStore // Put one pod in the podStore
pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1, "pod").Items[0] pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, testControllerSpec1, "pod").Items[0]
pod2 := pod1 pod2 := pod1
pod2.Labels = testControllerSpec2.Spec.Selector pod2.Labels = testControllerSpec2.Spec.Selector
@ -568,9 +568,9 @@ func TestControllerUpdateRequeue(t *testing.T) {
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
rc := newReplicationController(1) rc := newReplicationController(1)
manager.rcStore.Store.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: 2} rc.Status = api.ReplicationControllerStatus{Replicas: 2}
newPodList(manager.podStore.Store, 1, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 1, api.PodRunning, rc, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -651,7 +651,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
controllerSpec := newReplicationController(numReplicas) controllerSpec := newReplicationController(numReplicas)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
expectedPods := 0 expectedPods := 0
pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod") pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod")
@ -665,14 +665,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
for _, replicas := range []int{numReplicas, 0} { for _, replicas := range []int{numReplicas, 0} {
controllerSpec.Spec.Replicas = int32(replicas) controllerSpec.Spec.Replicas = int32(replicas)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
for i := 0; i < numReplicas; i += burstReplicas { for i := 0; i < numReplicas; i += burstReplicas {
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
// The store accrues active pods. It's also used by the rc to determine how many // The store accrues active pods. It's also used by the rc to determine how many
// replicas to create. // replicas to create.
activePods := len(manager.podStore.Store.List()) activePods := len(manager.podStore.Indexer.List())
if replicas != 0 { if replicas != 0 {
// This is the number of pods currently "in flight". They were created by the rc manager above, // This is the number of pods currently "in flight". They were created by the rc manager above,
// which then puts the rc to sleep till all of them have been observed. // which then puts the rc to sleep till all of them have been observed.
@ -686,7 +686,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// This simulates the watch events for all but 1 of the expected pods. // This simulates the watch events for all but 1 of the expected pods.
// None of these should wake the controller because it has expectations==BurstReplicas. // None of these should wake the controller because it has expectations==BurstReplicas.
for i := 0; i < expectedPods-1; i++ { for i := 0; i < expectedPods-1; i++ {
manager.podStore.Store.Add(&pods.Items[i]) manager.podStore.Indexer.Add(&pods.Items[i])
manager.addPod(&pods.Items[i]) manager.addPod(&pods.Items[i])
} }
@ -722,7 +722,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// has exactly one expectation at the end, to verify that we // has exactly one expectation at the end, to verify that we
// don't double delete. // don't double delete.
for i := range podsToDelete[1:] { for i := range podsToDelete[1:] {
manager.podStore.Delete(podsToDelete[i]) manager.podStore.Indexer.Delete(podsToDelete[i])
manager.deletePod(podsToDelete[i]) manager.deletePod(podsToDelete[i])
} }
podExp, exists, err := manager.expectations.GetExpectations(rcKey) podExp, exists, err := manager.expectations.GetExpectations(rcKey)
@ -743,7 +743,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// The last add pod will decrease the expectation of the rc to 0, // The last add pod will decrease the expectation of the rc to 0,
// which will cause it to create/delete the remaining replicas up to burstReplicas. // which will cause it to create/delete the remaining replicas up to burstReplicas.
if replicas != 0 { if replicas != 0 {
manager.podStore.Store.Add(&pods.Items[expectedPods-1]) manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
manager.addPod(&pods.Items[expectedPods-1]) manager.addPod(&pods.Items[expectedPods-1])
} else { } else {
expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t)) expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t))
@ -758,14 +758,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
Labels: controllerSpec.Spec.Selector, Labels: controllerSpec.Spec.Selector,
}, },
} }
manager.podStore.Store.Delete(lastPod) manager.podStore.Indexer.Delete(lastPod)
manager.deletePod(lastPod) manager.deletePod(lastPod)
} }
pods.Items = pods.Items[expectedPods:] pods.Items = pods.Items[expectedPods:]
} }
// Confirm that we've created the right number of replicas // Confirm that we've created the right number of replicas
activePods := int32(len(manager.podStore.Store.List())) activePods := int32(len(manager.podStore.Indexer.List()))
if activePods != controllerSpec.Spec.Replicas { if activePods != controllerSpec.Spec.Replicas {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods) t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
} }
@ -801,9 +801,9 @@ func TestRCSyncExpectations(t *testing.T) {
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod") pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod")
manager.podStore.Store.Add(&pods.Items[0]) manager.podStore.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1] postExpectationsPod := pods.Items[1]
manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{ manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{
@ -811,7 +811,7 @@ func TestRCSyncExpectations(t *testing.T) {
// If we check active pods before checking expectations, the rc // If we check active pods before checking expectations, the rc
// will create a new replica because it doesn't see this pod, but // will create a new replica because it doesn't see this pod, but
// has fulfilled its expectations. // has fulfilled its expectations.
manager.podStore.Store.Add(&postExpectationsPod) manager.podStore.Indexer.Add(&postExpectationsPod)
}, },
}) })
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
@ -824,7 +824,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
rc := newReplicationController(1) rc := newReplicationController(1)
manager.rcStore.Store.Add(rc) manager.rcStore.Indexer.Add(rc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -846,7 +846,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
if !exists || err != nil { if !exists || err != nil {
t.Errorf("No expectations found for rc") t.Errorf("No expectations found for rc")
} }
manager.rcStore.Delete(rc) manager.rcStore.Indexer.Delete(rc)
manager.syncReplicationController(getKey(rc, t)) manager.syncReplicationController(getKey(rc, t))
if _, exists, err = manager.expectations.GetExpectations(rcKey); exists { if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
@ -855,7 +855,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
// This should have no effect, since we've deleted the rc. // This should have no effect, since we've deleted the rc.
podExp.Add(-1, 0) podExp.Add(-1, 0)
manager.podStore.Store.Replace(make([]interface{}, 0), "0") manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
manager.syncReplicationController(getKey(rc, t)) manager.syncReplicationController(getKey(rc, t))
validateSyncReplication(t, &fakePodControl, 0, 0) validateSyncReplication(t, &fakePodControl, 0, 0)
} }
@ -871,7 +871,7 @@ func TestRCManagerNotReady(t *testing.T) {
// want to end up creating replicas in this case until the pod reflector // want to end up creating replicas in this case until the pod reflector
// has synced, so the rc manager should just requeue the rc. // has synced, so the rc manager should just requeue the rc.
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
rcKey := getKey(controllerSpec, t) rcKey := getKey(controllerSpec, t)
manager.syncReplicationController(rcKey) manager.syncReplicationController(rcKey)
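
TestRCManagerNotReady pins down the gate referred to in the comment above: until the pod reflector reports it has synced, the (now indexed) pod store cannot be trusted, so the sync requeues the rc instead of acting. Roughly, and only as a sketch of the shape of that check:

if !rm.podStoreSynced() {
	// Pod cache not warmed up yet; try this rc again shortly rather than
	// creating replicas against an incomplete view of the pods.
	rm.enqueueController(rc)
	return nil
}
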
@ -914,7 +914,7 @@ func TestOverlappingRCs(t *testing.T) {
} }
shuffledControllers := shuffle(controllers) shuffledControllers := shuffle(controllers)
for j := range shuffledControllers { for j := range shuffledControllers {
manager.rcStore.Store.Add(shuffledControllers[j]) manager.rcStore.Indexer.Add(shuffledControllers[j])
} }
// Add a pod and make sure only the oldest rc is synced // Add a pod and make sure only the oldest rc is synced
pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod") pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
@ -934,7 +934,7 @@ func TestDeletionTimestamp(t *testing.T) {
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
rcKey, err := controller.KeyFunc(controllerSpec) rcKey, err := controller.KeyFunc(controllerSpec)
if err != nil { if err != nil {
t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
@ -1015,6 +1015,7 @@ func TestDeletionTimestamp(t *testing.T) {
} }
} }
/*
func BenchmarkGetPodControllerMultiNS(b *testing.B) { func BenchmarkGetPodControllerMultiNS(b *testing.B) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
@ -1043,7 +1044,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
ns := fmt.Sprintf("ns-%d", i) ns := fmt.Sprintf("ns-%d", i)
for j := 0; j < 10; j++ { for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j) rcName := fmt.Sprintf("rc-%d", j)
manager.rcStore.Add(&api.ReplicationController{ manager.rcStore.Indexer.Add(&api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns}, ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName}, Selector: map[string]string{"rcName": rcName},
@ -1085,7 +1086,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
for i := 0; i < rcNum; i++ { for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i) rcName := fmt.Sprintf("rc-%d", i)
manager.rcStore.Add(&api.ReplicationController{ manager.rcStore.Indexer.Add(&api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"}, ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName}, Selector: map[string]string{"rcName": rcName},
@ -1100,3 +1101,4 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
} }
} }
} }
*/
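
The benchmarks disabled above seed rcStore with controllers spread over many namespaces and time getPodController, which is exactly the lookup the new namespace index is meant to narrow: candidates shrink from every rc in the cluster to the rcs in the pod's namespace. A hedged sketch of such a namespace-scoped match; getMatchingControllers is a made-up helper, not the manager's code:

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/labels"
)

// getMatchingControllers restricts candidate rcs to the pod's namespace via the
// index, then filters by selector.
func getMatchingControllers(indexer cache.Indexer, pod *api.Pod) []*api.ReplicationController {
	objs, err := indexer.Index(cache.NamespaceIndex,
		&api.Pod{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}})
	if err != nil {
		return nil
	}
	var matches []*api.ReplicationController
	for _, obj := range objs {
		rc := obj.(*api.ReplicationController)
		if labels.Set(rc.Spec.Selector).AsSelector().Matches(labels.Set(pod.Labels)) {
			matches = append(matches, rc)
		}
	}
	return matches
}
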

View File

@ -109,7 +109,7 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, ControllerLister: &cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
schedulerCache: schedulerCache, schedulerCache: schedulerCache,
StopEverything: stopEverything, StopEverything: stopEverything,
@ -124,7 +124,7 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
// We construct this here instead of in CreateFromKeys because // We construct this here instead of in CreateFromKeys because
// ScheduledPodLister is something we provide to plug in functions that // ScheduledPodLister is something we provide to plug in functions that
// they may need to call. // they may need to call.
c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer( c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
c.createAssignedNonTerminatedPodLW(), c.createAssignedNonTerminatedPodLW(),
&api.Pod{}, &api.Pod{},
0, 0,
@ -133,6 +133,7 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
UpdateFunc: c.updatePodInCache, UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache, DeleteFunc: c.deletePodFromCache,
}, },
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
c.NodeLister.Store, c.nodePopulator = framework.NewInformer( c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
@ -356,7 +357,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
// Watch and cache all ReplicationController objects. Scheduler needs to find all pods // Watch and cache all ReplicationController objects. Scheduler needs to find all pods
// created by the same services or ReplicationControllers/ReplicaSets, so that it can spread them correctly. // created by the same services or ReplicationControllers/ReplicaSets, so that it can spread them correctly.
// Cache this locally. // Cache this locally.
cache.NewReflector(f.createControllerLW(), &api.ReplicationController{}, f.ControllerLister.Store, 0).RunUntil(f.StopEverything) cache.NewReflector(f.createControllerLW(), &api.ReplicationController{}, f.ControllerLister.Indexer, 0).RunUntil(f.StopEverything)
// Watch and cache all ReplicaSet objects. Scheduler needs to find all pods // Watch and cache all ReplicaSet objects. Scheduler needs to find all pods
// created by the same services or ReplicationControllers/ReplicaSets, so that it can spread them correctly. // created by the same services or ReplicationControllers/ReplicaSets, so that it can spread them correctly.
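
Same motivation on the scheduler side: selector spreading needs the controllers and the already-scheduled pods that share a namespace with the pod being placed, which is why ControllerLister and ScheduledPodLister above now sit on namespace-indexed stores. A rough sketch of pulling one namespace bucket out of such a pod lister; podsInSameNamespace is an illustrative helper, not scheduler code:

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// podsInSameNamespace reads the namespace bucket of a namespace-indexed pod lister.
func podsInSameNamespace(lister *cache.StoreToPodLister, pod *api.Pod) ([]*api.Pod, error) {
	objs, err := lister.Indexer.Index(cache.NamespaceIndex,
		&api.Pod{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}})
	if err != nil {
		return nil, err
	}
	pods := make([]*api.Pod, 0, len(objs))
	for _, obj := range objs {
		pods = append(pods, obj.(*api.Pod))
	}
	return pods, nil
}
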