Merge pull request #32718 from mikedanese/mv-informer

Automatic merge from submit-queue

move informer and controller to pkg/client/cache

@kubernetes/sig-api-machinery
Authored by Kubernetes Submit Queue on 2016-09-15 16:44:30 -07:00; committed by GitHub.
commit fcc97f37ee
78 changed files with 425 additions and 498 deletions
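
For downstream code the change is mechanical: the informer and controller machinery moves from pkg/controller/framework into pkg/client/cache, so framework.NewInformer, framework.NewIndexerInformer, framework.Controller, and framework.ResourceEventHandlerFuncs become cache.NewInformer, cache.NewIndexerInformer, cache.Controller, and cache.ResourceEventHandlerFuncs; the shared-informer helpers likewise move from pkg/controller/framework/informers to pkg/controller/informers. A minimal sketch of a caller after this PR, following the patterns in the hunks below (the handler body and the 30-second resync period are illustrative assumptions, not part of the PR):

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/cache" // was "k8s.io/kubernetes/pkg/controller/framework"
    )

    // newNodeInformer wires an informer the same way the mesos scheduler hunk
    // below does, but against the post-move package.
    func newNodeInformer(lw cache.ListerWatcher) (cache.Store, *cache.Controller) {
        return cache.NewInformer(
            lw,             // e.g. cache.NewListWatchFromClient(...)
            &api.Node{},    // the type to watch
            30*time.Second, // resync period (illustrative)
            cache.ResourceEventHandlerFuncs{
                DeleteFunc: func(obj interface{}) {
                    // handle deletes; see the tombstone note after the
                    // controller.go hunk further down
                },
            },
        )
    }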

@@ -43,7 +43,7 @@ import (
 "k8s.io/kubernetes/pkg/apiserver/authenticator"
 "k8s.io/kubernetes/pkg/capabilities"
 "k8s.io/kubernetes/pkg/cloudprovider"
-"k8s.io/kubernetes/pkg/controller/framework/informers"
+"k8s.io/kubernetes/pkg/controller/informers"
 serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 "k8s.io/kubernetes/pkg/genericapiserver"
 "k8s.io/kubernetes/pkg/genericapiserver/authorizer"

@@ -51,9 +51,9 @@ import (
 "k8s.io/kubernetes/pkg/controller/deployment"
 "k8s.io/kubernetes/pkg/controller/disruption"
 endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
-"k8s.io/kubernetes/pkg/controller/framework/informers"
 "k8s.io/kubernetes/pkg/controller/garbagecollector"
 "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
+"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/controller/job"
 namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
 nodecontroller "k8s.io/kubernetes/pkg/controller/node"

@@ -79,7 +79,6 @@ import (
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos"
-controllerfw "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/healthz"
 "k8s.io/kubernetes/pkg/master/ports"
@@ -786,10 +785,10 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
 log.Fatalf("Cannot create client to watch nodes: %v", err)
 }
 nodeLW := cache.NewListWatchFromClient(nodesClient.CoreClient, "nodes", api.NamespaceAll, fields.Everything())
-nodeStore, nodeCtl := controllerfw.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &controllerfw.ResourceEventHandlerFuncs{
+nodeStore, nodeCtl := cache.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &cache.ResourceEventHandlerFuncs{
 DeleteFunc: func(obj interface{}) {
 if eiRegistry != nil {
-// TODO(jdef) use controllerfw.DeletionHandlingMetaNamespaceKeyFunc at some point?
+// TODO(jdef) use cache.DeletionHandlingMetaNamespaceKeyFunc at some point?
 nodeName := ""
 if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
 nodeName = tombstone.Key

@@ -29,7 +29,6 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 kservice "k8s.io/kubernetes/pkg/controller/endpoint"
-"k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/intstr"
@@ -43,7 +42,7 @@ import (
 )
 var (
-keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
+keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )
 type EndpointController interface {
@@ -56,7 +55,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
 client: client,
 queue: workqueue.New(),
 }
-e.serviceStore.Store, e.serviceController = framework.NewInformer(
+e.serviceStore.Store, e.serviceController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return e.client.Core().Services(api.NamespaceAll).List(options)
@@ -67,7 +66,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
 },
 &api.Service{},
 kservice.FullServiceResyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 AddFunc: e.enqueueService,
 UpdateFunc: func(old, cur interface{}) {
 e.enqueueService(cur)
@@ -76,7 +75,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
 },
 )
-e.podStore.Indexer, e.podController = framework.NewIndexerInformer(
+e.podStore.Indexer, e.podController = cache.NewIndexerInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return e.client.Core().Pods(api.NamespaceAll).List(options)
@@ -87,7 +86,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
 },
 &api.Pod{},
 5*time.Minute,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 AddFunc: e.addPod,
 UpdateFunc: e.updatePod,
 DeleteFunc: e.deletePod,
@@ -113,8 +112,8 @@ type endpointController struct {
 // Since we join two objects, we'll watch both of them with
 // controllers.
-serviceController *framework.Controller
-podController *framework.Controller
+serviceController *cache.Controller
+podController *cache.Controller
 }
 // Runs e; will not return until stopCh is closed. workers determines how many
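
Note how this controller pairs cache.NewInformer (services, plain keyed store) with cache.NewIndexerInformer (pods, indexed store). A hedged sketch of the indexer variant in isolation, using the namespace index from pkg/client/cache (the listwatcher lw and the helper name are assumptions for illustration):

    // newPodIndexerInformer mirrors the pod informer above, adding a
    // namespace index; lw is an assumed cache.ListerWatcher over pods.
    func newPodIndexerInformer(lw cache.ListerWatcher) (cache.Indexer, *cache.Controller) {
        return cache.NewIndexerInformer(
            lw,
            &api.Pod{},
            5*time.Minute, // resync period, matching the hunk above
            cache.ResourceEventHandlerFuncs{}, // no-op handlers for brevity
            cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
        )
    }

The returned Indexer supports ByIndex("namespace", ...) lookups, which a plain Store cannot provide.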

@@ -16,7 +16,7 @@ limitations under the License.
 // Package app does all of the work necessary to create a Kubernetes
 // APIServer by binding together the API, master and APIServer infrastructure.
-// It can be configured and called directly or via the hyperkube framework.
+// It can be configured and called directly or via the hyperkube cache.
 package app
 import (
@@ -33,7 +33,7 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/apis/rbac"
 "k8s.io/kubernetes/pkg/apiserver/authenticator"
-"k8s.io/kubernetes/pkg/controller/framework/informers"
+"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/genericapiserver"
 "k8s.io/kubernetes/pkg/genericapiserver/authorizer"
 genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"

@@ -27,7 +27,6 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
 utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -50,7 +49,7 @@ type ClusterController struct {
 clusterKubeClientMap map[string]ClusterClient
 // cluster framework and store
-clusterController *framework.Controller
+clusterController *cache.Controller
 clusterStore cluster_cache.StoreToClusterLister
 }
@@ -63,7 +62,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
 clusterClusterStatusMap: make(map[string]federation_v1beta1.ClusterStatus),
 clusterKubeClientMap: make(map[string]ClusterClient),
 }
-cc.clusterStore.Store, cc.clusterController = framework.NewInformer(
+cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return cc.federationClient.Federation().Clusters().List(options)
@@ -74,7 +73,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
 },
 &federation_v1beta1.Cluster{},
 controller.NoResyncPeriodFunc(),
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 DeleteFunc: cc.delFromClusterSet,
 AddFunc: cc.addToClusterSet,
 },

@@ -33,7 +33,6 @@ import (
 kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/conversion"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/types"
@@ -81,7 +80,7 @@ type IngressController struct {
 // Definitions of ingresses that should be federated.
 ingressInformerStore cache.Store
 // Informer controller for ingresses that should be federated.
-ingressInformerController framework.ControllerInterface
+ingressInformerController cache.ControllerInterface
 // Client to federated api server.
 federatedApiClient federation_release_1_4.Interface
@@ -125,7 +124,7 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
 ic.configMapDeliverer = util.NewDelayingDeliverer()
 // Start informer in federated API servers on ingresses that should be federated.
-ic.ingressInformerStore, ic.ingressInformerController = framework.NewInformer(
+ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return client.Extensions().Ingresses(api.NamespaceAll).List(options)
@@ -145,8 +144,8 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
 // Federated informer on ingresses in members of federation.
 ic.ingressFederatedInformer = util.NewFederatedInformer(
 client,
-func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
@@ -177,9 +176,9 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
 // Federated informer on configmaps for ingress controllers in members of the federation.
 ic.configMapFederatedInformer = util.NewFederatedInformer(
 client,
-func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
+func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
 glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
-return framework.NewInformer(
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 if targetClient == nil {

@@ -32,7 +32,6 @@ import (
 kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/flowcontrol"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -62,7 +61,7 @@ type NamespaceController struct {
 // Definitions of namespaces that should be federated.
 namespaceInformerStore cache.Store
 // Informer controller for namespaces that should be federated.
-namespaceInformerController framework.ControllerInterface
+namespaceInformerController cache.ControllerInterface
 // Client to federated api server.
 federatedApiClient federation_release_1_4.Interface
@@ -100,7 +99,7 @@ func NewNamespaceController(client federation_release_1_4.Interface) *NamespaceC
 nc.clusterDeliverer = util.NewDelayingDeliverer()
 // Start informer in federated API servers on namespaces that should be federated.
-nc.namespaceInformerStore, nc.namespaceInformerController = framework.NewInformer(
+nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return client.Core().Namespaces().List(options)
@@ -116,8 +115,8 @@ func NewNamespaceController(client federation_release_1_4.Interface) *NamespaceC
 // Federated informer on namespaces in members of federation.
 nc.namespaceFederatedInformer = util.NewFederatedInformer(
 client,
-func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return targetClient.Core().Namespaces().List(options)

@@ -39,7 +39,6 @@ import (
 kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/flowcontrol"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -79,7 +78,7 @@ func parseFederationReplicaSetReference(frs *extensionsv1.ReplicaSet) (*fed.Fede
 type ReplicaSetController struct {
 fedClient fedclientset.Interface
-replicaSetController *framework.Controller
+replicaSetController *cache.Controller
 replicaSetStore cache.StoreToReplicaSetLister
 fedReplicaSetInformer fedutil.FederatedInformer
@@ -118,8 +117,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
 eventRecorder: recorder,
 }
-replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
@@ -145,8 +144,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
 }
 frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)
-podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
@@ -166,7 +165,7 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
 }
 frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})
-frsc.replicaSetStore.Store, frsc.replicaSetController = framework.NewInformer(
+frsc.replicaSetStore.Store, frsc.replicaSetController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)

@@ -30,7 +30,6 @@ import (
 kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/flowcontrol"
 "k8s.io/kubernetes/pkg/watch"
@@ -59,7 +58,7 @@ type SecretController struct {
 // Definitions of secrets that should be federated.
 secretInformerStore cache.Store
 // Informer controller for secrets that should be federated.
-secretInformerController framework.ControllerInterface
+secretInformerController cache.ControllerInterface
 // Client to federated api server.
 federatedApiClient federation_release_1_4.Interface
@@ -97,7 +96,7 @@ func NewSecretController(client federation_release_1_4.Interface) *SecretControl
 secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()
 // Start informer in federated API servers on secrets that should be federated.
-secretcontroller.secretInformerStore, secretcontroller.secretInformerController = framework.NewInformer(
+secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return client.Core().Secrets(api_v1.NamespaceAll).List(options)
@@ -113,8 +112,8 @@ func NewSecretController(client federation_release_1_4.Interface) *SecretControl
 // Federated informer on secrets in members of federation.
 secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
 client,
-func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return targetClient.Core().Secrets(api_v1.NamespaceAll).List(options)

@@ -25,7 +25,6 @@ import (
 cache "k8s.io/kubernetes/pkg/client/cache"
 release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/restclient"
-"k8s.io/kubernetes/pkg/controller/framework"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/wait"
 "k8s.io/kubernetes/pkg/util/workqueue"
@@ -43,11 +42,11 @@ type clusterCache struct {
 // A store of services, populated by the serviceController
 serviceStore cache.StoreToServiceLister
 // Watches changes to all services
-serviceController *framework.Controller
+serviceController *cache.Controller
 // A store of endpoint, populated by the serviceController
 endpointStore cache.StoreToEndpointsLister
 // Watches changes to all endpoints
-endpointController *framework.Controller
+endpointController *cache.Controller
 // services that need to be synced
 serviceQueue *workqueue.Type
 // endpoints that need to be synced
@@ -91,7 +90,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
 serviceQueue: workqueue.New(),
 endpointQueue: workqueue.New(),
 }
-cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = framework.NewInformer(
+cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
@@ -102,7 +101,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
 },
 &v1.Endpoints{},
 serviceSyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
 cc.enqueueEndpoint(obj, clusterName)
 },
@@ -115,7 +114,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
 },
 )
-cachedClusterClient.serviceStore.Store, cachedClusterClient.serviceController = framework.NewInformer(
+cachedClusterClient.serviceStore.Store, cachedClusterClient.serviceController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return clientset.Core().Services(v1.NamespaceAll).List(options)
@@ -126,7 +125,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
 },
 &v1.Service{},
 serviceSyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
 cc.enqueueService(obj, clusterName)
 },

@@ -35,7 +35,6 @@ import (
 release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/framework"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/runtime"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -109,11 +108,11 @@ type ServiceController struct {
 // A store of services, populated by the serviceController
 serviceStore cache.StoreToServiceLister
 // Watches changes to all services
-serviceController *framework.Controller
+serviceController *cache.Controller
 // A store of services, populated by the serviceController
 clusterStore federationcache.StoreToClusterLister
 // Watches changes to all services
-clusterController *framework.Controller
+clusterController *cache.Controller
 eventBroadcaster record.EventBroadcaster
 eventRecorder record.EventRecorder
 // services that need to be synced
@@ -145,7 +144,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
 queue: workqueue.New(),
 knownClusterSet: make(sets.String),
 }
-s.serviceStore.Store, s.serviceController = framework.NewInformer(
+s.serviceStore.Store, s.serviceController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return s.federationClient.Core().Services(v1.NamespaceAll).List(options)
@@ -156,7 +155,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
 },
 &v1.Service{},
 serviceSyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 AddFunc: s.enqueueService,
 UpdateFunc: func(old, cur interface{}) {
 // there is case that old and new are equals but we still catch the event now.
@@ -167,7 +166,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
 DeleteFunc: s.enqueueService,
 },
 )
-s.clusterStore.Store, s.clusterController = framework.NewInformer(
+s.clusterStore.Store, s.clusterController = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return s.federationClient.Federation().Clusters().List(options)
@@ -178,7 +177,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
 },
 &v1beta1.Cluster{},
 clusterSyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 DeleteFunc: s.clusterCache.delFromClusterSet,
 AddFunc: s.clusterCache.addToClientMap,
 UpdateFunc: func(old, cur interface{}) {

@@ -29,7 +29,6 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 "k8s.io/kubernetes/pkg/client/restclient"
-"k8s.io/kubernetes/pkg/controller/framework"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/watch"
@@ -111,8 +110,8 @@ type FederatedInformerForTestOnly interface {
 }
 // A function that should be used to create an informer on the target object. Store should use
-// framework.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
-type TargetInformerFactory func(*federation_api.Cluster, kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface)
+// cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
+type TargetInformerFactory func(*federation_api.Cluster, kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface)
 // A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired)
 // when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired)
@@ -154,7 +153,7 @@ func NewFederatedInformer(
 return data
 }
-federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = framework.NewInformer(
+federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
 return federationClient.Federation().Clusters().List(options)
@@ -165,7 +164,7 @@ func NewFederatedInformer(
 },
 &federation_api.Cluster{},
 clusterSyncPeriod,
-framework.ResourceEventHandlerFuncs{
+cache.ResourceEventHandlerFuncs{
 DeleteFunc: func(old interface{}) {
 oldCluster, ok := old.(*federation_api.Cluster)
 if ok {
@@ -238,7 +237,7 @@ func isClusterReady(cluster *federation_api.Cluster) bool {
 }
 type informer struct {
-controller framework.ControllerInterface
+controller cache.ControllerInterface
 store cache.Store
 stopChan chan struct{}
 }
@@ -455,7 +454,7 @@ func (fs *federatedStoreImpl) GetFromAllClusters(key string) ([]FederatedObject,
 // GetKeyFor returns the key under which the item would be put in the store.
 func (fs *federatedStoreImpl) GetKeyFor(item interface{}) string {
 // TODO: support other keying functions.
-key, _ := framework.DeletionHandlingMetaNamespaceKeyFunc(item)
+key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(item)
 return key
 }

@@ -28,7 +28,6 @@ import (
 kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
 fake_kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4/fake"
 "k8s.io/kubernetes/pkg/client/testing/core"
-"k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/watch"
@@ -79,8 +78,8 @@ func TestFederatedInformer(t *testing.T) {
 return true, watch.NewFake(), nil
 })
-targetInformerFactory := func(cluster *federation_api.Cluster, clientset kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
-return framework.NewInformer(
+targetInformerFactory := func(cluster *federation_api.Cluster, clientset kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
+return cache.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 return clientset.Core().Services(api_v1.NamespaceAll).List(options)
@@ -91,7 +90,7 @@ func TestFederatedInformer(t *testing.T) {
 },
 &api_v1.Service{},
 10*time.Second,
-framework.ResourceEventHandlerFuncs{})
+cache.ResourceEventHandlerFuncs{})
 }
 addedClusters := make(chan string, 1)

@@ -21,14 +21,14 @@ import (
 "reflect"
 api_v1 "k8s.io/kubernetes/pkg/api/v1"
-"k8s.io/kubernetes/pkg/controller/framework"
+"k8s.io/kubernetes/pkg/client/cache"
 pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 )
-// Returns framework.ResourceEventHandlerFuncs that trigger the given function
+// Returns cache.ResourceEventHandlerFuncs that trigger the given function
 // on all object changes.
-func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *framework.ResourceEventHandlerFuncs {
-return &framework.ResourceEventHandlerFuncs{
+func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
+return &cache.ResourceEventHandlerFuncs{
 DeleteFunc: func(old interface{}) {
 oldObj := old.(pkg_runtime.Object)
 triggerFunc(oldObj)
@@ -46,9 +46,9 @@ func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *framework.Res
 }
 }
-// Returns framework.ResourceEventHandlerFuncs that trigger the given function
+// Returns cache.ResourceEventHandlerFuncs that trigger the given function
 // on object add and delete as well as spec/object meta on update.
-func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *framework.ResourceEventHandlerFuncs {
+func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
 getFieldOrPanic := func(obj interface{}, fieldName string) interface{} {
 val := reflect.ValueOf(obj).Elem().FieldByName(fieldName)
 if val.IsValid() {
@@ -57,7 +57,7 @@ func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *frame
 panic(fmt.Errorf("field not found: %s", fieldName))
 }
 }
-return &framework.ResourceEventHandlerFuncs{
+return &cache.ResourceEventHandlerFuncs{
 DeleteFunc: func(old interface{}) {
 oldObj := old.(pkg_runtime.Object)
 triggerFunc(oldObj)

@@ -17,7 +17,7 @@ limitations under the License.
 package admission
 import (
-"k8s.io/kubernetes/pkg/controller/framework/informers"
+"k8s.io/kubernetes/pkg/controller/informers"
 )
 // PluginInitializer is used for initialization of shareable resources between admission plugins.

@@ -17,7 +17,7 @@ limitations under the License.
 package admission
 import (
-"k8s.io/kubernetes/pkg/controller/framework/informers"
+"k8s.io/kubernetes/pkg/controller/informers"
 )
 // Validator holds Validate functions, which are responsible for validation of initialized shared resources

@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package framework
+package cache
 import (
 "sync"
 "time"
-"k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/runtime"
 utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -28,13 +27,13 @@ import (
 // Config contains all the settings for a Controller.
 type Config struct {
-// The queue for your objects; either a cache.FIFO or
-// a cache.DeltaFIFO. Your Process() function should accept
+// The queue for your objects; either a FIFO or
+// a DeltaFIFO. Your Process() function should accept
 // the output of this Oueue's Pop() method.
-cache.Queue
+Queue
 // Something that can list and watch your objects.
-cache.ListerWatcher
+ListerWatcher
 // Something that can process your objects.
 Process ProcessFunc
@@ -45,7 +44,7 @@ type Config struct {
 // Reprocess everything at least this often.
 // Note that if it takes longer for you to clear the queue than this
 // period, you will end up processing items in the order determined
-// by cache.FIFO.Replace(). Currently, this is random. If this is a
+// by FIFO.Replace(). Currently, this is random. If this is a
 // problem, we can change that replacement policy to append new
 // things to the end of the queue instead of replacing the entire
 // queue.
@@ -64,7 +63,7 @@ type ProcessFunc func(obj interface{}) error
 // Controller is a generic controller framework.
 type Controller struct {
 config Config
-reflector *cache.Reflector
+reflector *Reflector
 reflectorMutex sync.RWMutex
 }
@@ -87,7 +86,7 @@ func New(c *Config) *Controller {
 // Run blocks; call via go.
 func (c *Controller) Run(stopCh <-chan struct{}) {
 defer utilruntime.HandleCrash()
-r := cache.NewReflector(
+r := NewReflector(
 c.config.ListerWatcher,
 c.config.ObjectType,
 c.config.Queue,
@@ -110,9 +109,9 @@ func (c *Controller) HasSynced() bool {
 // Requeue adds the provided object back into the queue if it does not already exist.
 func (c *Controller) Requeue(obj interface{}) error {
-return c.config.Queue.AddIfNotPresent(cache.Deltas{
-cache.Delta{
-Type: cache.Sync,
+return c.config.Queue.AddIfNotPresent(Deltas{
+Delta{
+Type: Sync,
 Object: obj,
 },
 })
@@ -124,7 +123,7 @@ func (c *Controller) Requeue(obj interface{}) error {
 // concurrently.
 func (c *Controller) processLoop() {
 for {
-obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
+obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
 if err != nil {
 if c.config.RetryOnError {
 // This is the safe way to re-enqueue.
@@ -145,7 +144,7 @@ func (c *Controller) processLoop() {
 // get called even if nothing changed. This is useful for periodically
 // evaluating or syncing something.
 // * OnDelete will get the final state of the item if it is known, otherwise
-// it will get an object of type cache.DeletedFinalStateUnknown. This can
+// it will get an object of type DeletedFinalStateUnknown. This can
 // happen if the watch is closed and misses the delete event and we don't
 // notice the deletion until the subsequent re-list.
 type ResourceEventHandler interface {
@@ -185,18 +184,18 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
 }
 // DeletionHandlingMetaNamespaceKeyFunc checks for
-// cache.DeletedFinalStateUnknown objects before calling
-// cache.MetaNamespaceKeyFunc.
+// DeletedFinalStateUnknown objects before calling
+// MetaNamespaceKeyFunc.
 func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
-if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+if d, ok := obj.(DeletedFinalStateUnknown); ok {
 return d.Key, nil
 }
-return cache.MetaNamespaceKeyFunc(obj)
+return MetaNamespaceKeyFunc(obj)
 }
-// NewInformer returns a cache.Store and a controller for populating the store
+// NewInformer returns a Store and a controller for populating the store
 // while also providing event notifications. You should only used the returned
-// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
+// Store for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
 //
 // Parameters:
@@ -210,18 +209,18 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
 // * h is the object you want notifications sent to.
 //
 func NewInformer(
-lw cache.ListerWatcher,
+lw ListerWatcher,
 objType runtime.Object,
 resyncPeriod time.Duration,
 h ResourceEventHandler,
-) (cache.Store, *Controller) {
+) (Store, *Controller) {
 // This will hold the client state, as we know it.
-clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 // This will hold incoming changes. Note how we pass clientState in as a
 // KeyLister, that way resync operations will result in the correct set
 // of update/delete deltas.
-fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
 cfg := &Config{
 Queue: fifo,
@@ -232,9 +231,9 @@ func NewInformer(
 Process: func(obj interface{}) error {
 // from oldest to newest
-for _, d := range obj.(cache.Deltas) {
+for _, d := range obj.(Deltas) {
 switch d.Type {
-case cache.Sync, cache.Added, cache.Updated:
+case Sync, Added, Updated:
 if old, exists, err := clientState.Get(d.Object); err == nil && exists {
 if err := clientState.Update(d.Object); err != nil {
 return err
@@ -246,7 +245,7 @@ func NewInformer(
 }
 h.OnAdd(d.Object)
 }
-case cache.Deleted:
+case Deleted:
 if err := clientState.Delete(d.Object); err != nil {
 return err
 }
@@ -259,9 +258,9 @@ func NewInformer(
 return clientState, New(cfg)
 }
-// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
+// NewIndexerInformer returns a Indexer and a controller for populating the index
 // while also providing event notifications. You should only used the returned
-// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event
+// Index for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
 //
 // Parameters:
@@ -275,19 +274,19 @@ func NewInformer(
 // * h is the object you want notifications sent to.
 //
 func NewIndexerInformer(
-lw cache.ListerWatcher,
+lw ListerWatcher,
 objType runtime.Object,
 resyncPeriod time.Duration,
 h ResourceEventHandler,
-indexers cache.Indexers,
-) (cache.Indexer, *Controller) {
+indexers Indexers,
+) (Indexer, *Controller) {
 // This will hold the client state, as we know it.
-clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 // This will hold incoming changes. Note how we pass clientState in as a
 // KeyLister, that way resync operations will result in the correct set
 // of update/delete deltas.
-fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
+fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
 cfg := &Config{
 Queue: fifo,
@@ -298,9 +297,9 @@ func NewIndexerInformer(
 Process: func(obj interface{}) error {
 // from oldest to newest
-for _, d := range obj.(cache.Deltas) {
+for _, d := range obj.(Deltas) {
 switch d.Type {
-case cache.Sync, cache.Added, cache.Updated:
+case Sync, Added, Updated:
 if old, exists, err := clientState.Get(d.Object); err == nil && exists {
 if err := clientState.Update(d.Object); err != nil {
 return err
@@ -312,7 +311,7 @@ func NewIndexerInformer(
 }
 h.OnAdd(d.Object)
 }
-case cache.Deleted:
+case Deleted:
 if err := clientState.Delete(d.Object); err != nil {
 return err
 }
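
DeletionHandlingMetaNamespaceKeyFunc above is what makes delete handlers safe when a watch is dropped: the informer may then deliver a DeletedFinalStateUnknown tombstone instead of the final object, as the mesos scheduler hunk near the top handles by hand. A small sketch of the idiom from a caller's side (the forget callback is a hypothetical stand-in for local bookkeeping):

    import "k8s.io/kubernetes/pkg/client/cache"

    // deleteHandler returns a DeleteFunc that cleans up local state by key.
    // It tolerates cache.DeletedFinalStateUnknown tombstones because
    // DeletionHandlingMetaNamespaceKeyFunc unwraps them before keying.
    func deleteHandler(forget func(key string)) func(obj interface{}) {
        return func(obj interface{}) {
            key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
            if err != nil {
                return // no usable key; nothing to clean up
            }
            forget(key) // hypothetical local bookkeeping
        }
    }

Wired up as cache.ResourceEventHandlerFuncs{DeleteFunc: deleteHandler(...)}, this replaces the hand-rolled tombstone check shown in the mesos hunk.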

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package framework_test
+package cache
 import (
 "fmt"
@@ -24,8 +24,7 @@ import (
 "time"
 "k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/client/cache"
-"k8s.io/kubernetes/pkg/controller/framework"
+fcache "k8s.io/kubernetes/pkg/client/testing/cache"
 "k8s.io/kubernetes/pkg/runtime"
 "k8s.io/kubernetes/pkg/util/sets"
 "k8s.io/kubernetes/pkg/util/wait"
@@ -34,34 +33,22 @@ import (
 "github.com/google/gofuzz"
 )
-type testLW struct {
-ListFunc func(options api.ListOptions) (runtime.Object, error)
-WatchFunc func(options api.ListOptions) (watch.Interface, error)
-}
-func (t *testLW) List(options api.ListOptions) (runtime.Object, error) {
-return t.ListFunc(options)
-}
-func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) {
-return t.WatchFunc(options)
-}
 func Example() {
 // source simulates an apiserver object endpoint.
-source := framework.NewFakeControllerSource()
+source := fcache.NewFakeControllerSource()
 // This will hold the downstream state, as we know it.
-downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
+downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 // This will hold incoming changes. Note how we pass downstream in as a
 // KeyLister, that way resync operations will result in the correct set
 // of update/delete deltas.
-fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)
+fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, downstream)
 // Let's do threadsafe output to get predictable test results.
 deletionCounter := make(chan string, 1000)
-cfg := &framework.Config{
+cfg := &Config{
 Queue: fifo,
 ListerWatcher: source,
 ObjectType: &api.Pod{},
@@ -72,9 +59,9 @@ func Example() {
 // everything that comes in.
 Process: func(obj interface{}) error {
 // Obj is from the Pop method of the Queue we make above.
-newest := obj.(cache.Deltas).Newest()
-if newest.Type != cache.Deleted {
+newest := obj.(Deltas).Newest()
+if newest.Type != Deleted {
 // Update our downstream store.
 err := downstream.Add(newest.Object)
 if err != nil {
@@ -107,7 +94,7 @@ func Example() {
 // Create the controller and run it until we close stop.
 stop := make(chan struct{})
 defer close(stop)
-go framework.New(cfg).Run(stop)
+go New(cfg).Run(stop)
 // Let's add a few objects to the source.
 testIDs := []string{"a-hello", "b-controller", "c-framework"}
@@ -132,25 +119,25 @@ func Example() {
 // c-framework
 }
-func ExampleInformer() {
+func ExampleNewInformer() {
 // source simulates an apiserver object endpoint.
-source := framework.NewFakeControllerSource()
+source := fcache.NewFakeControllerSource()
 // Let's do threadsafe output to get predictable test results.
 deletionCounter := make(chan string, 1000)
 // Make a controller that immediately deletes anything added to it, and
 // logs anything deleted.
-_, controller := framework.NewInformer(
+_, controller := NewInformer(
 source,
 &api.Pod{},
 time.Millisecond*100,
-framework.ResourceEventHandlerFuncs{
+ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) {
 source.Delete(obj.(runtime.Object))
 },
 DeleteFunc: func(obj interface{}) {
-key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
+key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
 if err != nil {
 key = "oops something went wrong with the key"
 }
@@ -196,7 +183,7 @@ func TestHammerController(t *testing.T) {
 // race detector.
 // source simulates an apiserver object endpoint.
-source := framework.NewFakeControllerSource()
+source := fcache.NewFakeControllerSource()
 // Let's do threadsafe output to get predictable test results.
 outputSetLock := sync.Mutex{}
@@ -204,7 +191,7 @@ func TestHammerController(t *testing.T) {
 outputSet := map[string][]string{}
 recordFunc := func(eventType string, obj interface{}) {
-key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
+key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
 if err != nil {
 t.Errorf("something wrong with key: %v", err)
 key = "oops something went wrong with the key"
@@ -217,11 +204,11 @@ func TestHammerController(t *testing.T) {
 }
 // Make a controller which just logs all the changes it gets.
-_, controller := framework.NewInformer(
+_, controller := NewInformer(
 source,
 &api.Pod{},
 time.Millisecond*100,
-framework.ResourceEventHandlerFuncs{
+ResourceEventHandlerFuncs{
 AddFunc: func(obj interface{}) { recordFunc("add", obj) },
 UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
 DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
@@ -305,7 +292,7 @@ func TestUpdate(t *testing.T) {
 // call to update.
 // source simulates an apiserver object endpoint.
-source := framework.NewFakeControllerSource()
+source := fcache.NewFakeControllerSource()
 const (
 FROM = "from"
@@ -358,7 +345,7 @@ func TestUpdate(t *testing.T) {
 // It calls Done() on the wait group on deletions so we can tell when
 // everything we've added has been deleted.
 watchCh := make(chan struct{})
-_, controller := framework.NewInformer(
+_, controller := NewInformer(
 &testLW{
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
 watch, err := source.Watch(options)
@@ -371,7 +358,7 @@ func TestUpdate(t *testing.T) {
 },
 &api.Pod{},
 0,
-framework.ResourceEventHandlerFuncs{
+ResourceEventHandlerFuncs{
 UpdateFunc: func(oldObj, newObj interface{}) {
 o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
 from, to := o.Labels["check"], n.Labels["check"]

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package framework package cache
import ( import (
"testing" "testing"

View File

@ -34,12 +34,12 @@ import (
var nevererrc chan error var nevererrc chan error
type testLW struct { type testLW struct {
ListFunc func() (runtime.Object, error) ListFunc func(options api.ListOptions) (runtime.Object, error)
WatchFunc func(options api.ListOptions) (watch.Interface, error) WatchFunc func(options api.ListOptions) (watch.Interface, error)
} }
func (t *testLW) List(options api.ListOptions) (runtime.Object, error) { func (t *testLW) List(options api.ListOptions) (runtime.Object, error) {
return t.ListFunc() return t.ListFunc(options)
} }
func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) { func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) {
return t.WatchFunc(options) return t.WatchFunc(options)
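
Beyond the package rename, this test file picks up a real interface change: testLW.ListFunc now takes api.ListOptions, matching cache.ListerWatcher, so resource versions and selectors flow through tests the same way they do in production. Reusing the testLW type defined just above, a stub now looks like this (the returned values are placeholders):

lw := &testLW{
	ListFunc: func(options api.ListOptions) (runtime.Object, error) {
		return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
	},
	WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
		return watch.NewFake(), nil
	},
}
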
@ -53,7 +53,7 @@ func TestCloseWatchChannelOnError(t *testing.T) {
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return fw, nil return fw, nil
}, },
ListFunc: func() (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
}, },
} }
@ -79,7 +79,7 @@ func TestRunUntil(t *testing.T) {
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return fw, nil return fw, nil
}, },
ListFunc: func() (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
}, },
} }
@ -227,7 +227,7 @@ func TestReflectorListAndWatch(t *testing.T) {
go func() { createdFakes <- fw }() go func() { createdFakes <- fw }()
return fw, nil return fw, nil
}, },
ListFunc: func() (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
}, },
} }
@ -345,7 +345,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) {
}() }()
return fw, nil return fw, nil
}, },
ListFunc: func() (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return item.list, item.listErr return item.list, item.listErr
}, },
} }
@ -373,7 +373,7 @@ func TestReflectorResync(t *testing.T) {
fw := watch.NewFake() fw := watch.NewFake()
return fw, nil return fw, nil
}, },
ListFunc: func() (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "0"}}, nil return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "0"}}, nil
}, },
} }

View File

@ -14,19 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package framework package cache
import ( import (
"fmt" "fmt"
"sync" "sync"
"time" "time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
) )
// if you use this, there is one behavior change compared to a standard Informer. // if you use this, there is one behavior change compared to a standard Informer.
@ -42,7 +41,7 @@ type SharedInformer interface {
// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned. // You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
// TODO we should try to remove this restriction eventually. // TODO we should try to remove this restriction eventually.
AddEventHandler(handler ResourceEventHandler) error AddEventHandler(handler ResourceEventHandler) error
GetStore() cache.Store GetStore() Store
// GetController gives back a synthetic interface that "votes" to start the informer // GetController gives back a synthetic interface that "votes" to start the informer
GetController() ControllerInterface GetController() ControllerInterface
Run(stopCh <-chan struct{}) Run(stopCh <-chan struct{})
@ -53,24 +52,24 @@ type SharedInformer interface {
type SharedIndexInformer interface { type SharedIndexInformer interface {
SharedInformer SharedInformer
// AddIndexers adds indexers to the informer before it starts. // AddIndexers adds indexers to the informer before it starts.
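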
AddIndexers(indexers cache.Indexers) error AddIndexers(indexers Indexers) error
GetIndexer() cache.Indexer GetIndexer() Indexer
} }
// NewSharedInformer creates a new instance for the listwatcher. // NewSharedInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can // TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers. // be shared amongst all consumers.
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{}) return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
} }
// NewSharedIndexInformer creates a new instance for the listwatcher. // NewSharedIndexInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can // TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers. // be shared amongst all consumers.
func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer { func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
sharedIndexInformer := &sharedIndexInformer{ sharedIndexInformer := &sharedIndexInformer{
processor: &sharedProcessor{}, processor: &sharedProcessor{},
indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
listerWatcher: lw, listerWatcher: lw,
objectType: objType, objectType: objType,
fullResyncPeriod: resyncPeriod, fullResyncPeriod: resyncPeriod,
@ -107,13 +106,13 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
} }
type sharedIndexInformer struct { type sharedIndexInformer struct {
indexer cache.Indexer indexer Indexer
controller *Controller controller *Controller
processor *sharedProcessor processor *sharedProcessor
// This block is tracked to handle late initialization of the controller // This block is tracked to handle late initialization of the controller
listerWatcher cache.ListerWatcher listerWatcher ListerWatcher
objectType runtime.Object objectType runtime.Object
fullResyncPeriod time.Duration fullResyncPeriod time.Duration
@ -160,7 +159,7 @@ type deleteNotification struct {
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer) fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)
cfg := &Config{ cfg := &Config{
Queue: fifo, Queue: fifo,
@ -211,15 +210,15 @@ func (s *sharedIndexInformer) LastSyncResourceVersion() string {
return s.controller.reflector.LastSyncResourceVersion() return s.controller.reflector.LastSyncResourceVersion()
} }
func (s *sharedIndexInformer) GetStore() cache.Store { func (s *sharedIndexInformer) GetStore() Store {
return s.indexer return s.indexer
} }
func (s *sharedIndexInformer) GetIndexer() cache.Indexer { func (s *sharedIndexInformer) GetIndexer() Indexer {
return s.indexer return s.indexer
} }
func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error { func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
s.startedLock.Lock() s.startedLock.Lock()
defer s.startedLock.Unlock() defer s.startedLock.Unlock()
@ -271,9 +270,9 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
defer s.blockDeltas.Unlock() defer s.blockDeltas.Unlock()
// from oldest to newest // from oldest to newest
for _, d := range obj.(cache.Deltas) { for _, d := range obj.(Deltas) {
switch d.Type { switch d.Type {
case cache.Sync, cache.Added, cache.Updated: case Sync, Added, Updated:
if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
if err := s.indexer.Update(d.Object); err != nil { if err := s.indexer.Update(d.Object); err != nil {
return err return err
@ -285,7 +284,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
} }
s.processor.distribute(addNotification{newObj: d.Object}) s.processor.distribute(addNotification{newObj: d.Object})
} }
case cache.Deleted: case Deleted:
if err := s.indexer.Delete(d.Object); err != nil { if err := s.indexer.Delete(d.Object); err != nil {
return err return err
} }
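
shared_informer.go changes no behavior; every cache. qualifier simply disappears now that the file lives inside the package. For a consumer the surface is: construct, register handlers before Run, then gate work on WaitForCacheSync. A sketch under those assumptions (runPodInformer, lw, onAdd, and the 30-second resync are illustrative, not from the diff):

package shareddemo

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// runPodInformer builds a SharedIndexInformer over lw, registers onAdd, and
// blocks until the first full list has been indexed or stopCh closes.
func runPodInformer(lw cache.ListerWatcher, onAdd func(interface{}), stopCh <-chan struct{}) (cache.SharedIndexInformer, error) {
	informer := cache.NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	// Handlers may NOT be added after the informer is running; per the
	// interface comment above, AddEventHandler returns an error then.
	if err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{AddFunc: onAdd}); err != nil {
		return nil, err
	}

	go informer.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return nil, fmt.Errorf("pod cache never synced")
	}
	return informer, nil
}
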

View File

@ -29,7 +29,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilcertificates "k8s.io/kubernetes/pkg/util/certificates" utilcertificates "k8s.io/kubernetes/pkg/util/certificates"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -47,7 +46,7 @@ type CertificateController struct {
kubeClient clientset.Interface kubeClient clientset.Interface
// CSR framework and store // CSR framework and store
csrController *framework.Controller csrController *cache.Controller
csrStore cache.StoreToCertificateRequestLister csrStore cache.StoreToCertificateRequestLister
// To allow injection of updateCertificateRequestStatus for testing. // To allow injection of updateCertificateRequestStatus for testing.
@ -85,7 +84,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
} }
// Manage the addition/update of certificate requests // Manage the addition/update of certificate requests
cc.csrStore.Store, cc.csrController = framework.NewInformer( cc.csrStore.Store, cc.csrController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().List(options) return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
@ -96,7 +95,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
}, },
&certificates.CertificateSigningRequest{}, &certificates.CertificateSigningRequest{},
syncPeriod, syncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
csr := obj.(*certificates.CertificateSigningRequest) csr := obj.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Adding certificate request %s", csr.Name) glog.V(4).Infof("Adding certificate request %s", csr.Name)
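
The certificate controller shows the mechanical shape of every controller change below: framework.Controller, framework.NewInformer, and framework.ResourceEventHandlerFuncs become their cache counterparts, while the cache.ListWatch literal is untouched because it already lived in the target package. As a sketch, the CSR source factored out would read as follows; csrListWatch is a hypothetical helper, and the Watch side is assumed to mirror the List side shown in the hunk:

func csrListWatch(client clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Certificates().CertificateSigningRequests().List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Certificates().CertificateSigningRequests().Watch(options)
		},
	}
}
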

View File

@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/clock"
@ -54,7 +53,7 @@ const (
) )
var ( var (
KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
) )
type ResyncPeriodFunc func() time.Duration type ResyncPeriodFunc func() time.Duration

View File

@ -23,8 +23,6 @@ import (
"sync" "sync"
"time" "time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions"
@ -34,8 +32,7 @@ import (
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -46,6 +43,8 @@ import (
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"github.com/golang/glog"
) )
const ( const (
@ -72,7 +71,7 @@ type DaemonSetsController struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewDaemonSetsController(passing SharedInformer), this // the controller using NewDaemonSetsController(passing SharedInformer), this
// will be null // will be null
internalPodInformer framework.SharedInformer internalPodInformer cache.SharedInformer
// A dsc is temporarily suspended after creating/deleting this many replicas. // A dsc is temporarily suspended after creating/deleting this many replicas.
// It resumes normal action after observing the watch events for them. // It resumes normal action after observing the watch events for them.
@ -89,17 +88,17 @@ type DaemonSetsController struct {
// A store of nodes // A store of nodes
nodeStore cache.StoreToNodeLister nodeStore cache.StoreToNodeLister
// Watches changes to all daemon sets. // Watches changes to all daemon sets.
dsController *framework.Controller dsController *cache.Controller
// Watches changes to all pods // Watches changes to all pods
podController framework.ControllerInterface podController cache.ControllerInterface
// Watches changes to all nodes. // Watches changes to all nodes.
nodeController *framework.Controller nodeController *cache.Controller
// podStoreSynced returns true if the pod store has been synced at least once. // podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
podStoreSynced framework.InformerSynced podStoreSynced cache.InformerSynced
// nodeStoreSynced returns true if the node store has been synced at least once. // nodeStoreSynced returns true if the node store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
nodeStoreSynced framework.InformerSynced nodeStoreSynced cache.InformerSynced
lookupCache *controller.MatchingCache lookupCache *controller.MatchingCache
@ -107,7 +106,7 @@ type DaemonSetsController struct {
queue workqueue.RateLimitingInterface queue workqueue.RateLimitingInterface
} }
func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController { func NewDaemonSetsController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset. // TODO: remove the wrapper when all clients have moved to use the clientset.
@ -128,7 +127,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"),
} }
// Manage addition/update of daemon sets. // Manage addition/update of daemon sets.
dsc.dsStore.Store, dsc.dsController = framework.NewInformer( dsc.dsStore.Store, dsc.dsController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options) return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
@ -140,7 +139,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
&extensions.DaemonSet{}, &extensions.DaemonSet{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullDaemonSetResyncPeriod, FullDaemonSetResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
ds := obj.(*extensions.DaemonSet) ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name) glog.V(4).Infof("Adding daemon set %s", ds.Name)
@ -173,7 +172,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete // Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
// more pods until all the effects (expectations) of a daemon set's create/delete have been observed. // more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addPod, AddFunc: dsc.addPod,
UpdateFunc: dsc.updatePod, UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod, DeleteFunc: dsc.deletePod,
@ -183,7 +182,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
dsc.podStoreSynced = podInformer.HasSynced dsc.podStoreSynced = podInformer.HasSynced
// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change, // Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change,
dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer( dsc.nodeStore.Store, dsc.nodeController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Core().Nodes().List(options) return dsc.kubeClient.Core().Nodes().List(options)
@ -194,7 +193,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
}, },
&api.Node{}, &api.Node{},
resyncPeriod(), resyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addNode, AddFunc: dsc.addNode,
UpdateFunc: dsc.updateNode, UpdateFunc: dsc.updateNode,
}, },
@ -242,7 +241,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
go dsc.podController.Run(stopCh) go dsc.podController.Run(stopCh)
go dsc.nodeController.Run(stopCh) go dsc.nodeController.Run(stopCh)
if !framework.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced) { if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced) {
return return
} }

View File

@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -70,15 +69,15 @@ type DeploymentController struct {
// A store of deployments, populated by the dController // A store of deployments, populated by the dController
dStore cache.StoreToDeploymentLister dStore cache.StoreToDeploymentLister
// Watches changes to all deployments // Watches changes to all deployments
dController *framework.Controller dController *cache.Controller
// A store of ReplicaSets, populated by the rsController // A store of ReplicaSets, populated by the rsController
rsStore cache.StoreToReplicaSetLister rsStore cache.StoreToReplicaSetLister
// Watches changes to all ReplicaSets // Watches changes to all ReplicaSets
rsController *framework.Controller rsController *cache.Controller
// A store of pods, populated by the podController // A store of pods, populated by the podController
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
// Watches changes to all pods // Watches changes to all pods
podController *framework.Controller podController *cache.Controller
// dStoreSynced returns true if the Deployment store has been synced at least once. // dStoreSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
@ -110,7 +109,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
} }
dc.dStore.Indexer, dc.dController = framework.NewIndexerInformer( dc.dStore.Indexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().Deployments(api.NamespaceAll).List(options) return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)
@ -121,7 +120,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
}, },
&extensions.Deployment{}, &extensions.Deployment{},
FullDeploymentResyncPeriod, FullDeploymentResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeploymentNotification, AddFunc: dc.addDeploymentNotification,
UpdateFunc: dc.updateDeploymentNotification, UpdateFunc: dc.updateDeploymentNotification,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store. // This will enter the sync loop and no-op, because the deployment has been deleted from the store.
@ -130,7 +129,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
dc.rsStore.Store, dc.rsController = framework.NewInformer( dc.rsStore.Store, dc.rsController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options) return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -141,14 +140,14 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
}, },
&extensions.ReplicaSet{}, &extensions.ReplicaSet{},
resyncPeriod(), resyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet, AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet, UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet, DeleteFunc: dc.deleteReplicaSet,
}, },
) )
dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer( dc.podStore.Indexer, dc.podController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Core().Pods(api.NamespaceAll).List(options) return dc.client.Core().Pods(api.NamespaceAll).List(options)
@ -159,7 +158,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
}, },
&api.Pod{}, &api.Pod{},
resyncPeriod(), resyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod, AddFunc: dc.addPod,
UpdateFunc: dc.updatePod, UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod, DeleteFunc: dc.deletePod,
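
The deployment controller demonstrates the indexer-returning variant: cache.NewIndexerInformer hands back a cache.Indexer instead of a plain Store, so listers like StoreToDeploymentLister can answer per-namespace queries without scanning everything. The shape, sketched with placeholder lw and handler funcs:

indexer, controller := cache.NewIndexerInformer(
	lw,                       // any cache.ListerWatcher over Deployments
	&extensions.Deployment{},
	30*time.Second,           // placeholder; the controller uses FullDeploymentResyncPeriod
	cache.ResourceEventHandlerFuncs{
		AddFunc:    onAdd,
		UpdateFunc: onUpdate,
		DeleteFunc: onDelete,
	},
	cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
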

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
@ -47,22 +46,22 @@ type DisruptionController struct {
kubeClient *client.Client kubeClient *client.Client
pdbStore cache.Store pdbStore cache.Store
pdbController *framework.Controller pdbController *cache.Controller
pdbLister cache.StoreToPodDisruptionBudgetLister pdbLister cache.StoreToPodDisruptionBudgetLister
podController framework.ControllerInterface podController cache.ControllerInterface
podLister cache.StoreToPodLister podLister cache.StoreToPodLister
rcIndexer cache.Indexer rcIndexer cache.Indexer
rcController *framework.Controller rcController *cache.Controller
rcLister cache.StoreToReplicationControllerLister rcLister cache.StoreToReplicationControllerLister
rsStore cache.Store rsStore cache.Store
rsController *framework.Controller rsController *cache.Controller
rsLister cache.StoreToReplicaSetLister rsLister cache.StoreToReplicaSetLister
dIndexer cache.Indexer dIndexer cache.Indexer
dController *framework.Controller dController *cache.Controller
dLister cache.StoreToDeploymentLister dLister cache.StoreToDeploymentLister
queue *workqueue.Type queue *workqueue.Type
@ -84,7 +83,7 @@ type controllerAndScale struct {
// controllers and their scale. // controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error) type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClient *client.Client) *DisruptionController { func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
dc := &DisruptionController{ dc := &DisruptionController{
kubeClient: kubeClient, kubeClient: kubeClient,
podController: podInformer.GetController(), podController: podInformer.GetController(),
@ -97,13 +96,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
dc.podLister.Indexer = podInformer.GetIndexer() dc.podLister.Indexer = podInformer.GetIndexer()
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod, AddFunc: dc.addPod,
UpdateFunc: dc.updatePod, UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod, DeleteFunc: dc.deletePod,
}) })
dc.pdbStore, dc.pdbController = framework.NewInformer( dc.pdbStore, dc.pdbController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options) return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
@ -114,7 +113,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
}, },
&policy.PodDisruptionBudget{}, &policy.PodDisruptionBudget{},
30*time.Second, 30*time.Second,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDb, AddFunc: dc.addDb,
UpdateFunc: dc.updateDb, UpdateFunc: dc.updateDb,
DeleteFunc: dc.removeDb, DeleteFunc: dc.removeDb,
@ -122,7 +121,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
) )
dc.pdbLister.Store = dc.pdbStore dc.pdbLister.Store = dc.pdbStore
dc.rcIndexer, dc.rcController = framework.NewIndexerInformer( dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options) return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
@ -133,13 +132,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
}, },
&api.ReplicationController{}, &api.ReplicationController{},
30*time.Second, 30*time.Second,
framework.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
dc.rcLister.Indexer = dc.rcIndexer dc.rcLister.Indexer = dc.rcIndexer
dc.rsStore, dc.rsController = framework.NewInformer( dc.rsStore, dc.rsController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options) return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -150,12 +149,12 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
}, },
&extensions.ReplicaSet{}, &extensions.ReplicaSet{},
30*time.Second, 30*time.Second,
framework.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
) )
dc.rsLister.Store = dc.rsStore dc.rsLister.Store = dc.rsStore
dc.dIndexer, dc.dController = framework.NewIndexerInformer( dc.dIndexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options) return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
@ -166,7 +165,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
}, },
&extensions.Deployment{}, &extensions.Deployment{},
30*time.Second, 30*time.Second,
framework.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )

View File

@ -34,8 +34,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -66,11 +65,11 @@ const (
) )
var ( var (
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
) )
// NewEndpointController returns a new *EndpointController. // NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController { func NewEndpointController(podInformer cache.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil { if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
} }
@ -79,7 +78,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
queue: workqueue.NewNamed("endpoint"), queue: workqueue.NewNamed("endpoint"),
} }
e.serviceStore.Store, e.serviceController = framework.NewInformer( e.serviceStore.Store, e.serviceController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options) return e.client.Core().Services(api.NamespaceAll).List(options)
@ -91,7 +90,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
&api.Service{}, &api.Service{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullServiceResyncPeriod, FullServiceResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService, AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur) e.enqueueService(cur)
@ -100,7 +99,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
}, },
) )
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.addPod, AddFunc: e.addPod,
UpdateFunc: e.updatePod, UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod, DeleteFunc: e.deletePod,
@ -133,7 +132,7 @@ type EndpointController struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewEndpointController(passing SharedInformer), this // the controller using NewEndpointController(passing SharedInformer), this
// will be null // will be null
internalPodInformer framework.SharedIndexInformer internalPodInformer cache.SharedIndexInformer
// Services that need to be updated. A channel is inappropriate here, // Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much // because it allows services with lots of pods to be serviced much
@ -144,8 +143,8 @@ type EndpointController struct {
// Since we join two objects, we'll watch both of them with // Since we join two objects, we'll watch both of them with
// controllers. // controllers.
serviceController *framework.Controller serviceController *cache.Controller
podController framework.ControllerInterface podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once. // podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool podStoreSynced func() bool

View File

@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package framework implements all the grunt work involved in running a simple controller.
package framework // import "k8s.io/kubernetes/pkg/controller/framework"

View File

@ -32,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly" "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@ -49,7 +48,7 @@ const ResourceResyncTime time.Duration = 0
type monitor struct { type monitor struct {
store cache.Store store cache.Store
controller *framework.Controller controller *cache.Controller
} }
type objectReference struct { type objectReference struct {
@ -488,11 +487,11 @@ func (gc *GarbageCollector) monitorFor(resource unversioned.GroupVersionResource
} }
runtimeObject.GetObjectKind().SetGroupVersionKind(kind) runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
} }
monitor.store, monitor.controller = framework.NewInformer( monitor.store, monitor.controller = cache.NewInformer(
gcListWatcher(client, resource), gcListWatcher(client, resource),
nil, nil,
ResourceResyncTime, ResourceResyncTime,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
// add the event to the propagator's eventQueue. // add the event to the propagator's eventQueue.
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
setObjectTypeMeta(obj) setObjectTypeMeta(obj)
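
One subtlety in the garbage collector: it passes nil as the object type because gcListWatcher serves arbitrary group/version/resources. The reflector treats a nil expected type as "accept anything", so cache.NewInformer works unchanged for schema-less monitoring. Sketched, with the handler body elided:

store, controller := cache.NewInformer(
	gcListWatcher(client, resource), // dynamic list/watch for one GroupVersionResource
	nil,                             // no static type: the reflector skips its type check
	ResourceResyncTime,
	cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* enqueue into the propagator */ },
	},
)
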

View File

@ -21,13 +21,12 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
) )
// PodInformer is a type of SharedIndexInformer which watches and lists all pods. // PodInformer is a type of SharedIndexInformer which watches and lists all pods.
// The interface provides a constructor for the informer and a lister for pods // The interface provides a constructor for the informer and a lister for pods
type PodInformer interface { type PodInformer interface {
Informer() framework.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPodLister Lister() *cache.StoreToPodLister
} }
@ -37,7 +36,7 @@ type podInformer struct {
// Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates a new informer of type // Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// podInformer and connects it to the sharedInformerFactory // podInformer and connects it to the sharedInformerFactory
func (f *podInformer) Informer() framework.SharedIndexInformer { func (f *podInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
@ -63,7 +62,7 @@ func (f *podInformer) Lister() *cache.StoreToPodLister {
// NamespaceInformer is a type of SharedIndexInformer which watches and lists all namespaces. // NamespaceInformer is a type of SharedIndexInformer which watches and lists all namespaces.
// The interface provides a constructor for the informer and a lister for namespaces // The interface provides a constructor for the informer and a lister for namespaces
type NamespaceInformer interface { type NamespaceInformer interface {
Informer() framework.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() *cache.IndexerToNamespaceLister Lister() *cache.IndexerToNamespaceLister
} }
@ -73,7 +72,7 @@ type namespaceInformer struct {
// Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates a new informer of type // Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// namespaceInformer and connects it to the sharedInformerFactory // namespaceInformer and connects it to the sharedInformerFactory
func (f *namespaceInformer) Informer() framework.SharedIndexInformer { func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
@ -99,7 +98,7 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
// NodeInformer is a type of SharedIndexInformer which watches and lists all nodes. // NodeInformer is a type of SharedIndexInformer which watches and lists all nodes.
// The interface provides a constructor for the informer and a lister for nodes // The interface provides a constructor for the informer and a lister for nodes
type NodeInformer interface { type NodeInformer interface {
Informer() framework.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() *cache.StoreToNodeLister Lister() *cache.StoreToNodeLister
} }
@ -109,7 +108,7 @@ type nodeInformer struct {
// Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates a new informer of type // Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// nodeInformer and connects it to the sharedInformerFactory // nodeInformer and connects it to the sharedInformerFactory
func (f *nodeInformer) Informer() framework.SharedIndexInformer { func (f *nodeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
@ -135,7 +134,7 @@ func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
// PVCInformer is a type of SharedIndexInformer which watches and lists all persistent volume claims. // PVCInformer is a type of SharedIndexInformer which watches and lists all persistent volume claims.
// The interface provides a constructor for the informer and a lister for persistent volume claims // The interface provides a constructor for the informer and a lister for persistent volume claims
type PVCInformer interface { type PVCInformer interface {
Informer() framework.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVCFetcher Lister() *cache.StoreToPVCFetcher
} }
@ -145,7 +144,7 @@ type pvcInformer struct {
// Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates a new informer of type // Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// pvcInformer and connects it to the sharedInformerFactory // pvcInformer and connects it to the sharedInformerFactory
func (f *pvcInformer) Informer() framework.SharedIndexInformer { func (f *pvcInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
@ -171,7 +170,7 @@ func (f *pvcInformer) Lister() *cache.StoreToPVCFetcher {
// PVInformer is a type of SharedIndexInformer which watches and lists all persistent volumes. // PVInformer is a type of SharedIndexInformer which watches and lists all persistent volumes.
// The interface provides a constructor for the informer and a lister for persistent volumes // The interface provides a constructor for the informer and a lister for persistent volumes
type PVInformer interface { type PVInformer interface {
Informer() framework.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVFetcher Lister() *cache.StoreToPVFetcher
} }
@ -181,7 +180,7 @@ type pvInformer struct {
// Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates a new informer of type // Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// pvInformer and connects it to the sharedInformerFactory // pvInformer and connects it to the sharedInformerFactory
func (f *pvInformer) Informer() framework.SharedIndexInformer { func (f *pvInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
) )
@ -47,7 +46,7 @@ type sharedInformerFactory struct {
lock sync.Mutex lock sync.Mutex
defaultResync time.Duration defaultResync time.Duration
informers map[reflect.Type]framework.SharedIndexInformer informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started // startedInformers is used for tracking which informers have been started
// this allows calling of Start method multiple times // this allows calling of Start method multiple times
startedInformers map[reflect.Type]bool startedInformers map[reflect.Type]bool
@ -58,7 +57,7 @@ func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Dur
return &sharedInformerFactory{ return &sharedInformerFactory{
client: client, client: client,
defaultResync: defaultResync, defaultResync: defaultResync,
informers: make(map[reflect.Type]framework.SharedIndexInformer), informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool), startedInformers: make(map[reflect.Type]bool),
} }
} }
@ -102,8 +101,8 @@ func (f *sharedInformerFactory) PersistentVolumes() PVInformer {
} }
// NewPodInformer returns a SharedIndexInformer that lists and watches all pods // NewPodInformer returns a SharedIndexInformer that lists and watches all pods
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Pods(api.NamespaceAll).List(options) return client.Core().Pods(api.NamespaceAll).List(options)
@ -121,8 +120,8 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) fram
} }
// NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes // NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Nodes().List(options) return client.Core().Nodes().List(options)
@ -139,8 +138,8 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) fra
} }
// NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs // NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@ -157,8 +156,8 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) fram
} }
// NewPVInformer returns a SharedIndexInformer that lists and watches all PVs // NewPVInformer returns a SharedIndexInformer that lists and watches all PVs
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumes().List(options) return client.Core().PersistentVolumes().List(options)
@ -175,8 +174,8 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) frame
} }
// NewNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces // NewNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options) return client.Core().Namespaces().List(options)
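
The freestanding constructors in this file keep their signatures and only change the return type to cache.SharedIndexInformer, so callers that do not want the shared factory can still build one-off informers. A usage sketch (client, stopCh, and the 30-second resync are placeholders):

podInformer := informers.NewPodInformer(client, 30*time.Second)
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) { /* react to new pods */ },
})
go podInformer.Run(stopCh)
if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
	return // placeholder: bail out if the cache never syncs
}
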

View File

@ -31,8 +31,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -51,7 +50,7 @@ type JobController struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewJobController(passing SharedInformer), this // the controller using NewJobController(passing SharedInformer), this
// will be null // will be null
internalPodInformer framework.SharedInformer internalPodInformer cache.SharedInformer
// To allow injection of updateJobStatus for testing. // To allow injection of updateJobStatus for testing.
updateHandler func(job *batch.Job) error updateHandler func(job *batch.Job) error
@ -66,7 +65,7 @@ type JobController struct {
// A store of job, populated by the jobController // A store of job, populated by the jobController
jobStore cache.StoreToJobLister jobStore cache.StoreToJobLister
// Watches changes to all jobs // Watches changes to all jobs
jobController *framework.Controller jobController *cache.Controller
// A store of pods, populated by the podController // A store of pods, populated by the podController
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
@ -77,7 +76,7 @@ type JobController struct {
recorder record.EventRecorder recorder record.EventRecorder
} }
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController { func NewJobController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset. // TODO: remove the wrapper when every clients have moved to use the clientset.
@ -98,7 +97,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}), recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
} }
jm.jobStore.Store, jm.jobController = framework.NewInformer( jm.jobStore.Store, jm.jobController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options) return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
@ -110,7 +109,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
&batch.Job{}, &batch.Job{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
replicationcontroller.FullControllerResyncPeriod, replicationcontroller.FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: jm.enqueueController, AddFunc: jm.enqueueController,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
if job := cur.(*batch.Job); !IsJobFinished(job) { if job := cur.(*batch.Job); !IsJobFinished(job) {
@ -121,7 +120,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
}, },
) )
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: jm.addPod, AddFunc: jm.addPod,
UpdateFunc: jm.updatePod, UpdateFunc: jm.updatePod,
DeleteFunc: jm.deletePod, DeleteFunc: jm.deletePod,
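As a standalone illustration of the renamed constructor, a sketch under the signatures visible in this hunk; watchJobs, the enqueue callback, and the 30-second resync are assumptions:

    package example

    import (
    	"time"

    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/pkg/apis/batch"
    	"k8s.io/kubernetes/pkg/client/cache"
    	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    	"k8s.io/kubernetes/pkg/runtime"
    	"k8s.io/kubernetes/pkg/watch"
    )

    // watchJobs builds a store/controller pair for Jobs with the relocated
    // cache.NewInformer, mirroring the wiring in the hunk above. The enqueue
    // callback stands in for a real work queue.
    func watchJobs(kubeClient clientset.Interface, enqueue func(obj interface{})) (cache.Store, *cache.Controller) {
    	return cache.NewInformer(
    		&cache.ListWatch{
    			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
    				return kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
    			},
    			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
    				return kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
    			},
    		},
    		&batch.Job{},
    		30*time.Second, // illustrative resync period
    		cache.ResourceEventHandlerFuncs{
    			AddFunc:    enqueue,
    			UpdateFunc: func(old, cur interface{}) { enqueue(cur) },
    			DeleteFunc: enqueue,
    		},
    	)
    }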

View File

@ -25,7 +25,6 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -45,7 +44,7 @@ type NamespaceController struct {
// store that holds the namespaces // store that holds the namespaces
store cache.Store store cache.Store
// controller that observes the namespaces // controller that observes the namespaces
controller *framework.Controller controller *cache.Controller
// namespaces that have been queued up for processing by workers // namespaces that have been queued up for processing by workers
queue workqueue.RateLimitingInterface queue workqueue.RateLimitingInterface
// list of preferred group versions and their corresponding resource set for namespace deletion // list of preferred group versions and their corresponding resource set for namespace deletion
@ -95,7 +94,7 @@ func NewNamespaceController(
} }
// configure the backing store/controller // configure the backing store/controller
store, controller := framework.NewInformer( store, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options) return kubeClient.Core().Namespaces().List(options)
@ -106,7 +105,7 @@ func NewNamespaceController(
}, },
&api.Namespace{}, &api.Namespace{},
resyncPeriod, resyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
namespace := obj.(*api.Namespace) namespace := obj.(*api.Namespace)
namespaceController.enqueueNamespace(namespace) namespaceController.enqueueNamespace(namespace)

View File

@ -33,8 +33,7 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -136,13 +135,13 @@ type NodeController struct {
maximumGracePeriod time.Duration maximumGracePeriod time.Duration
recorder record.EventRecorder recorder record.EventRecorder
// Pod framework and store // Pod framework and store
podController framework.ControllerInterface podController cache.ControllerInterface
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
// Node framework and store // Node framework and store
nodeController *framework.Controller nodeController *cache.Controller
nodeStore cache.StoreToNodeLister nodeStore cache.StoreToNodeLister
// DaemonSet framework and store // DaemonSet framework and store
daemonSetController *framework.Controller daemonSetController *cache.Controller
daemonSetStore cache.StoreToDaemonSetLister daemonSetStore cache.StoreToDaemonSetLister
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true // allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator cidrAllocator CIDRAllocator
@ -164,7 +163,7 @@ type NodeController struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewNodeController(passing SharedInformer), this // the controller using NewNodeController(passing SharedInformer), this
// will be nil // will be nil
internalPodInformer framework.SharedIndexInformer internalPodInformer cache.SharedIndexInformer
} }
// NewNodeController returns a new node controller to sync instances from cloudprovider. // NewNodeController returns a new node controller to sync instances from cloudprovider.
@ -172,7 +171,7 @@ type NodeController struct {
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes // podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
// currently, this should be handled as a fatal error. // currently, this should be handled as a fatal error.
func NewNodeController( func NewNodeController(
podInformer framework.SharedIndexInformer, podInformer cache.SharedIndexInformer,
cloud cloudprovider.Interface, cloud cloudprovider.Interface,
kubeClient clientset.Interface, kubeClient clientset.Interface,
podEvictionTimeout time.Duration, podEvictionTimeout time.Duration,
@ -241,16 +240,16 @@ func NewNodeController(
nc.enterFullDisruptionFunc = nc.HealthyQPSFunc nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
nc.computeZoneStateFunc = nc.ComputeZoneState nc.computeZoneStateFunc = nc.ComputeZoneState
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nc.maybeDeleteTerminatingPod, AddFunc: nc.maybeDeleteTerminatingPod,
UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) }, UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
}) })
nc.podStore.Indexer = podInformer.GetIndexer() nc.podStore.Indexer = podInformer.GetIndexer()
nc.podController = podInformer.GetController() nc.podController = podInformer.GetController()
nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{} nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs { if nc.allocateNodeCIDRs {
nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{ nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
node := obj.(*api.Node) node := obj.(*api.Node)
err := nc.cidrAllocator.AllocateOrOccupyCIDR(node) err := nc.cidrAllocator.AllocateOrOccupyCIDR(node)
@ -296,7 +295,7 @@ func NewNodeController(
} }
} }
nc.nodeStore.Store, nc.nodeController = framework.NewInformer( nc.nodeStore.Store, nc.nodeController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Core().Nodes().List(options) return nc.kubeClient.Core().Nodes().List(options)
@ -310,7 +309,7 @@ func NewNodeController(
nodeEventHandlerFuncs, nodeEventHandlerFuncs,
) )
nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer( nc.daemonSetStore.Store, nc.daemonSetController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options) return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
@ -321,7 +320,7 @@ func NewNodeController(
}, },
&extensions.DaemonSet{}, &extensions.DaemonSet{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
framework.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
) )
if allocateNodeCIDRs { if allocateNodeCIDRs {
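The node controller also shows the shared-informer half of the API: handlers are registered on the shared pod informer and its indexer and controller are reused rather than rebuilt. A sketch of that pattern; attachPodHandlers is an illustrative name:

    package example

    import (
    	"k8s.io/kubernetes/pkg/client/cache"
    )

    // attachPodHandlers (hypothetical) registers handlers on a shared pod
    // informer, then hands back the informer's own indexer and controller so
    // the caller does not spin up a private list/watch of its own.
    func attachPodHandlers(podInformer cache.SharedIndexInformer, onPodChange func(obj interface{})) (cache.Indexer, cache.ControllerInterface) {
    	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
    		AddFunc:    onPodChange,
    		UpdateFunc: func(_, cur interface{}) { onPodChange(cur) },
    		DeleteFunc: onPodChange,
    	})
    	return podInformer.GetIndexer(), podInformer.GetController()
    }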

View File

@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/errors"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -63,12 +62,12 @@ type PetSetController struct {
// podStoreSynced returns true if the pod store has synced at least once. // podStoreSynced returns true if the pod store has synced at least once.
podStoreSynced func() bool podStoreSynced func() bool
// Watches changes to all pods. // Watches changes to all pods.
podController framework.ControllerInterface podController cache.ControllerInterface
// A store of PetSets, populated by the psController. // A store of PetSets, populated by the psController.
psStore cache.StoreToPetSetLister psStore cache.StoreToPetSetLister
// Watches changes to all PetSets. // Watches changes to all PetSets.
psController *framework.Controller psController *cache.Controller
// A store of the 1 unhealthy pet blocking progress for a given ps // A store of the 1 unhealthy pet blocking progress for a given ps
blockingPetStore *unhealthyPetTracker blockingPetStore *unhealthyPetTracker
@ -82,7 +81,7 @@ type PetSetController struct {
} }
// NewPetSetController creates a new petset controller. // NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController { func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(kubeClient.Events("")) eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
@ -98,7 +97,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
} }
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
// lookup the petset and enqueue // lookup the petset and enqueue
AddFunc: psc.addPod, AddFunc: psc.addPod,
// lookup current and old petset if labels changed // lookup current and old petset if labels changed
@ -109,7 +108,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
psc.podStore.Indexer = podInformer.GetIndexer() psc.podStore.Indexer = podInformer.GetIndexer()
psc.podController = podInformer.GetController() psc.podController = podInformer.GetController()
psc.psStore.Store, psc.psController = framework.NewInformer( psc.psStore.Store, psc.psController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options) return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
@ -120,7 +119,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
}, },
&apps.PetSet{}, &apps.PetSet{},
petSetResyncPeriod, petSetResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: psc.enqueuePetSet, AddFunc: psc.enqueuePetSet,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
oldPS := old.(*apps.PetSet) oldPS := old.(*apps.PetSet)

View File

@ -33,7 +33,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -61,14 +60,14 @@ type HorizontalController struct {
// A store of HPA objects, populated by the controller. // A store of HPA objects, populated by the controller.
store cache.Store store cache.Store
// Watches changes to all HPA objects. // Watches changes to all HPA objects.
controller *framework.Controller controller *cache.Controller
} }
var downscaleForbiddenWindow = 5 * time.Minute var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) { func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
return framework.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options) return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
@ -79,7 +78,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
}, },
&autoscaling.HorizontalPodAutoscaler{}, &autoscaling.HorizontalPodAutoscaler{},
resyncPeriod, resyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
hpa := obj.(*autoscaling.HorizontalPodAutoscaler) hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -44,7 +43,7 @@ const (
type PodGCController struct { type PodGCController struct {
kubeClient clientset.Interface kubeClient clientset.Interface
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
podStoreSyncer *framework.Controller podStoreSyncer *cache.Controller
deletePod func(namespace, name string) error deletePod func(namespace, name string) error
threshold int threshold int
} }
@ -63,7 +62,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown)) terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))
gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer( gcc.podStore.Indexer, gcc.podStoreSyncer = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = terminatedSelector options.FieldSelector = terminatedSelector
@ -76,7 +75,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
}, },
&api.Pod{}, &api.Pod{},
resyncPeriod(), resyncPeriod(),
framework.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
// We don't actually need to build an index for podStore here, but build one for consistency. // We don't actually need to build an index for podStore here, but build one for consistency.
// It will ensure that if people start making use of the podStore in more specific ways, // It will ensure that if people start making use of the podStore in more specific ways,
// they'll get the benefits they expect. It will also reserve the name for future refactorings. // they'll get the benefits they expect. It will also reserve the name for future refactorings.
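A sketch of the NewIndexerInformer variant used here, combining the field selector from this hunk with the namespace index seen elsewhere in this PR; watchTerminatedPods is an illustrative name:

    package example

    import (
    	"time"

    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/pkg/client/cache"
    	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    	"k8s.io/kubernetes/pkg/fields"
    	"k8s.io/kubernetes/pkg/runtime"
    	"k8s.io/kubernetes/pkg/watch"
    )

    // watchTerminatedPods narrows the list/watch to terminated pods with a
    // field selector, and cache.NewIndexerInformer returns an Indexer plus
    // the controller that keeps it filled.
    func watchTerminatedPods(kubeClient clientset.Interface, resync time.Duration) (cache.Indexer, *cache.Controller) {
    	terminatedSelector := fields.ParseSelectorOrDie(
    		"status.phase!=" + string(api.PodPending) +
    			",status.phase!=" + string(api.PodRunning) +
    			",status.phase!=" + string(api.PodUnknown))
    	return cache.NewIndexerInformer(
    		&cache.ListWatch{
    			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
    				options.FieldSelector = terminatedSelector
    				return kubeClient.Core().Pods(api.NamespaceAll).List(options)
    			},
    			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
    				options.FieldSelector = terminatedSelector
    				return kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
    			},
    		},
    		&api.Pod{},
    		resync,
    		cache.ResourceEventHandlerFuncs{},
    		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    	)
    }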

View File

@ -36,8 +36,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -81,7 +80,7 @@ type ReplicaSetController struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewReplicaSetController(passing SharedInformer), this // the controller using NewReplicaSetController(passing SharedInformer), this
// will be nil // will be nil
internalPodInformer framework.SharedIndexInformer internalPodInformer cache.SharedIndexInformer
// A ReplicaSet is temporarily suspended after creating/deleting these many replicas. // A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them. // It resumes normal action after observing the watch events for them.
@ -95,11 +94,11 @@ type ReplicaSetController struct {
// A store of ReplicaSets, populated by the rsController // A store of ReplicaSets, populated by the rsController
rsStore cache.StoreToReplicaSetLister rsStore cache.StoreToReplicaSetLister
// Watches changes to all ReplicaSets // Watches changes to all ReplicaSets
rsController *framework.Controller rsController *cache.Controller
// A store of pods, populated by the podController // A store of pods, populated by the podController
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
// Watches changes to all pods // Watches changes to all pods
podController framework.ControllerInterface podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once. // podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool podStoreSynced func() bool
@ -115,7 +114,7 @@ type ReplicaSetController struct {
} }
// NewReplicaSetController creates a new ReplicaSetController. // NewReplicaSetController creates a new ReplicaSetController.
func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController { func NewReplicaSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@ -126,7 +125,7 @@ func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClie
} }
// newReplicaSetController configures a replica set controller with the specified event recorder // newReplicaSetController configures a replica set controller with the specified event recorder
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController { func newReplicaSetController(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
} }
@ -143,7 +142,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
garbageCollectorEnabled: garbageCollectorEnabled, garbageCollectorEnabled: garbageCollectorEnabled,
} }
rsc.rsStore.Store, rsc.rsController = framework.NewInformer( rsc.rsStore.Store, rsc.rsController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options) return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -155,7 +154,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
&extensions.ReplicaSet{}, &extensions.ReplicaSet{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullControllerResyncPeriod, FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: rsc.enqueueReplicaSet, AddFunc: rsc.enqueueReplicaSet,
UpdateFunc: rsc.updateRS, UpdateFunc: rsc.updateRS,
// This will enter the sync loop and no-op, because the replica set has been deleted from the store. // This will enter the sync loop and no-op, because the replica set has been deleted from the store.
@ -165,7 +164,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
}, },
) )
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: rsc.addPod, AddFunc: rsc.addPod,
// This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like // This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like
// overkill the most frequent pod update is status, and the associated ReplicaSet will only list from // overkill the most frequent pod update is status, and the associated ReplicaSet will only list from

View File

@ -34,8 +34,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
@ -86,7 +85,7 @@ type ReplicationManager struct {
// we have a personal informer, we must start it ourselves. If you start // we have a personal informer, we must start it ourselves. If you start
// the controller using NewReplicationManager(passing SharedInformer), this // the controller using NewReplicationManager(passing SharedInformer), this
// will be nil // will be nil
internalPodInformer framework.SharedIndexInformer internalPodInformer cache.SharedIndexInformer
// An rc is temporarily suspended after creating/deleting these many replicas. // An rc is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them. // It resumes normal action after observing the watch events for them.
@ -100,11 +99,11 @@ type ReplicationManager struct {
// A store of replication controllers, populated by the rcController // A store of replication controllers, populated by the rcController
rcStore cache.StoreToReplicationControllerLister rcStore cache.StoreToReplicationControllerLister
// Watches changes to all replication controllers // Watches changes to all replication controllers
rcController *framework.Controller rcController *cache.Controller
// A store of pods, populated by the podController // A store of pods, populated by the podController
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
// Watches changes to all pods // Watches changes to all pods
podController framework.ControllerInterface podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once. // podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing. // Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool podStoreSynced func() bool
@ -120,7 +119,7 @@ type ReplicationManager struct {
} }
// NewReplicationManager creates a replication manager // NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@ -130,7 +129,7 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
} }
// newReplicationManager configures a replication manager with the specified event recorder // newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
} }
@ -147,7 +146,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
garbageCollectorEnabled: garbageCollectorEnabled, garbageCollectorEnabled: garbageCollectorEnabled,
} }
rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer( rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@ -159,7 +158,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
&api.ReplicationController{}, &api.ReplicationController{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullControllerResyncPeriod, FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: rm.enqueueController, AddFunc: rm.enqueueController,
UpdateFunc: rm.updateRC, UpdateFunc: rm.updateRC,
// This will enter the sync loop and no-op, because the controller has been deleted from the store. // This will enter the sync loop and no-op, because the controller has been deleted from the store.
@ -170,7 +169,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: rm.addPod, AddFunc: rm.addPod,
// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
// the most frequent pod update is status, and the associated rc will only list from local storage, so // the most frequent pod update is status, and the associated rc will only list from local storage, so

View File

@ -27,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/quota/evaluator/core" "k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -90,18 +89,18 @@ func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func
type ReplenishmentControllerFactory interface { type ReplenishmentControllerFactory interface {
// NewController returns a controller configured with the specified options. // NewController returns a controller configured with the specified options.
// This method is NOT thread-safe. // This method is NOT thread-safe.
NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error)
} }
// replenishmentControllerFactory implements ReplenishmentControllerFactory // replenishmentControllerFactory implements ReplenishmentControllerFactory
type replenishmentControllerFactory struct { type replenishmentControllerFactory struct {
kubeClient clientset.Interface kubeClient clientset.Interface
podInformer framework.SharedInformer podInformer cache.SharedInformer
} }
// NewReplenishmentControllerFactory returns a factory that knows how to build controllers // NewReplenishmentControllerFactory returns a factory that knows how to build controllers
// to replenish resources when updated or deleted // to replenish resources when updated or deleted
func NewReplenishmentControllerFactory(podInformer framework.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory { func NewReplenishmentControllerFactory(podInformer cache.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
return &replenishmentControllerFactory{ return &replenishmentControllerFactory{
kubeClient: kubeClient, kubeClient: kubeClient,
podInformer: podInformer, podInformer: podInformer,
@ -112,8 +111,8 @@ func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface)
return NewReplenishmentControllerFactory(nil, kubeClient) return NewReplenishmentControllerFactory(nil, kubeClient)
} }
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) { func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
var result framework.ControllerInterface var result cache.ControllerInterface
if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter())
} }
@ -121,7 +120,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
switch options.GroupKind { switch options.GroupKind {
case api.Kind("Pod"): case api.Kind("Pod"):
if r.podInformer != nil { if r.podInformer != nil {
r.podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ r.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: PodReplenishmentUpdateFunc(options), UpdateFunc: PodReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}) })
@ -133,7 +132,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
result = r.podInformer result = r.podInformer
case api.Kind("Service"): case api.Kind("Service"):
_, result = framework.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).List(options) return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
@ -144,13 +143,13 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
}, },
&api.Service{}, &api.Service{},
options.ResyncPeriod(), options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: ServiceReplenishmentUpdateFunc(options), UpdateFunc: ServiceReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}, },
) )
case api.Kind("ReplicationController"): case api.Kind("ReplicationController"):
_, result = framework.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@ -161,12 +160,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
}, },
&api.ReplicationController{}, &api.ReplicationController{},
options.ResyncPeriod(), options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}, },
) )
case api.Kind("PersistentVolumeClaim"): case api.Kind("PersistentVolumeClaim"):
_, result = framework.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@ -177,12 +176,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
}, },
&api.PersistentVolumeClaim{}, &api.PersistentVolumeClaim{},
options.ResyncPeriod(), options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}, },
) )
case api.Kind("Secret"): case api.Kind("Secret"):
_, result = framework.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options) return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
@ -193,12 +192,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
}, },
&api.Secret{}, &api.Secret{},
options.ResyncPeriod(), options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}, },
) )
case api.Kind("ConfigMap"): case api.Kind("ConfigMap"):
_, result = framework.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options) return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options)
@ -209,7 +208,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
}, },
&api.ConfigMap{}, &api.ConfigMap{},
options.ResyncPeriod(), options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
}, },
) )
@ -254,7 +253,7 @@ func IsUnhandledGroupKindError(err error) bool {
// returning the first success or failure it hits. If there are no hits either way, it returns an UnhandledGroupKind error // returning the first success or failure it hits. If there are no hits either way, it returns an UnhandledGroupKind error
type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) { func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
for _, factory := range f { for _, factory := range f {
controller, err := factory.NewController(options) controller, err := factory.NewController(options)
if !IsUnhandledGroupKindError(err) { if !IsUnhandledGroupKindError(err) {
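The factories above hand back a cache.ControllerInterface; a hedged sketch of driving one, assuming Run(stopCh) is part of that interface as the other controllers in this PR suggest (startReplenishment is an illustrative name):

    package example

    import (
    	"k8s.io/kubernetes/pkg/client/cache"
    )

    // startReplenishment (hypothetical) launches whatever controller a
    // replenishment factory produces and returns any construction error;
    // Run(stopCh) is assumed from cache.ControllerInterface.
    func startReplenishment(newController func() (cache.ControllerInterface, error), stopCh <-chan struct{}) error {
    	ctrl, err := newController()
    	if err != nil {
    		return err
    	}
    	go ctrl.Run(stopCh)
    	return nil
    }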

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -60,7 +59,7 @@ type ResourceQuotaController struct {
// An index of resource quota objects by namespace // An index of resource quota objects by namespace
rqIndexer cache.Indexer rqIndexer cache.Indexer
// Watches changes to all resource quota // Watches changes to all resource quota
rqController *framework.Controller rqController *cache.Controller
// ResourceQuota objects that need to be synchronized // ResourceQuota objects that need to be synchronized
queue workqueue.RateLimitingInterface queue workqueue.RateLimitingInterface
// missingUsageQueue holds objects that are missing the initial usage information // missingUsageQueue holds objects that are missing the initial usage information
@ -72,7 +71,7 @@ type ResourceQuotaController struct {
// knows how to calculate usage // knows how to calculate usage
registry quota.Registry registry quota.Registry
// controllers monitoring to notify for replenishment // controllers monitoring to notify for replenishment
replenishmentControllers []framework.ControllerInterface replenishmentControllers []cache.ControllerInterface
} }
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController { func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
@ -83,7 +82,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"), missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
resyncPeriod: options.ResyncPeriod, resyncPeriod: options.ResyncPeriod,
registry: options.Registry, registry: options.Registry,
replenishmentControllers: []framework.ControllerInterface{}, replenishmentControllers: []cache.ControllerInterface{},
} }
if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil { if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
@ -92,7 +91,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
rq.syncHandler = rq.syncResourceQuotaFromKey rq.syncHandler = rq.syncResourceQuotaFromKey
// build the controller that observes quota // build the controller that observes quota
rq.rqIndexer, rq.rqController = framework.NewIndexerInformer( rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options) return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
@ -103,7 +102,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
}, },
&api.ResourceQuota{}, &api.ResourceQuota{},
rq.resyncPeriod(), rq.resyncPeriod(),
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: rq.addQuota, AddFunc: rq.addQuota,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
// We are only interested in observing updates to quota.spec to drive updates to quota.status. // We are only interested in observing updates to quota.spec to drive updates to quota.status.

View File

@ -80,7 +80,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
if err != nil { if err != nil {
return fmt.Errorf("error listing routes: %v", err) return fmt.Errorf("error listing routes: %v", err)
} }
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this // TODO (cjcullen): use pkg/client/cache.NewInformer to watch this
// and reduce the number of lists needed. // and reduce the number of lists needed.
nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{}) nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{})
if err != nil { if err != nil {
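A sketch of what the TODO above could look like with the relocated package: keep a local node store so each reconcile pass reads from cache instead of re-listing against the API server. The function name newNodeStore and the empty handler set are assumptions:

    package example

    import (
    	"time"

    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/pkg/client/cache"
    	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    	"k8s.io/kubernetes/pkg/runtime"
    	"k8s.io/kubernetes/pkg/watch"
    )

    // newNodeStore keeps a locally synced cache of nodes; callers read the
    // returned store rather than issuing a fresh List on every pass.
    func newNodeStore(kubeClient clientset.Interface, resync time.Duration) (cache.Store, *cache.Controller) {
    	return cache.NewInformer(
    		&cache.ListWatch{
    			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
    				return kubeClient.Core().Nodes().List(options)
    			},
    			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
    				return kubeClient.Core().Nodes().Watch(options)
    			},
    		},
    		&api.Node{},
    		resync,
    		cache.ResourceEventHandlerFuncs{},
    	)
    }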

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -88,7 +87,7 @@ type ServiceController struct {
// A store of services, populated by the serviceController // A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister serviceStore cache.StoreToServiceLister
// Watches changes to all services // Watches changes to all services
serviceController *framework.Controller serviceController *cache.Controller
eventBroadcaster record.EventBroadcaster eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder eventRecorder record.EventRecorder
nodeLister cache.StoreToNodeLister nodeLister cache.StoreToNodeLister
@ -120,7 +119,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
}, },
workingQueue: workqueue.NewDelayingQueue(), workingQueue: workqueue.NewDelayingQueue(),
} }
s.serviceStore.Store, s.serviceController = framework.NewInformer( s.serviceStore.Store, s.serviceController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).List(options) return s.kubeClient.Core().Services(api.NamespaceAll).List(options)
@ -131,7 +130,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
}, },
&api.Service{}, &api.Service{},
serviceSyncPeriod, serviceSyncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: s.enqueueService, AddFunc: s.enqueueService,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
oldSvc, ok1 := old.(*api.Service) oldSvc, ok1 := old.(*api.Service)

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@ -80,7 +79,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
// If we're maintaining a single account, we can scope the accounts we watch to just that name // If we're maintaining a single account, we can scope the accounts we watch to just that name
accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name}) accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name})
} }
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer( e.serviceAccounts, e.serviceAccountController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = accountSelector options.FieldSelector = accountSelector
@ -93,13 +92,13 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
}, },
&api.ServiceAccount{}, &api.ServiceAccount{},
options.ServiceAccountResync, options.ServiceAccountResync,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: e.serviceAccountDeleted, DeleteFunc: e.serviceAccountDeleted,
}, },
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc}, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
) )
e.namespaces, e.namespaceController = framework.NewIndexerInformer( e.namespaces, e.namespaceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Namespaces().List(options) return e.client.Core().Namespaces().List(options)
@ -110,7 +109,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
}, },
&api.Namespace{}, &api.Namespace{},
options.NamespaceResync, options.NamespaceResync,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.namespaceAdded, AddFunc: e.namespaceAdded,
UpdateFunc: e.namespaceUpdated, UpdateFunc: e.namespaceUpdated,
}, },
@ -131,8 +130,8 @@ type ServiceAccountsController struct {
namespaces cache.Indexer namespaces cache.Indexer
// Since we join two objects, we'll watch both of them with controllers. // Since we join two objects, we'll watch both of them with controllers.
serviceAccountController *framework.Controller serviceAccountController *cache.Controller
namespaceController *framework.Controller namespaceController *cache.Controller
} }
// Runs controller loops and returns immediately // Runs controller loops and returns immediately

View File

@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/registry/secret" "k8s.io/kubernetes/pkg/registry/secret"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -90,7 +89,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
} }
e.serviceAccounts, e.serviceAccountController = framework.NewInformer( e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options) return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
@ -101,7 +100,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
}, },
&api.ServiceAccount{}, &api.ServiceAccount{},
options.ServiceAccountResync, options.ServiceAccountResync,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.queueServiceAccountSync, AddFunc: e.queueServiceAccountSync,
UpdateFunc: e.queueServiceAccountUpdateSync, UpdateFunc: e.queueServiceAccountUpdateSync,
DeleteFunc: e.queueServiceAccountSync, DeleteFunc: e.queueServiceAccountSync,
@ -109,7 +108,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
) )
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
e.secrets, e.secretController = framework.NewIndexerInformer( e.secrets, e.secretController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector options.FieldSelector = tokenSelector
@ -122,7 +121,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
}, },
&api.Secret{}, &api.Secret{},
options.SecretResync, options.SecretResync,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.queueSecretSync, AddFunc: e.queueSecretSync,
UpdateFunc: e.queueSecretUpdateSync, UpdateFunc: e.queueSecretUpdateSync,
DeleteFunc: e.queueSecretSync, DeleteFunc: e.queueSecretSync,
@ -144,8 +143,8 @@ type TokensController struct {
secrets cache.Indexer secrets cache.Indexer
// Since we join two objects, we'll watch both of them with controllers. // Since we join two objects, we'll watch both of them with controllers.
serviceAccountController *framework.Controller serviceAccountController *cache.Controller
secretController *framework.Controller secretController *cache.Controller
// syncServiceAccountQueue handles service account events: // syncServiceAccountQueue handles service account events:
// * ensures a referenced token exists for service accounts which still exist // * ensures a referenced token exists for service accounts which still exist

View File

@ -25,10 +25,10 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
@ -66,10 +66,10 @@ type AttachDetachController interface {
// NewAttachDetachController returns a new instance of AttachDetachController. // NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController( func NewAttachDetachController(
kubeClient internalclientset.Interface, kubeClient internalclientset.Interface,
podInformer framework.SharedInformer, podInformer kcache.SharedInformer,
nodeInformer framework.SharedInformer, nodeInformer kcache.SharedInformer,
pvcInformer framework.SharedInformer, pvcInformer kcache.SharedInformer,
pvInformer framework.SharedInformer, pvInformer kcache.SharedInformer,
cloud cloudprovider.Interface, cloud cloudprovider.Interface,
plugins []volume.VolumePlugin, plugins []volume.VolumePlugin,
recorder record.EventRecorder) (AttachDetachController, error) { recorder record.EventRecorder) (AttachDetachController, error) {
@ -94,13 +94,13 @@ func NewAttachDetachController(
cloud: cloud, cloud: cloud,
} }
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.podAdd, AddFunc: adc.podAdd,
UpdateFunc: adc.podUpdate, UpdateFunc: adc.podUpdate,
DeleteFunc: adc.podDelete, DeleteFunc: adc.podDelete,
}) })
nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.nodeAdd, AddFunc: adc.nodeAdd,
UpdateFunc: adc.nodeUpdate, UpdateFunc: adc.nodeUpdate,
DeleteFunc: adc.nodeDelete, DeleteFunc: adc.nodeDelete,
@ -143,12 +143,12 @@ type attachDetachController struct {
// pvcInformer is the shared PVC informer used to fetch and store PVC // pvcInformer is the shared PVC informer used to fetch and store PVC
// objects from the API server. It is shared with other controllers and // objects from the API server. It is shared with other controllers and
// therefore the PVC objects in its store should be treated as immutable. // therefore the PVC objects in its store should be treated as immutable.
pvcInformer framework.SharedInformer pvcInformer kcache.SharedInformer
// pvInformer is the shared PV informer used to fetch and store PV objects // pvInformer is the shared PV informer used to fetch and store PV objects
// from the API server. It is shared with other controllers and therefore // from the API server. It is shared with other controllers and therefore
// the PV objects in its store should be treated as immutable. // the PV objects in its store should be treated as immutable.
pvInformer framework.SharedInformer pvInformer kcache.SharedInformer
// cloud provider used by volume host // cloud provider used by volume host
cloud cloudprovider.Interface cloud cloudprovider.Interface
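Because the attach/detach controller already has its own local "cache" package, the client cache is imported under the kcache alias; consumer code attaches handlers to the shared informers exactly as before. A sketch of that consumer-side pattern (function name and parameters are illustrative):

```go
package sketch

import (
	kcache "k8s.io/kubernetes/pkg/client/cache"
)

// registerPodHandlers shows the aliased import in use: handlers attach to an
// already-constructed shared informer; only the package qualifier changed.
func registerPodHandlers(podInformer kcache.SharedInformer, add, del func(obj interface{})) {
	podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
		AddFunc:    add,
		DeleteFunc: del,
	})
}
```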

View File

@ -21,7 +21,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/controller/informers"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
) )

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache" kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/util/volumehelper"
@ -48,7 +47,7 @@ type DesiredStateOfWorldPopulator interface {
// desiredStateOfWorld - the cache to populate // desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator( func NewDesiredStateOfWorldPopulator(
loopSleepDuration time.Duration, loopSleepDuration time.Duration,
podInformer framework.SharedInformer, podInformer kcache.SharedInformer,
desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator { desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{ return &desiredStateOfWorldPopulator{
loopSleepDuration: loopSleepDuration, loopSleepDuration: loopSleepDuration,
@ -59,7 +58,7 @@ func NewDesiredStateOfWorldPopulator(
type desiredStateOfWorldPopulator struct { type desiredStateOfWorldPopulator struct {
loopSleepDuration time.Duration loopSleepDuration time.Duration
podInformer framework.SharedInformer podInformer kcache.SharedInformer
desiredStateOfWorld cache.DesiredStateOfWorld desiredStateOfWorld cache.DesiredStateOfWorld
} }

View File

@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"

View File

@ -25,8 +25,8 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/strategicpatch" "k8s.io/kubernetes/pkg/util/strategicpatch"
) )
@ -42,7 +42,7 @@ type NodeStatusUpdater interface {
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater. // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater( func NewNodeStatusUpdater(
kubeClient internalclientset.Interface, kubeClient internalclientset.Interface,
nodeInformer framework.SharedInformer, nodeInformer kcache.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater { actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{ return &nodeStatusUpdater{
actualStateOfWorld: actualStateOfWorld, actualStateOfWorld: actualStateOfWorld,
@ -53,7 +53,7 @@ func NewNodeStatusUpdater(
type nodeStatusUpdater struct { type nodeStatusUpdater struct {
kubeClient internalclientset.Interface kubeClient internalclientset.Interface
nodeInformer framework.SharedInformer nodeInformer kcache.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld actualStateOfWorld cache.ActualStateOfWorld
} }

View File

@ -29,7 +29,6 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume" vol "k8s.io/kubernetes/pkg/volume"
@ -151,12 +150,12 @@ const createProvisionedPVInterval = 10 * time.Second
// PersistentVolumeController is a controller that synchronizes // PersistentVolumeController is a controller that synchronizes
// PersistentVolumeClaims and PersistentVolumes. It starts two // PersistentVolumeClaims and PersistentVolumes. It starts two
// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim // cache.Controllers that watch PersistentVolume and PersistentVolumeClaim
// changes. // changes.
type PersistentVolumeController struct { type PersistentVolumeController struct {
volumeController *framework.Controller volumeController *cache.Controller
volumeSource cache.ListerWatcher volumeSource cache.ListerWatcher
claimController *framework.Controller claimController *cache.Controller
claimSource cache.ListerWatcher claimSource cache.ListerWatcher
classReflector *cache.Reflector classReflector *cache.Reflector
classSource cache.ListerWatcher classSource cache.ListerWatcher
@ -192,7 +191,7 @@ type PersistentVolumeController struct {
} }
// syncClaim is the main controller method to decide what to do with a claim. // syncClaim is the main controller method to decide what to do with a claim.
// It's invoked by appropriate framework.Controller callbacks when a claim is // It's invoked by appropriate cache.Controller callbacks when a claim is
// created, updated or periodically synced. We do not differentiate between // created, updated or periodically synced. We do not differentiate between
// these events. // these events.
// For easier readability, it was split into syncUnboundClaim and syncBoundClaim // For easier readability, it was split into syncUnboundClaim and syncBoundClaim
@ -382,7 +381,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu
} }
// syncVolume is the main controller method to decide what to do with a volume. // syncVolume is the main controller method to decide what to do with a volume.
// It's invoked by appropriate framework.Controller callbacks when a volume is // It's invoked by appropriate cache.Controller callbacks when a volume is
// created, updated or periodically synced. We do not differentiate between // created, updated or periodically synced. We do not differentiate between
// these events. // these events.
func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error {
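The doc comments now point at cache.Controller, whose run contract is unchanged: Run blocks until the stop channel closes. A hypothetical helper sketching how the two controllers would be driven (the controller's real startup also runs the class reflector):

```go
package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// runControllers is a sketch of the cache.Controller lifecycle: each Run
// blocks until stopCh closes, so both run in their own goroutine.
func runControllers(volumeController, claimController *cache.Controller, stopCh <-chan struct{}) {
	go volumeController.Run(stopCh)
	go claimController.Run(stopCh)
	<-stopCh // block until asked to stop
}
```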

View File

@ -30,7 +30,6 @@ import (
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap"
@ -65,7 +64,7 @@ func NewPersistentVolumeController(
controller := &PersistentVolumeController{ controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(), volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc), claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient, kubeClient: kubeClient,
eventRecorder: eventRecorder, eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */), runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
@ -120,22 +119,22 @@ func NewPersistentVolumeController(
} }
controller.classSource = classSource controller.classSource = classSource
_, controller.volumeController = framework.NewIndexerInformer( _, controller.volumeController = cache.NewIndexerInformer(
volumeSource, volumeSource,
&api.PersistentVolume{}, &api.PersistentVolume{},
syncPeriod, syncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: controller.addVolume, AddFunc: controller.addVolume,
UpdateFunc: controller.updateVolume, UpdateFunc: controller.updateVolume,
DeleteFunc: controller.deleteVolume, DeleteFunc: controller.deleteVolume,
}, },
cache.Indexers{"accessmodes": accessModesIndexFunc}, cache.Indexers{"accessmodes": accessModesIndexFunc},
) )
_, controller.claimController = framework.NewInformer( _, controller.claimController = cache.NewInformer(
claimSource, claimSource,
&api.PersistentVolumeClaim{}, &api.PersistentVolumeClaim{},
syncPeriod, syncPeriod,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: controller.addClaim, AddFunc: controller.addClaim,
UpdateFunc: controller.updateClaim, UpdateFunc: controller.updateClaim,
DeleteFunc: controller.deleteClaim, DeleteFunc: controller.deleteClaim,
@ -144,7 +143,7 @@ func NewPersistentVolumeController(
// This is just a cache of StorageClass instances, no special actions are // This is just a cache of StorageClass instances, no special actions are
// needed when a class is created/deleted/updated. // needed when a class is created/deleted/updated.
controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) controller.classes = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
controller.classReflector = cache.NewReflector( controller.classReflector = cache.NewReflector(
classSource, classSource,
&storage.StorageClass{}, &storage.StorageClass{},
@ -212,7 +211,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVo
return storeObjectUpdate(ctrl.claims, claim, "claim") return storeObjectUpdate(ctrl.claims, claim, "claim")
} }
// addVolume is callback from framework.Controller watching PersistentVolume // addVolume is callback from cache.Controller watching PersistentVolume
// events. // events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
pv, ok := obj.(*api.PersistentVolume) pv, ok := obj.(*api.PersistentVolume)
@ -247,7 +246,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
} }
} }
// updateVolume is callback from framework.Controller watching PersistentVolume // updateVolume is callback from cache.Controller watching PersistentVolume
// events. // events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
newVolume, ok := newObj.(*api.PersistentVolume) newVolume, ok := newObj.(*api.PersistentVolume)
@ -282,7 +281,7 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{})
} }
} }
// deleteVolume is callback from framework.Controller watching PersistentVolume // deleteVolume is callback from cache.Controller watching PersistentVolume
// events. // events.
func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
_ = ctrl.volumes.store.Delete(obj) _ = ctrl.volumes.store.Delete(obj)
@ -330,7 +329,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
} }
} }
// addClaim is callback from framework.Controller watching PersistentVolumeClaim // addClaim is callback from cache.Controller watching PersistentVolumeClaim
// events. // events.
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
// Store the new claim version in the cache and do not process it if this is // Store the new claim version in the cache and do not process it if this is
@ -360,7 +359,7 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
} }
} }
// updateClaim is callback from framework.Controller watching PersistentVolumeClaim // updateClaim is callback from cache.Controller watching PersistentVolumeClaim
// events. // events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
// Store the new claim version in the cache and do not process it if this is // Store the new claim version in the cache and do not process it if this is
@ -390,7 +389,7 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{})
} }
} }
// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim // deleteClaim is callback from cache.Controller watching PersistentVolumeClaim
// events. // events.
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
_ = ctrl.claims.Delete(obj) _ = ctrl.claims.Delete(obj)
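DeletionHandlingMetaNamespaceKeyFunc moves into cache alongside the informers, with unchanged behavior: it is MetaNamespaceKeyFunc plus tombstone handling, roughly as sketched below, which is why it is the right key func for stores fed by controller delete callbacks:

```go
package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// keyFor approximates what cache.DeletionHandlingMetaNamespaceKeyFunc does:
// delete events can deliver a tombstone instead of the object, and the
// tombstone already carries the store key.
func keyFor(obj interface{}) (string, error) {
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return tombstone.Key, nil
	}
	return cache.MetaNamespaceKeyFunc(obj)
}
```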

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/controller/framework" fcache "k8s.io/kubernetes/pkg/client/testing/cache"
) )
// Test the real controller methods (add/update/delete claim/volume) with // Test the real controller methods (add/update/delete claim/volume) with
@ -161,8 +161,8 @@ func TestControllerSync(t *testing.T) {
// Initialize the controller // Initialize the controller
client := &fake.Clientset{} client := &fake.Clientset{}
volumeSource := framework.NewFakePVControllerSource() volumeSource := fcache.NewFakePVControllerSource()
claimSource := framework.NewFakePVCControllerSource() claimSource := fcache.NewFakePVCControllerSource()
ctrl := newTestController(client, volumeSource, claimSource, nil, true) ctrl := newTestController(client, volumeSource, claimSource, nil, true)
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
for _, claim := range test.initialClaims { for _, claim := range test.initialClaims {
@ -247,7 +247,7 @@ func storeVersion(t *testing.T, prefix string, c cache.Store, version string, ex
// TestControllerCache tests func storeObjectUpdate() // TestControllerCache tests func storeObjectUpdate()
func TestControllerCache(t *testing.T) { func TestControllerCache(t *testing.T) {
// Cache under test // Cache under test
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// Store new PV // Store new PV
storeVersion(t, "Step1", c, "1", true) storeVersion(t, "Step1", c, "1", true)
@ -264,7 +264,7 @@ func TestControllerCache(t *testing.T) {
} }
func TestControllerCacheParsingError(t *testing.T) { func TestControllerCacheParsingError(t *testing.T) {
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// There must be something in the cache to compare with // There must be something in the cache to compare with
storeVersion(t, "Step1", c, "1", true) storeVersion(t, "Step1", c, "1", true)

View File

@ -38,8 +38,8 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@ -125,8 +125,8 @@ type volumeReactor struct {
changedObjects []interface{} changedObjects []interface{}
changedSinceLastSync int changedSinceLastSync int
ctrl *PersistentVolumeController ctrl *PersistentVolumeController
volumeSource *framework.FakePVControllerSource volumeSource *fcache.FakePVControllerSource
claimSource *framework.FakePVCControllerSource claimSource *fcache.FakePVCControllerSource
lock sync.Mutex lock sync.Mutex
errors []reactorError errors []reactorError
} }
@ -571,7 +571,7 @@ func (r *volumeReactor) addClaimEvent(claim *api.PersistentVolumeClaim) {
r.claimSource.Add(claim) r.claimSource.Add(claim)
} }
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *framework.FakePVControllerSource, claimSource *framework.FakePVCControllerSource, errors []reactorError) *volumeReactor { func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *fcache.FakePVControllerSource, claimSource *fcache.FakePVCControllerSource, errors []reactorError) *volumeReactor {
reactor := &volumeReactor{ reactor := &volumeReactor{
volumes: make(map[string]*api.PersistentVolume), volumes: make(map[string]*api.PersistentVolume),
claims: make(map[string]*api.PersistentVolumeClaim), claims: make(map[string]*api.PersistentVolumeClaim),
@ -586,13 +586,13 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController,
func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController { func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
if volumeSource == nil { if volumeSource == nil {
volumeSource = framework.NewFakePVControllerSource() volumeSource = fcache.NewFakePVControllerSource()
} }
if claimSource == nil { if claimSource == nil {
claimSource = framework.NewFakePVCControllerSource() claimSource = fcache.NewFakePVCControllerSource()
} }
if classSource == nil { if classSource == nil {
classSource = framework.NewFakeControllerSource() classSource = fcache.NewFakeControllerSource()
} }
ctrl := NewPersistentVolumeController( ctrl := NewPersistentVolumeController(
kubeClient, kubeClient,
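The fake watch sources used by these tests move as well, from pkg/controller/framework to pkg/client/testing/cache (imported here as fcache). A sketch of how a test drives them after the move; the test name and claim object are illustrative, and newTestController is the helper defined above:

```go
package persistentvolume

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	fcache "k8s.io/kubernetes/pkg/client/testing/cache"
)

func TestFakeSourceSketch(t *testing.T) {
	client := &fake.Clientset{}
	volumeSource := fcache.NewFakePVControllerSource()
	claimSource := fcache.NewFakePVCControllerSource()
	ctrl := newTestController(client, volumeSource, claimSource, nil, true)
	_ = ctrl

	// Injecting an event is unchanged: add an object to the fake source and
	// the controller's informer observes it through its watch.
	claimSource.Add(&api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "claim1", Namespace: "default"},
	})
}
```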

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
kcache "k8s.io/kubernetes/pkg/client/cache" kcache "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/validation" "k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -112,10 +111,10 @@ type KubeDNS struct {
domainPath []string domainPath []string
// endpointsController invokes registered callbacks when endpoints change. // endpointsController invokes registered callbacks when endpoints change.
endpointsController *kframework.Controller endpointsController *kcache.Controller
// serviceController invokes registered callbacks when services change. // serviceController invokes registered callbacks when services change.
serviceController *kframework.Controller serviceController *kcache.Controller
// Map of federation names that the cluster in which this kube-dns is running belongs to, to // Map of federation names that the cluster in which this kube-dns is running belongs to, to
// the corresponding domain names. // the corresponding domain names.
@ -188,7 +187,7 @@ func (kd *KubeDNS) GetCacheAsJSON() (string, error) {
func (kd *KubeDNS) setServicesStore() { func (kd *KubeDNS) setServicesStore() {
// Returns a cache.ListWatch that gets all changes to services. // Returns a cache.ListWatch that gets all changes to services.
kd.servicesStore, kd.serviceController = kframework.NewInformer( kd.servicesStore, kd.serviceController = kcache.NewInformer(
&kcache.ListWatch{ &kcache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) { ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
return kd.kubeClient.Core().Services(kapi.NamespaceAll).List(options) return kd.kubeClient.Core().Services(kapi.NamespaceAll).List(options)
@ -199,7 +198,7 @@ func (kd *KubeDNS) setServicesStore() {
}, },
&kapi.Service{}, &kapi.Service{},
resyncPeriod, resyncPeriod,
kframework.ResourceEventHandlerFuncs{ kcache.ResourceEventHandlerFuncs{
AddFunc: kd.newService, AddFunc: kd.newService,
DeleteFunc: kd.removeService, DeleteFunc: kd.removeService,
UpdateFunc: kd.updateService, UpdateFunc: kd.updateService,
@ -209,7 +208,7 @@ func (kd *KubeDNS) setServicesStore() {
func (kd *KubeDNS) setEndpointsStore() { func (kd *KubeDNS) setEndpointsStore() {
// Returns a cache.ListWatch that gets all changes to endpoints. // Returns a cache.ListWatch that gets all changes to endpoints.
kd.endpointsStore, kd.endpointsController = kframework.NewInformer( kd.endpointsStore, kd.endpointsController = kcache.NewInformer(
&kcache.ListWatch{ &kcache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) { ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
return kd.kubeClient.Core().Endpoints(kapi.NamespaceAll).List(options) return kd.kubeClient.Core().Endpoints(kapi.NamespaceAll).List(options)
@ -220,7 +219,7 @@ func (kd *KubeDNS) setEndpointsStore() {
}, },
&kapi.Endpoints{}, &kapi.Endpoints{},
resyncPeriod, resyncPeriod,
kframework.ResourceEventHandlerFuncs{ kcache.ResourceEventHandlerFuncs{
AddFunc: kd.handleEndpointAdd, AddFunc: kd.handleEndpointAdd,
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
// TODO: Avoid unwanted updates. // TODO: Avoid unwanted updates.
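kube-dns keeps its kcache alias, and the controllers it builds still expose the usual initial-sync barrier. A hypothetical readiness check along those lines, assuming the standard HasSynced contract on *kcache.Controller:

```go
package sketch

import kcache "k8s.io/kubernetes/pkg/client/cache"

// controllersSynced reports whether every controller has completed its
// initial list; kube-dns would gate on this before answering queries.
func controllersSynced(cs ...*kcache.Controller) bool {
	for _, c := range cs {
		if !c.HasSynced() {
			return false
		}
	}
	return true
}
```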

View File

@ -21,13 +21,13 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/golang-lru"
"k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"github.com/hashicorp/golang-lru"
) )
func getResourceList(cpu, memory string) api.ResourceList { func getResourceList(cpu, memory string) api.ResourceList {

View File

@ -19,6 +19,7 @@ package autoprovision
import ( import (
"io" "io"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"fmt" "fmt"
@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
) )
func init() { func init() {
@ -42,7 +42,7 @@ func init() {
type provision struct { type provision struct {
*admission.Handler *admission.Handler
client clientset.Interface client clientset.Interface
namespaceInformer framework.SharedIndexInformer namespaceInformer cache.SharedIndexInformer
} }
var _ = admission.WantsInformerFactory(&provision{}) var _ = admission.WantsInformerFactory(&provision{})

View File

@ -28,7 +28,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )

View File

@ -19,6 +19,7 @@ package exists
import ( import (
"io" "io"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"fmt" "fmt"
@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
) )
func init() { func init() {
@ -42,7 +42,7 @@ func init() {
type exists struct { type exists struct {
*admission.Handler *admission.Handler
client clientset.Interface client clientset.Interface
namespaceInformer framework.SharedIndexInformer namespaceInformer cache.SharedIndexInformer
} }
var _ = admission.WantsInformerFactory(&exists{}) var _ = admission.WantsInformerFactory(&exists{})

View File

@ -27,7 +27,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )

View File

@ -23,9 +23,9 @@ import (
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
@ -52,7 +52,7 @@ type lifecycle struct {
*admission.Handler *admission.Handler
client clientset.Interface client clientset.Interface
immortalNamespaces sets.String immortalNamespaces sets.String
namespaceInformer framework.SharedIndexInformer namespaceInformer cache.SharedIndexInformer
// forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache. // forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache.
// if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server. // if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server.
forceLiveLookupCache *lru.Cache forceLiveLookupCache *lru.Cache
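In the admission plugins the informer field becomes cache.SharedIndexInformer. Reads go through the informer's local store; for a cluster-scoped object like Namespace the store key is just the name. A sketch with an illustrative helper name:

```go
package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// namespaceExists checks the shared informer's local store; namespaces are
// cluster-scoped, so the store key is the bare name.
func namespaceExists(informer cache.SharedIndexInformer, name string) (bool, error) {
	_, exists, err := informer.GetStore().GetByKey(name)
	return exists, err
}
```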

View File

@ -27,7 +27,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"

View File

@ -18,6 +18,7 @@ package predicates
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
) )

View File

@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
@ -77,8 +76,8 @@ type ConfigFactory struct {
// Close this to stop all reflectors // Close this to stop all reflectors
StopEverything chan struct{} StopEverything chan struct{}
scheduledPodPopulator *framework.Controller scheduledPodPopulator *cache.Controller
nodePopulator *framework.Controller nodePopulator *cache.Controller
schedulerCache schedulercache.Cache schedulerCache schedulercache.Cache
@ -125,11 +124,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
// We construct this here instead of in CreateFromKeys because // We construct this here instead of in CreateFromKeys because
// ScheduledPodLister is something we provide to plug in functions that // ScheduledPodLister is something we provide to plug in functions that
// they may need to call. // they may need to call.
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer( c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
c.createAssignedNonTerminatedPodLW(), c.createAssignedNonTerminatedPodLW(),
&api.Pod{}, &api.Pod{},
0, 0,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: c.addPodToCache, AddFunc: c.addPodToCache,
UpdateFunc: c.updatePodInCache, UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache, DeleteFunc: c.deletePodFromCache,
@ -137,11 +136,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
c.NodeLister.Store, c.nodePopulator = framework.NewInformer( c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
c.createNodeLW(), c.createNodeLW(),
&api.Node{}, &api.Node{},
0, 0,
framework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: c.addNodeToCache, AddFunc: c.addNodeToCache,
UpdateFunc: c.updateNodeInCache, UpdateFunc: c.updateNodeInCache,
DeleteFunc: c.deleteNodeFromCache, DeleteFunc: c.deleteNodeFromCache,
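The scheduler's pod store registers the namespace index under its new cache-qualified names (cache.NamespaceIndex mapped to cache.MetaNamespaceIndexFunc). Once populated, that indexer supports namespace-scoped lookups; a sketch with an illustrative function name:

```go
package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// scheduledPodsInNamespace queries the index registered above; ByIndex
// returns every object whose index value matches the given namespace.
func scheduledPodsInNamespace(indexer cache.Indexer, ns string) ([]interface{}, error) {
	return indexer.ByIndex(cache.NamespaceIndex, ns)
}
```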

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -192,7 +191,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc) existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
var ns string var ns string
var config framework.RCConfig var config framework.RCConfig
var controller *controllerframework.Controller var controller *cache.Controller
var newPods cache.Store var newPods cache.Store
var stopCh chan struct{} var stopCh chan struct{}
var tracker *podTracker var tracker *podTracker
@ -217,7 +216,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
stopCh = make(chan struct{}) stopCh = make(chan struct{})
tracker = newPodTracker() tracker = newPodTracker()
newPods, controller = controllerframework.NewInformer( newPods, controller = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector
@ -230,7 +229,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
}, },
&api.Pod{}, &api.Pod{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
tracker.remember(obj.(*api.Pod), ADD) tracker.remember(obj.(*api.Pod), ADD)
}, },

View File

@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -188,7 +187,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
// eLock is a lock protects the events // eLock is a lock protects the events
var eLock sync.Mutex var eLock sync.Mutex
events := make([](*api.Event), 0) events := make([](*api.Event), 0)
_, controller := controllerframework.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dtc.Client.Events(dtc.Namespace).List(options) return dtc.Client.Events(dtc.Namespace).List(options)
@ -199,7 +198,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
}, },
&api.Event{}, &api.Event{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
eLock.Lock() eLock.Lock()
defer eLock.Unlock() defer eLock.Unlock()
@ -215,7 +214,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
var uLock sync.Mutex var uLock sync.Mutex
updateCount := 0 updateCount := 0
label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
_, updateController := controllerframework.NewInformer( _, updateController := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label options.LabelSelector = label
@ -228,7 +227,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
}, },
&api.Pod{}, &api.Pod{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) { UpdateFunc: func(_, _ interface{}) {
uLock.Lock() uLock.Lock()
defer uLock.Unlock() defer uLock.Unlock()
@ -533,7 +532,7 @@ var _ = framework.KubeDescribe("Density", func() {
} }
additionalPodsPrefix = "density-latency-pod" additionalPodsPrefix = "density-latency-pod"
latencyPodsStore, controller := controllerframework.NewInformer( latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
@ -546,7 +545,7 @@ var _ = framework.KubeDescribe("Density", func() {
}, },
&api.Pod{}, &api.Pod{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod) p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
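These e2e informers follow the same lifecycle before and after the rename: Run blocks until the stop channel closes, so the tests run it in a goroutine for their duration. A hypothetical helper capturing the pattern the density and daemon-restart tests use with stopCh:

```go
package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// startWatcher runs the controller until the returned stop func is called.
func startWatcher(controller *cache.Controller) (stop func()) {
	stopCh := make(chan struct{})
	go controller.Run(stopCh)
	return func() { close(stopCh) }
}
```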

View File

@ -18,14 +18,15 @@ package e2e
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
"os" "os"
"reflect" "reflect"
"strconv" "strconv"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
) )

View File

@ -29,10 +29,11 @@ import (
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"reflect"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"reflect"
) )
const ( const (

View File

@ -37,7 +37,6 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@ -580,8 +579,8 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name) nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
stopCh := make(chan struct{}) stopCh := make(chan struct{})
newNode := make(chan *api.Node) newNode := make(chan *api.Node)
var controller *controllerframework.Controller var controller *cache.Controller
_, controller = controllerframework.NewInformer( _, controller = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector
@ -594,7 +593,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
}, },
&api.Node{}, &api.Node{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*api.Node) n, ok := newObj.(*api.Node)
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@ -275,7 +274,7 @@ func (eq *endpointQueries) added(e *api.Endpoints) {
// blocks until it has finished syncing. // blocks until it has finished syncing.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := controllerframework.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return f.Client.Endpoints(f.Namespace.Name).List(options) return f.Client.Endpoints(f.Namespace.Name).List(options)
@ -286,7 +285,7 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
}, },
&api.Endpoints{}, &api.Endpoints{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
if e, ok := obj.(*api.Endpoints); ok { if e, ok := obj.(*api.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 { if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@ -478,7 +477,7 @@ func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
// newInformerWatchPod creates an informer to check whether all pods are running. // newInformerWatchPod creates an informer to check whether all pods are running.
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time, func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time,
podType string) *controllerframework.Controller { podType string) *cache.Controller {
ns := f.Namespace.Name ns := f.Namespace.Name
checkPodRunning := func(p *api.Pod) { checkPodRunning := func(p *api.Pod) {
mutex.Lock() mutex.Lock()
@ -492,7 +491,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
} }
} }
_, controller := controllerframework.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}) options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
@ -505,7 +504,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
}, },
&api.Pod{}, &api.Pod{},
0, 0,
controllerframework.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod) p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))

View File

@ -17,6 +17,8 @@ limitations under the License.
package objectmeta package objectmeta
import ( import (
"testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
@ -27,7 +29,6 @@ import (
etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest" "k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
"testing"
) )
func TestIgnoreClusterName(t *testing.T) { func TestIgnoreClusterName(t *testing.T) {

View File

@ -29,11 +29,11 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
controllerframwork "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
@ -127,7 +127,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
return ret, nil return ret, nil
} }
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, controllerframwork.SharedIndexInformer, clientset.Interface) { func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig() masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false masterConfig.EnableCoreControllers = false
_, s := framework.RunAMaster(masterConfig) _, s := framework.RunAMaster(masterConfig)
@ -160,7 +160,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
// wait for the podInformer to observe the pods. Call this function before // wait for the podInformer to observe the pods. Call this function before
// running the RS controller to prevent the rc manager from creating new pods // running the RS controller to prevent the rc manager from creating new pods
// rather than adopting the existing ones. // rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer controllerframwork.SharedIndexInformer, podNum int) { func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List() objects := podInformer.GetIndexer().List()
if len(objects) == podNum { if len(objects) == podNum {
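waitToObservePods only polls the informer's indexer, so the shared informer must already be running when it is called. A sketch of that wiring, inlining the same wait.Poll loop the tests use; the function name is illustrative:

```go
package sketch

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/util/wait"
)

// observePods starts the shared informer and blocks until its indexer has
// listed podNum pods, mirroring waitToObservePods above.
func observePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
	stopCh := make(chan struct{})
	defer close(stopCh)
	go podInformer.Run(stopCh)
	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
		return len(podInformer.GetIndexer().List()) == podNum, nil
	}); err != nil {
		t.Fatalf("pods not observed: %v", err)
	}
}
```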

View File

@ -28,11 +28,11 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
controllerframwork "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
@ -124,7 +124,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
return ret, nil return ret, nil
} }
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, controllerframwork.SharedIndexInformer, clientset.Interface) { func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig() masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false masterConfig.EnableCoreControllers = false
_, s := framework.RunAMaster(masterConfig) _, s := framework.RunAMaster(masterConfig)
@ -157,7 +157,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
// wait for the podInformer to observe the pods. Call this function before // wait for the podInformer to observe the pods. Call this function before
// running the RC manager to prevent the rc manager from creating new pods // running the RC manager to prevent the rc manager from creating new pods
// rather than adopting the existing ones. // rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer controllerframwork.SharedIndexInformer, podNum int) { func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List() objects := podInformer.GetIndexer().List()
if len(objects) == podNum { if len(objects) == podNum {

View File

@ -593,7 +593,6 @@ k8s.io/kubernetes/pkg/controller/deployment,asalkeld,0
k8s.io/kubernetes/pkg/controller/deployment/util,saad-ali,1 k8s.io/kubernetes/pkg/controller/deployment/util,saad-ali,1
k8s.io/kubernetes/pkg/controller/disruption,fabioy,1 k8s.io/kubernetes/pkg/controller/disruption,fabioy,1
k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1 k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1
k8s.io/kubernetes/pkg/controller/framework,smarterclayton,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1 k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1 k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1
k8s.io/kubernetes/pkg/controller/gc,jdef,1 k8s.io/kubernetes/pkg/controller/gc,jdef,1
