Merge pull request #32718 from mikedanese/mv-informer

Automatic merge from submit-queue

move informer and controller to pkg/client/cache

@kubernetes/sig-api-machinery
Kubernetes Submit Queue, 2016-09-15 16:44:30 -07:00 (committed via GitHub)
commit fcc97f37ee
78 changed files with 425 additions and 498 deletions
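For downstream code the migration is mechanical: the constructors and types that used to live under pkg/controller/framework keep their signatures and simply move to pkg/client/cache. A minimal sketch of a post-move call site (the Pod watcher below is hypothetical, not part of this commit):

package podwatch

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache" // was: "k8s.io/kubernetes/pkg/controller/framework"
)

// newPodInformer spells out the rename: framework.NewInformer and
// framework.ResourceEventHandlerFuncs become cache.NewInformer and
// cache.ResourceEventHandlerFuncs; only the package qualifier changes.
func newPodInformer(lw cache.ListerWatcher) (cache.Store, *cache.Controller) {
	return cache.NewInformer(
		lw,
		&api.Pod{},
		30*time.Second, // arbitrary resync period for the sketch
		cache.ResourceEventHandlerFuncs{},
	)
}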


@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/apiserver/authenticator"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/genericapiserver"
"k8s.io/kubernetes/pkg/genericapiserver/authorizer"


@ -51,9 +51,9 @@ import (
"k8s.io/kubernetes/pkg/controller/deployment"
"k8s.io/kubernetes/pkg/controller/disruption"
endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/job"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
nodecontroller "k8s.io/kubernetes/pkg/controller/node"


@ -79,7 +79,6 @@ import (
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos"
controllerfw "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/healthz"
"k8s.io/kubernetes/pkg/master/ports"
@ -786,10 +785,10 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config
log.Fatalf("Cannot create client to watch nodes: %v", err)
}
nodeLW := cache.NewListWatchFromClient(nodesClient.CoreClient, "nodes", api.NamespaceAll, fields.Everything())
nodeStore, nodeCtl := controllerfw.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &controllerfw.ResourceEventHandlerFuncs{
nodeStore, nodeCtl := cache.NewInformer(nodeLW, &api.Node{}, s.nodeRelistPeriod, &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(obj interface{}) {
if eiRegistry != nil {
// TODO(jdef) use controllerfw.DeletionHandlingMetaNamespaceKeyFunc at some point?
// TODO(jdef) use cache.DeletionHandlingMetaNamespaceKeyFunc at some point?
nodeName := ""
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
nodeName = tombstone.Key
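The TODO above points at the helper this commit moves into the cache package. A sketch of what adopting it would look like, assuming a delete handler that only needs the object's key:

package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/client/cache"
)

// onNodeDelete recovers the key of a deleted object even when the watch
// missed the delete event: in that case the informer delivers a
// cache.DeletedFinalStateUnknown tombstone instead of the object, and
// cache.DeletionHandlingMetaNamespaceKeyFunc unwraps either form.
func onNodeDelete(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		glog.Errorf("couldn't get key for object %+v: %v", obj, err)
		return
	}
	// Nodes are cluster-scoped, so the key is just the node name.
	glog.V(4).Infof("node %q deleted", key)
}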


@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kservice "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
@ -43,7 +42,7 @@ import (
)
var (
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
type EndpointController interface {
@ -56,7 +55,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
client: client,
queue: workqueue.New(),
}
e.serviceStore.Store, e.serviceController = framework.NewInformer(
e.serviceStore.Store, e.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options)
@ -67,7 +66,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
},
&api.Service{},
kservice.FullServiceResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur)
@ -76,7 +75,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
},
)
e.podStore.Indexer, e.podController = framework.NewIndexerInformer(
e.podStore.Indexer, e.podController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Pods(api.NamespaceAll).List(options)
@ -87,7 +86,7 @@ func NewEndpointController(client *clientset.Clientset) *endpointController {
},
&api.Pod{},
5*time.Minute,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.addPod,
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
@ -113,8 +112,8 @@ type endpointController struct {
// Since we join two objects, we'll watch both of them with
// controllers.
serviceController *framework.Controller
podController *framework.Controller
serviceController *cache.Controller
podController *cache.Controller
}
// Runs e; will not return until stopCh is closed. workers determines how many
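The comment above describes the join: one controller watching two object kinds, with both watches funneling work into a single queue keyed by service. A condensed sketch of that shape (hypothetical wiring; enqueueService, supplied by the caller, is assumed to map either a service or a pod to its service key):

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// newJoinedControllers wires two informers to one reconcile path: service
// edits and pod churn both converge on the same per-service work item.
func newJoinedControllers(serviceLW, podLW cache.ListerWatcher, enqueueService func(obj interface{})) (*cache.Controller, *cache.Controller) {
	_, serviceController := cache.NewInformer(
		serviceLW, &api.Service{}, 5*time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    enqueueService,
			UpdateFunc: func(old, cur interface{}) { enqueueService(cur) },
			DeleteFunc: enqueueService,
		},
	)
	_, podController := cache.NewInformer(
		podLW, &api.Pod{}, 5*time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    enqueueService,
			UpdateFunc: func(old, cur interface{}) { enqueueService(cur) },
			DeleteFunc: enqueueService,
		},
	)
	return serviceController, podController
}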


@ -16,7 +16,7 @@ limitations under the License.
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
// It can be configured and called directly or via the hyperkube cache.
package app
import (
@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apiserver/authenticator"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/genericapiserver"
"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"


@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
@ -50,7 +49,7 @@ type ClusterController struct {
clusterKubeClientMap map[string]ClusterClient
// cluster framework and store
clusterController *framework.Controller
clusterController *cache.Controller
clusterStore cluster_cache.StoreToClusterLister
}
@ -63,7 +62,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
clusterClusterStatusMap: make(map[string]federation_v1beta1.ClusterStatus),
clusterKubeClientMap: make(map[string]ClusterClient),
}
cc.clusterStore.Store, cc.clusterController = framework.NewInformer(
cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return cc.federationClient.Federation().Clusters().List(options)
@ -74,7 +73,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
},
&federation_v1beta1.Cluster{},
controller.NoResyncPeriodFunc(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: cc.delFromClusterSet,
AddFunc: cc.addToClusterSet,
},


@ -33,7 +33,6 @@ import (
kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@ -81,7 +80,7 @@ type IngressController struct {
// Definitions of ingresses that should be federated.
ingressInformerStore cache.Store
// Informer controller for ingresses that should be federated.
ingressInformerController framework.ControllerInterface
ingressInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federation_release_1_4.Interface
@ -125,7 +124,7 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
ic.configMapDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on ingresses that should be federated.
ic.ingressInformerStore, ic.ingressInformerController = framework.NewInformer(
ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return client.Extensions().Ingresses(api.NamespaceAll).List(options)
@ -145,8 +144,8 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
// Federated informer on ingresses in members of federation.
ic.ingressFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
@ -177,9 +176,9 @@ func NewIngressController(client federation_release_1_4.Interface) *IngressContr
// Federated informer on configmaps for ingress controllers in members of the federation.
ic.configMapFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
return framework.NewInformer(
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
if targetClient == nil {


@ -32,7 +32,6 @@ import (
kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
@ -62,7 +61,7 @@ type NamespaceController struct {
// Definitions of namespaces that should be federated.
namespaceInformerStore cache.Store
// Informer controller for namespaces that should be federated.
namespaceInformerController framework.ControllerInterface
namespaceInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federation_release_1_4.Interface
@ -100,7 +99,7 @@ func NewNamespaceController(client federation_release_1_4.Interface) *NamespaceC
nc.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on namespaces that should be federated.
nc.namespaceInformerStore, nc.namespaceInformerController = framework.NewInformer(
nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return client.Core().Namespaces().List(options)
@ -116,8 +115,8 @@ func NewNamespaceController(client federation_release_1_4.Interface) *NamespaceC
// Federated informer on namespaces in members of federation.
nc.namespaceFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Core().Namespaces().List(options)


@ -39,7 +39,6 @@ import (
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/wait"
@ -79,7 +78,7 @@ func parseFederationReplicaSetReference(frs *extensionsv1.ReplicaSet) (*fed.Fede
type ReplicaSetController struct {
fedClient fedclientset.Interface
replicaSetController *framework.Controller
replicaSetController *cache.Controller
replicaSetStore cache.StoreToReplicaSetLister
fedReplicaSetInformer fedutil.FederatedInformer
@ -118,8 +117,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
eventRecorder: recorder,
}
replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
@ -145,8 +144,8 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
}
frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)
podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
@ -166,7 +165,7 @@ func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSe
}
frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})
frsc.replicaSetStore.Store, frsc.replicaSetController = framework.NewInformer(
frsc.replicaSetStore.Store, frsc.replicaSetController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)


@ -30,7 +30,6 @@ import (
kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
@ -59,7 +58,7 @@ type SecretController struct {
// Definitions of secrets that should be federated.
secretInformerStore cache.Store
// Informer controller for secrets that should be federated.
secretInformerController framework.ControllerInterface
secretInformerController cache.ControllerInterface
// Client to federated api server.
federatedApiClient federation_release_1_4.Interface
@ -97,7 +96,7 @@ func NewSecretController(client federation_release_1_4.Interface) *SecretControl
secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on secrets that should be federated.
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = framework.NewInformer(
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return client.Core().Secrets(api_v1.NamespaceAll).List(options)
@ -113,8 +112,8 @@ func NewSecretController(client federation_release_1_4.Interface) *SecretControl
// Federated informer on secrets in members of federation.
secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Core().Secrets(api_v1.NamespaceAll).List(options)


@ -25,7 +25,6 @@ import (
cache "k8s.io/kubernetes/pkg/client/cache"
release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller/framework"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
@ -43,11 +42,11 @@ type clusterCache struct {
// A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister
// Watches changes to all services
serviceController *framework.Controller
serviceController *cache.Controller
// A store of endpoints, populated by the endpointController
endpointStore cache.StoreToEndpointsLister
// Watches changes to all endpoints
endpointController *framework.Controller
endpointController *cache.Controller
// services that need to be synced
serviceQueue *workqueue.Type
// endpoints that need to be synced
@ -91,7 +90,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
serviceQueue: workqueue.New(),
endpointQueue: workqueue.New(),
}
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = framework.NewInformer(
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
@ -102,7 +101,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
},
&v1.Endpoints{},
serviceSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueEndpoint(obj, clusterName)
},
@ -115,7 +114,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
},
)
cachedClusterClient.serviceStore.Store, cachedClusterClient.serviceController = framework.NewInformer(
cachedClusterClient.serviceStore.Store, cachedClusterClient.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return clientset.Core().Services(v1.NamespaceAll).List(options)
@ -126,7 +125,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
},
&v1.Service{},
serviceSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cc.enqueueService(obj, clusterName)
},


@ -35,7 +35,6 @@ import (
release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
@ -109,11 +108,11 @@ type ServiceController struct {
// A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister
// Watches changes to all services
serviceController *framework.Controller
serviceController *cache.Controller
// A store of clusters, populated by the clusterController
clusterStore federationcache.StoreToClusterLister
// Watches changes to all clusters
clusterController *framework.Controller
clusterController *cache.Controller
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
// services that need to be synced
@ -145,7 +144,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
queue: workqueue.New(),
knownClusterSet: make(sets.String),
}
s.serviceStore.Store, s.serviceController = framework.NewInformer(
s.serviceStore.Store, s.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return s.federationClient.Core().Services(v1.NamespaceAll).List(options)
@ -156,7 +155,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
},
&v1.Service{},
serviceSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: s.enqueueService,
UpdateFunc: func(old, cur interface{}) {
// there is case that old and new are equals but we still catch the event now.
@ -167,7 +166,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
DeleteFunc: s.enqueueService,
},
)
s.clusterStore.Store, s.clusterController = framework.NewInformer(
s.clusterStore.Store, s.clusterController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return s.federationClient.Federation().Clusters().List(options)
@ -178,7 +177,7 @@ func New(federationClient federation_release_1_4.Interface, dns dnsprovider.Inte
},
&v1beta1.Cluster{},
clusterSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: s.clusterCache.delFromClusterSet,
AddFunc: s.clusterCache.addToClientMap,
UpdateFunc: func(old, cur interface{}) {


@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller/framework"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@ -111,8 +110,8 @@ type FederatedInformerForTestOnly interface {
}
// A function that should be used to create an informer on the target object. Store should use
// framework.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
type TargetInformerFactory func(*federation_api.Cluster, kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface)
// cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
type TargetInformerFactory func(*federation_api.Cluster, kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface)
// A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired)
// when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired)
@ -154,7 +153,7 @@ func NewFederatedInformer(
return data
}
federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = framework.NewInformer(
federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return federationClient.Federation().Clusters().List(options)
@ -165,7 +164,7 @@ func NewFederatedInformer(
},
&federation_api.Cluster{},
clusterSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldCluster, ok := old.(*federation_api.Cluster)
if ok {
@ -238,7 +237,7 @@ func isClusterReady(cluster *federation_api.Cluster) bool {
}
type informer struct {
controller framework.ControllerInterface
controller cache.ControllerInterface
store cache.Store
stopChan chan struct{}
}
@ -455,7 +454,7 @@ func (fs *federatedStoreImpl) GetFromAllClusters(key string) ([]FederatedObject,
// GetKeyFor returns the key under which the item would be put in the store.
func (fs *federatedStoreImpl) GetKeyFor(item interface{}) string {
// TODO: support other keying functions.
key, _ := framework.DeletionHandlingMetaNamespaceKeyFunc(item)
key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(item)
return key
}
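For reference, a minimal implementation of the TargetInformerFactory contract documented earlier in this file (a hedged sketch: the pod-watching factory and the federation_api import path are assumptions, not code from this commit):

package example

import (
	federation_api "k8s.io/kubernetes/federation/apis/federation"
	"k8s.io/kubernetes/pkg/api"
	api_v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// podTargetInformerFactory builds, per member cluster, an informer over that
// cluster's pods. Zero resync and empty handlers are placeholder choices;
// NewInformer's store already keys objects with
// DeletionHandlingMetaNamespaceKeyFunc, as the doc comment above requires.
func podTargetInformerFactory(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
	return cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
				return targetClient.Core().Pods(api_v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return targetClient.Core().Pods(api_v1.NamespaceAll).Watch(options)
			},
		},
		&api_v1.Pod{},
		0, // rely on watch events; no periodic resync
		cache.ResourceEventHandlerFuncs{},
	)
}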


@ -28,7 +28,6 @@ import (
kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4"
fake_kube_release_1_4 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_4/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@ -79,8 +78,8 @@ func TestFederatedInformer(t *testing.T) {
return true, watch.NewFake(), nil
})
targetInformerFactory := func(cluster *federation_api.Cluster, clientset kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
targetInformerFactory := func(cluster *federation_api.Cluster, clientset kube_release_1_4.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Core().Services(api_v1.NamespaceAll).List(options)
@ -91,7 +90,7 @@ func TestFederatedInformer(t *testing.T) {
},
&api_v1.Service{},
10*time.Second,
framework.ResourceEventHandlerFuncs{})
cache.ResourceEventHandlerFuncs{})
}
addedClusters := make(chan string, 1)


@ -21,14 +21,14 @@ import (
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/client/cache"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
)
// Returns framework.ResourceEventHandlerFuncs that trigger the given function
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on all object changes.
func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *framework.ResourceEventHandlerFuncs {
return &framework.ResourceEventHandlerFuncs{
func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object)
triggerFunc(oldObj)
@ -46,9 +46,9 @@ func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *framework.Res
}
}
// Returns framework.ResourceEventHandlerFuncs that trigger the given function
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on object add and delete as well as spec/object meta on update.
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *framework.ResourceEventHandlerFuncs {
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
getFieldOrPanic := func(obj interface{}, fieldName string) interface{} {
val := reflect.ValueOf(obj).Elem().FieldByName(fieldName)
if val.IsValid() {
@ -57,7 +57,7 @@ func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *frame
panic(fmt.Errorf("field not found: %s", fieldName))
}
}
return &framework.ResourceEventHandlerFuncs{
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object)
triggerFunc(oldObj)
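How these trigger helpers are consumed, sketched (assuming this helper lives alongside NewTriggerOnAllChanges in the same package, with a caller-supplied ListerWatcher): the *cache.ResourceEventHandlerFuncs they return satisfies cache.ResourceEventHandler, so it plugs directly into cache.NewInformer.

package util

import (
	"time"

	api_v1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
)

// newTriggeredInformer routes every add, update, and delete of a namespace
// into the single deliver callback via NewTriggerOnAllChanges.
func newTriggeredInformer(lw cache.ListerWatcher, deliver func(pkg_runtime.Object)) (cache.Store, *cache.Controller) {
	return cache.NewInformer(
		lw,
		&api_v1.Namespace{},
		10*time.Minute, // placeholder resync period
		NewTriggerOnAllChanges(deliver),
	)
}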


@ -17,7 +17,7 @@ limitations under the License.
package admission
import (
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)
// PluginInitializer is used for initialization of shareable resources between admission plugins.


@ -17,7 +17,7 @@ limitations under the License.
package admission
import (
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)
// Validator holds Validate functions, which are responsible for validation of initialized shared resources


@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package cache
import (
"sync"
"time"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@ -28,13 +27,13 @@ import (
// Config contains all the settings for a Controller.
type Config struct {
// The queue for your objects; either a cache.FIFO or
// a cache.DeltaFIFO. Your Process() function should accept
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Queue's Pop() method.
cache.Queue
Queue
// Something that can list and watch your objects.
cache.ListerWatcher
ListerWatcher
// Something that can process your objects.
Process ProcessFunc
@ -45,7 +44,7 @@ type Config struct {
// Reprocess everything at least this often.
// Note that if it takes longer for you to clear the queue than this
// period, you will end up processing items in the order determined
// by cache.FIFO.Replace(). Currently, this is random. If this is a
// by FIFO.Replace(). Currently, this is random. If this is a
// problem, we can change that replacement policy to append new
// things to the end of the queue instead of replacing the entire
// queue.
@ -64,7 +63,7 @@ type ProcessFunc func(obj interface{}) error
// Controller is a generic controller framework.
type Controller struct {
config Config
reflector *cache.Reflector
reflector *Reflector
reflectorMutex sync.RWMutex
}
@ -87,7 +86,7 @@ func New(c *Config) *Controller {
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
r := cache.NewReflector(
r := NewReflector(
c.config.ListerWatcher,
c.config.ObjectType,
c.config.Queue,
@ -110,9 +109,9 @@ func (c *Controller) HasSynced() bool {
// Requeue adds the provided object back into the queue if it does not already exist.
func (c *Controller) Requeue(obj interface{}) error {
return c.config.Queue.AddIfNotPresent(cache.Deltas{
cache.Delta{
Type: cache.Sync,
return c.config.Queue.AddIfNotPresent(Deltas{
Delta{
Type: Sync,
Object: obj,
},
})
@ -124,7 +123,7 @@ func (c *Controller) Requeue(obj interface{}) error {
// concurrently.
func (c *Controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
if err != nil {
if c.config.RetryOnError {
// This is the safe way to re-enqueue.
@ -145,7 +144,7 @@ func (c *Controller) processLoop() {
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// * OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type cache.DeletedFinalStateUnknown. This can
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
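The resync caveat above has a practical consequence: OnUpdate fires even when nothing changed. A common guard (a sketch, not code from this commit) compares ResourceVersions and skips the no-op notifications:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// resyncAwareHandlers ignores the OnUpdate calls produced by periodic
// resyncs, where the old and new objects carry the same ResourceVersion.
func resyncAwareHandlers(onRealUpdate func(pod *api.Pod)) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			oldPod, newPod := oldObj.(*api.Pod), newObj.(*api.Pod)
			if oldPod.ResourceVersion == newPod.ResourceVersion {
				return // resync delivered an unchanged object
			}
			onRealUpdate(newPod)
		},
	}
}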
@ -185,18 +184,18 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
}
// DeletionHandlingMetaNamespaceKeyFunc checks for
// cache.DeletedFinalStateUnknown objects before calling
// cache.MetaNamespaceKeyFunc.
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return cache.MetaNamespaceKeyFunc(obj)
return MetaNamespaceKeyFunc(obj)
}
// NewInformer returns a cache.Store and a controller for populating the store
// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
@ -210,18 +209,18 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
// * h is the object you want notifications sent to.
//
func NewInformer(
lw cache.ListerWatcher,
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
) (cache.Store, *Controller) {
) (Store, *Controller) {
// This will hold the client state, as we know it.
clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
cfg := &Config{
Queue: fifo,
@ -232,9 +231,9 @@ func NewInformer(
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
@ -246,7 +245,7 @@ func NewInformer(
}
h.OnAdd(d.Object)
}
case cache.Deleted:
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
@ -259,9 +258,9 @@ func NewInformer(
return clientState, New(cfg)
}
// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
// NewIndexerInformer returns an Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event
// Index for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
@ -275,19 +274,19 @@ func NewInformer(
// * h is the object you want notifications sent to.
//
func NewIndexerInformer(
lw cache.ListerWatcher,
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
indexers cache.Indexers,
) (cache.Indexer, *Controller) {
indexers Indexers,
) (Indexer, *Controller) {
// This will hold the client state, as we know it.
clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)
cfg := &Config{
Queue: fifo,
@ -298,9 +297,9 @@ func NewIndexerInformer(
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
@ -312,7 +311,7 @@ func NewIndexerInformer(
}
h.OnAdd(d.Object)
}
case cache.Deleted:
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
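What the Indexer buys over a plain Store is secondary lookups. A usage sketch with the stock namespace index (hypothetical helpers, assuming a caller-supplied ListerWatcher):

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// newIndexedPodStore indexes cached pods by namespace as they arrive.
func newIndexedPodStore(lw cache.ListerWatcher) (cache.Indexer, *cache.Controller) {
	return cache.NewIndexerInformer(
		lw,
		&api.Pod{},
		5*time.Minute,
		cache.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}

// podsInNamespace returns every cached pod filed under the given namespace,
// without scanning the whole store.
func podsInNamespace(indexer cache.Indexer, namespace string) ([]interface{}, error) {
	return indexer.ByIndex(cache.NamespaceIndex, namespace)
}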


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework_test
package cache
import (
"fmt"
@ -24,8 +24,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@ -34,34 +33,22 @@ import (
"github.com/google/gofuzz"
)
type testLW struct {
ListFunc func(options api.ListOptions) (runtime.Object, error)
WatchFunc func(options api.ListOptions) (watch.Interface, error)
}
func (t *testLW) List(options api.ListOptions) (runtime.Object, error) {
return t.ListFunc(options)
}
func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) {
return t.WatchFunc(options)
}
func Example() {
// source simulates an apiserver object endpoint.
source := framework.NewFakeControllerSource()
source := fcache.NewFakeControllerSource()
// This will hold the downstream state, as we know it.
downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass downstream in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, downstream)
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
cfg := &framework.Config{
cfg := &Config{
Queue: fifo,
ListerWatcher: source,
ObjectType: &api.Pod{},
@ -72,9 +59,9 @@ func Example() {
// everything that comes in.
Process: func(obj interface{}) error {
// Obj is from the Pop method of the Queue we make above.
newest := obj.(cache.Deltas).Newest()
newest := obj.(Deltas).Newest()
if newest.Type != cache.Deleted {
if newest.Type != Deleted {
// Update our downstream store.
err := downstream.Add(newest.Object)
if err != nil {
@ -107,7 +94,7 @@ func Example() {
// Create the controller and run it until we close stop.
stop := make(chan struct{})
defer close(stop)
go framework.New(cfg).Run(stop)
go New(cfg).Run(stop)
// Let's add a few objects to the source.
testIDs := []string{"a-hello", "b-controller", "c-framework"}
@ -132,25 +119,25 @@ func Example() {
// c-framework
}
func ExampleInformer() {
func ExampleNewInformer() {
// source simulates an apiserver object endpoint.
source := framework.NewFakeControllerSource()
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
// Make a controller that immediately deletes anything added to it, and
// logs anything deleted.
_, controller := framework.NewInformer(
_, controller := NewInformer(
source,
&api.Pod{},
time.Millisecond*100,
framework.ResourceEventHandlerFuncs{
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
source.Delete(obj.(runtime.Object))
},
DeleteFunc: func(obj interface{}) {
key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
key = "oops something went wrong with the key"
}
@ -196,7 +183,7 @@ func TestHammerController(t *testing.T) {
// race detector.
// source simulates an apiserver object endpoint.
source := framework.NewFakeControllerSource()
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
outputSetLock := sync.Mutex{}
@ -204,7 +191,7 @@ func TestHammerController(t *testing.T) {
outputSet := map[string][]string{}
recordFunc := func(eventType string, obj interface{}) {
key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
t.Errorf("something wrong with key: %v", err)
key = "oops something went wrong with the key"
@ -217,11 +204,11 @@ func TestHammerController(t *testing.T) {
}
// Make a controller which just logs all the changes it gets.
_, controller := framework.NewInformer(
_, controller := NewInformer(
source,
&api.Pod{},
time.Millisecond*100,
framework.ResourceEventHandlerFuncs{
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { recordFunc("add", obj) },
UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
@ -305,7 +292,7 @@ func TestUpdate(t *testing.T) {
// call to update.
// source simulates an apiserver object endpoint.
source := framework.NewFakeControllerSource()
source := fcache.NewFakeControllerSource()
const (
FROM = "from"
@ -358,7 +345,7 @@ func TestUpdate(t *testing.T) {
// It calls Done() on the wait group on deletions so we can tell when
// everything we've added has been deleted.
watchCh := make(chan struct{})
_, controller := framework.NewInformer(
_, controller := NewInformer(
&testLW{
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
watch, err := source.Watch(options)
@ -371,7 +358,7 @@ func TestUpdate(t *testing.T) {
},
&api.Pod{},
0,
framework.ResourceEventHandlerFuncs{
ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
from, to := o.Labels["check"], n.Labels["check"]


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package cache
import (
"testing"


@ -34,12 +34,12 @@ import (
var nevererrc chan error
type testLW struct {
ListFunc func() (runtime.Object, error)
ListFunc func(options api.ListOptions) (runtime.Object, error)
WatchFunc func(options api.ListOptions) (watch.Interface, error)
}
func (t *testLW) List(options api.ListOptions) (runtime.Object, error) {
return t.ListFunc()
return t.ListFunc(options)
}
func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) {
return t.WatchFunc(options)
@ -53,7 +53,7 @@ func TestCloseWatchChannelOnError(t *testing.T) {
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func() (runtime.Object, error) {
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
},
}
@ -79,7 +79,7 @@ func TestRunUntil(t *testing.T) {
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func() (runtime.Object, error) {
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
},
}
@ -227,7 +227,7 @@ func TestReflectorListAndWatch(t *testing.T) {
go func() { createdFakes <- fw }()
return fw, nil
},
ListFunc: func() (runtime.Object, error) {
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
},
}
@ -345,7 +345,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) {
}()
return fw, nil
},
ListFunc: func() (runtime.Object, error) {
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return item.list, item.listErr
},
}
@ -373,7 +373,7 @@ func TestReflectorResync(t *testing.T) {
fw := watch.NewFake()
return fw, nil
},
ListFunc: func() (runtime.Object, error) {
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "0"}}, nil
},
}
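The signature change threaded through these tests mirrors the real cache.ListerWatcher contract: the reflector passes api.ListOptions (for example, a resource version to list from) to both List and Watch, so implementations should forward them rather than listing unconditionally. A conforming sketch:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// podListWatch forwards the reflector's options straight to the API server.
func podListWatch(client clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().Pods(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().Pods(api.NamespaceAll).Watch(options)
		},
	}
}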


@ -14,19 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
package cache
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
)
// if you use this, there is one behavior change compared to a standard Informer.
@ -42,7 +41,7 @@ type SharedInformer interface {
// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
// TODO we should try to remove this restriction eventually.
AddEventHandler(handler ResourceEventHandler) error
GetStore() cache.Store
GetStore() Store
// GetController gives back a synthetic interface that "votes" to start the informer
GetController() ControllerInterface
Run(stopCh <-chan struct{})
@ -53,24 +52,24 @@ type SharedInformer interface {
type SharedIndexInformer interface {
SharedInformer
// AddIndexers add indexers to the informer before it starts.
AddIndexers(indexers cache.Indexers) error
GetIndexer() cache.Indexer
AddIndexers(indexers Indexers) error
GetIndexer() Indexer
}
// NewSharedInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{})
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}
// NewSharedIndexInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
sharedIndexInformer := &sharedIndexInformer{
processor: &sharedProcessor{},
indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
listerWatcher: lw,
objectType: objType,
fullResyncPeriod: resyncPeriod,
@ -107,13 +106,13 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
}
type sharedIndexInformer struct {
indexer cache.Indexer
indexer Indexer
controller *Controller
processor *sharedProcessor
// This block is tracked to handle late initialization of the controller
listerWatcher cache.ListerWatcher
listerWatcher ListerWatcher
objectType runtime.Object
fullResyncPeriod time.Duration
@ -160,7 +159,7 @@ type deleteNotification struct {
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)
cfg := &Config{
Queue: fifo,
@ -211,15 +210,15 @@ func (s *sharedIndexInformer) LastSyncResourceVersion() string {
return s.controller.reflector.LastSyncResourceVersion()
}
func (s *sharedIndexInformer) GetStore() cache.Store {
func (s *sharedIndexInformer) GetStore() Store {
return s.indexer
}
func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
func (s *sharedIndexInformer) GetIndexer() Indexer {
return s.indexer
}
func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
s.startedLock.Lock()
defer s.startedLock.Unlock()
@ -271,9 +270,9 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
defer s.blockDeltas.Unlock()
// from oldest to newest
for _, d := range obj.(cache.Deltas) {
for _, d := range obj.(Deltas) {
switch d.Type {
case cache.Sync, cache.Added, cache.Updated:
case Sync, Added, Updated:
if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
if err := s.indexer.Update(d.Object); err != nil {
return err
@ -285,7 +284,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
}
s.processor.distribute(addNotification{newObj: d.Object})
}
case cache.Deleted:
case Deleted:
if err := s.indexer.Delete(d.Object); err != nil {
return err
}
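The point of the shared variants above, sketched: several consumers hang their handlers off one watch connection and one underlying store, instead of each running a private reflector. (Hypothetical wiring; note that AddEventHandler must be called before Run, per the comment on SharedInformer.)

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// runShared fans one pod watch out to two independent handler sets.
func runShared(lw cache.ListerWatcher, stopCh <-chan struct{}) (cache.SharedIndexInformer, error) {
	informer := cache.NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{})
	if err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ /* consumer A's funcs */ }); err != nil {
		return nil, err
	}
	if err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ /* consumer B's funcs */ }); err != nil {
		return nil, err
	}
	go informer.Run(stopCh) // one reflector, one DeltaFIFO, fanned out to both handlers
	return informer, nil
}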


@ -29,7 +29,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
utilcertificates "k8s.io/kubernetes/pkg/util/certificates"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -47,7 +46,7 @@ type CertificateController struct {
kubeClient clientset.Interface
// CSR framework and store
csrController *framework.Controller
csrController *cache.Controller
csrStore cache.StoreToCertificateRequestLister
// To allow injection of updateCertificateRequestStatus for testing.
@ -85,7 +84,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
}
// Manage the addition/update of certificate requests
cc.csrStore.Store, cc.csrController = framework.NewInformer(
cc.csrStore.Store, cc.csrController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
@ -96,7 +95,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
},
&certificates.CertificateSigningRequest{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
csr := obj.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Adding certificate request %s", csr.Name)


@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"
@ -54,7 +53,7 @@ const (
)
var (
KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
type ResyncPeriodFunc func() time.Duration


@ -23,8 +23,6 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
@ -34,8 +32,7 @@ import (
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -46,6 +43,8 @@ import (
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"github.com/golang/glog"
)
const (
@ -72,7 +71,7 @@ type DaemonSetsController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewDaemonSetsController(passing SharedInformer), this
// will be null
internalPodInformer framework.SharedInformer
internalPodInformer cache.SharedInformer
// A dsc is temporarily suspended after creating/deleting this many replicas.
// It resumes normal action after observing the watch events for them.
@ -89,17 +88,17 @@ type DaemonSetsController struct {
// A store of nodes
nodeStore cache.StoreToNodeLister
// Watches changes to all daemon sets.
dsController *framework.Controller
dsController *cache.Controller
// Watches changes to all pods
podController framework.ControllerInterface
podController cache.ControllerInterface
// Watches changes to all nodes.
nodeController *framework.Controller
nodeController *cache.Controller
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced framework.InformerSynced
podStoreSynced cache.InformerSynced
// nodeStoreSynced returns true if the node store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
nodeStoreSynced framework.InformerSynced
nodeStoreSynced cache.InformerSynced
lookupCache *controller.MatchingCache
@ -107,7 +106,7 @@ type DaemonSetsController struct {
queue workqueue.RateLimitingInterface
}
func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
func NewDaemonSetsController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
@ -128,7 +127,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "daemonset"),
}
// Manage addition/update of daemon sets.
dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
dsc.dsStore.Store, dsc.dsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
@ -140,7 +139,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
&extensions.DaemonSet{},
// TODO: Can we have much longer period here?
FullDaemonSetResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)
@ -173,7 +172,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addPod,
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,
@ -183,7 +182,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
dsc.podStoreSynced = podInformer.HasSynced
// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change,
dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
dsc.nodeStore.Store, dsc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dsc.kubeClient.Core().Nodes().List(options)
@ -194,7 +193,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
},
&api.Node{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dsc.addNode,
UpdateFunc: dsc.updateNode,
},
@ -242,7 +241,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
go dsc.podController.Run(stopCh)
go dsc.nodeController.Run(stopCh)
if !framework.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced) {
if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced) {
return
}
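The startup gate used here, isolated into a sketch: start every controller, then hold the workers until each store reports its initial list complete. WaitForCacheSync returns false only if stopCh closes first.

package example

import "k8s.io/kubernetes/pkg/client/cache"

// runUntilSynced starts the controllers and blocks until all caches fill.
func runUntilSynced(stopCh <-chan struct{}, controllers []*cache.Controller, synced ...cache.InformerSynced) bool {
	for _, c := range controllers {
		go c.Run(stopCh)
	}
	if !cache.WaitForCacheSync(stopCh, synced...) {
		return false // shutting down before the initial sync finished
	}
	// Every InformerSynced reported true; workers may start.
	return true
}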


@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -70,15 +69,15 @@ type DeploymentController struct {
// A store of deployments, populated by the dController
dStore cache.StoreToDeploymentLister
// Watches changes to all deployments
dController *framework.Controller
dController *cache.Controller
// A store of ReplicaSets, populated by the rsController
rsStore cache.StoreToReplicaSetLister
// Watches changes to all ReplicaSets
rsController *framework.Controller
rsController *cache.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
// Watches changes to all pods
podController *framework.Controller
podController *cache.Controller
// dStoreSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
@ -110,7 +109,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}
dc.dStore.Indexer, dc.dController = framework.NewIndexerInformer(
dc.dStore.Indexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)
@ -121,7 +120,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&extensions.Deployment{},
FullDeploymentResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeploymentNotification,
UpdateFunc: dc.updateDeploymentNotification,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
@ -130,7 +129,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
dc.rsStore.Store, dc.rsController = framework.NewInformer(
dc.rsStore.Store, dc.rsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -141,14 +140,14 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&extensions.ReplicaSet{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet,
},
)
dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer(
dc.podStore.Indexer, dc.podController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.client.Core().Pods(api.NamespaceAll).List(options)
@ -159,7 +158,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
},
&api.Pod{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod,
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
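
The indexer variant carries over with the same signature as well. A sketch of the deployment informer above, restated in isolation (the client parameter is an assumption of the sketch):

package sketch

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// indexedDeployments returns an Indexer rather than a plain Store, so callers
// can list deployments per namespace via the NamespaceIndex registered below.
func indexedDeployments(client clientset.Interface) (cache.Indexer, *cache.Controller) {
	return cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Extensions().Deployments(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Extensions().Deployments(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.Deployment{},
		30*time.Second, // resync period; arbitrary for this sketch
		cache.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}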

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/intstr"
@ -47,22 +46,22 @@ type DisruptionController struct {
kubeClient *client.Client
pdbStore cache.Store
pdbController *framework.Controller
pdbController *cache.Controller
pdbLister cache.StoreToPodDisruptionBudgetLister
podController framework.ControllerInterface
podController cache.ControllerInterface
podLister cache.StoreToPodLister
rcIndexer cache.Indexer
rcController *framework.Controller
rcController *cache.Controller
rcLister cache.StoreToReplicationControllerLister
rsStore cache.Store
rsController *framework.Controller
rsController *cache.Controller
rsLister cache.StoreToReplicaSetLister
dIndexer cache.Indexer
dController *framework.Controller
dController *cache.Controller
dLister cache.StoreToDeploymentLister
queue *workqueue.Type
@ -84,7 +83,7 @@ type controllerAndScale struct {
// controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
podController: podInformer.GetController(),
@ -97,13 +96,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
dc.podLister.Indexer = podInformer.GetIndexer()
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod,
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
})
dc.pdbStore, dc.pdbController = framework.NewInformer(
dc.pdbStore, dc.pdbController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
@ -114,7 +113,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&policy.PodDisruptionBudget{},
30*time.Second,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDb,
UpdateFunc: dc.updateDb,
DeleteFunc: dc.removeDb,
@ -122,7 +121,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
)
dc.pdbLister.Store = dc.pdbStore
dc.rcIndexer, dc.rcController = framework.NewIndexerInformer(
dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
@ -133,13 +132,13 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&api.ReplicationController{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
dc.rcLister.Indexer = dc.rcIndexer
dc.rsStore, dc.rsController = framework.NewInformer(
dc.rsStore, dc.rsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -150,12 +149,12 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&extensions.ReplicaSet{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
)
dc.rsLister.Store = dc.rsStore
dc.dIndexer, dc.dController = framework.NewIndexerInformer(
dc.dIndexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
@ -166,7 +165,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
},
&extensions.Deployment{},
30*time.Second,
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
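
Two styles coexist in this constructor: the pod watch attaches handlers to a shared informer owned elsewhere, while the PDB, RC, RS, and deployment watches each build a private informer. A sketch of the shared style on its own (the callback parameter is an assumption):

package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// observePods hangs event handlers off an existing shared informer. The
// informer is owned and started elsewhere, so this never calls Run on it.
func observePods(podInformer cache.SharedIndexInformer, onChange func(interface{})) {
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    onChange,
		UpdateFunc: func(old, cur interface{}) { onChange(cur) },
		DeleteFunc: onChange,
	})
}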

View File

@ -34,8 +34,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -66,11 +65,11 @@ const (
)
var (
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
func NewEndpointController(podInformer cache.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
}
@ -79,7 +78,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
queue: workqueue.NewNamed("endpoint"),
}
e.serviceStore.Store, e.serviceController = framework.NewInformer(
e.serviceStore.Store, e.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options)
@ -91,7 +90,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
&api.Service{},
// TODO: Can we have a much longer period here?
FullServiceResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur)
@ -100,7 +99,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
},
)
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.addPod,
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
@ -133,7 +132,7 @@ type EndpointController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewEndpointController(passing a SharedInformer), this
// will be nil
internalPodInformer framework.SharedIndexInformer
internalPodInformer cache.SharedIndexInformer
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much
@ -144,8 +143,8 @@ type EndpointController struct {
// Since we join two objects, we'll watch both of them with
// controllers.
serviceController *framework.Controller
podController framework.ControllerInterface
serviceController *cache.Controller
podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool
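
cache.DeletionHandlingMetaNamespaceKeyFunc, aliased as keyFunc above, moved along with the rest. A sketch of the enqueue helper it typically backs (the queue parameter is an assumption of the sketch):

package sketch

import (
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/util/workqueue"
)

// enqueue derives a namespace/name key even when obj arrives as a
// cache.DeletedFinalStateUnknown tombstone after a dropped watch.
func enqueue(queue *workqueue.Type, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		return // neither an API object nor a deletion tombstone
	}
	queue.Add(key)
}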

View File

@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package framework implements all the grunt work involved in running a simple controller.
package framework // import "k8s.io/kubernetes/pkg/controller/framework"

View File

@ -32,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@ -49,7 +48,7 @@ const ResourceResyncTime time.Duration = 0
type monitor struct {
store cache.Store
controller *framework.Controller
controller *cache.Controller
}
type objectReference struct {
@ -488,11 +487,11 @@ func (gc *GarbageCollector) monitorFor(resource unversioned.GroupVersionResource
}
runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
}
monitor.store, monitor.controller = framework.NewInformer(
monitor.store, monitor.controller = cache.NewInformer(
gcListWatcher(client, resource),
nil,
ResourceResyncTime,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
// add the event to the propagator's eventQueue.
AddFunc: func(obj interface{}) {
setObjectTypeMeta(obj)
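
One subtlety here: the garbage collector passes nil as the expected object type, which tells the informer's underlying reflector to accept whatever the ListerWatcher returns. A sketch of that pattern, with lw standing in for the gcListWatcher result and the enqueue callback assumed:

package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// monitorResource watches an arbitrary resource; the nil example object
// disables the reflector's type check, which the garbage collector relies on
// when watching types discovered at runtime.
func monitorResource(lw cache.ListerWatcher, enqueue func(interface{})) (cache.Store, *cache.Controller) {
	return cache.NewInformer(lw, nil, 0, cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		UpdateFunc: func(old, cur interface{}) { enqueue(cur) },
		DeleteFunc: enqueue,
	})
}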

View File

@ -21,13 +21,12 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
)
// PodInformer is a type of SharedIndexInformer which watches and lists all pods.
// The interface provides constructors for an informer and a lister for pods.
type PodInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPodLister
}
@ -37,7 +36,7 @@ type podInformer struct {
// Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// podInformer and connects it to sharedInformerFactory
func (f *podInformer) Informer() framework.SharedIndexInformer {
func (f *podInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
@ -63,7 +62,7 @@ func (f *podInformer) Lister() *cache.StoreToPodLister {
// NamespaceInformer is a type of SharedIndexInformer which watches and lists all namespaces.
// The interface provides constructors for an informer and a lister for namespaces.
type NamespaceInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.IndexerToNamespaceLister
}
@ -73,7 +72,7 @@ type namespaceInformer struct {
// Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// namespaceInformer and connects it to sharedInformerFactory
func (f *namespaceInformer) Informer() framework.SharedIndexInformer {
func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
@ -99,7 +98,7 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
// NodeInformer is a type of SharedIndexInformer which watches and lists all nodes.
// The interface provides constructors for an informer and a lister for nodes.
type NodeInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToNodeLister
}
@ -109,7 +108,7 @@ type nodeInformer struct {
// Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// nodeInformer and connects it to sharedInformerFactory
func (f *nodeInformer) Informer() framework.SharedIndexInformer {
func (f *nodeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
@ -135,7 +134,7 @@ func (f *nodeInformer) Lister() *cache.StoreToNodeLister {
// PVCInformer is a type of SharedIndexInformer which watches and lists all persistent volume claims.
// The interface provides constructors for an informer and a lister for persistent volume claims.
type PVCInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVCFetcher
}
@ -145,7 +144,7 @@ type pvcInformer struct {
// Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// pvcInformer and connects it to sharedInformerFactory
func (f *pvcInformer) Informer() framework.SharedIndexInformer {
func (f *pvcInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
@ -171,7 +170,7 @@ func (f *pvcInformer) Lister() *cache.StoreToPVCFetcher {
// PVInformer is a type of SharedIndexInformer which watches and lists all persistent volumes.
// The interface provides constructors for an informer and a lister for persistent volumes.
type PVInformer interface {
Informer() framework.SharedIndexInformer
Informer() cache.SharedIndexInformer
Lister() *cache.StoreToPVFetcher
}
@ -181,7 +180,7 @@ type pvInformer struct {
// Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// pvInformer and connects it to sharedInformerFactory
func (f *pvInformer) Informer() framework.SharedIndexInformer {
func (f *pvInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
@ -47,7 +46,7 @@ type sharedInformerFactory struct {
lock sync.Mutex
defaultResync time.Duration
informers map[reflect.Type]framework.SharedIndexInformer
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started
// this allows calling of Start method multiple times
startedInformers map[reflect.Type]bool
@ -58,7 +57,7 @@ func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Dur
return &sharedInformerFactory{
client: client,
defaultResync: defaultResync,
informers: make(map[reflect.Type]framework.SharedIndexInformer),
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
}
}
@ -102,8 +101,8 @@ func (f *sharedInformerFactory) PersistentVolumes() PVInformer {
}
// NewPodInformer returns a SharedIndexInformer that lists and watches all pods
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Pods(api.NamespaceAll).List(options)
@ -121,8 +120,8 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) fram
}
// NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Nodes().List(options)
@ -139,8 +138,8 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) fra
}
// NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@ -157,8 +156,8 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) fram
}
// NewPVInformer returns a SharedIndexInformer that lists and watches all PVs
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumes().List(options)
@ -175,8 +174,8 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) frame
}
// NewNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer {
sharedIndexInformer := framework.NewSharedIndexInformer(
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options)
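
A sketch of how this factory is meant to be consumed from its new import path; the handler body is illustrative, and the Pods()/Start() calls follow the accessors defined in this package:

package sketch

import (
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

// startPodWatch requests the shared pod informer from the factory, registers
// a handler, and then starts every informer requested so far.
func startPodWatch(client clientset.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	factory.Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to a pod */ },
	})
	factory.Start(stopCh)
}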

View File

@ -31,8 +31,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -51,7 +50,7 @@ type JobController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewJobController(passing a SharedInformer), this
// will be nil
internalPodInformer framework.SharedInformer
internalPodInformer cache.SharedInformer
// To allow injection of updateJobStatus for testing.
updateHandler func(job *batch.Job) error
@ -66,7 +65,7 @@ type JobController struct {
// A store of job, populated by the jobController
jobStore cache.StoreToJobLister
// Watches changes to all jobs
jobController *framework.Controller
jobController *cache.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
@ -77,7 +76,7 @@ type JobController struct {
recorder record.EventRecorder
}
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
func NewJobController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
@ -98,7 +97,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
}
jm.jobStore.Store, jm.jobController = framework.NewInformer(
jm.jobStore.Store, jm.jobController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
@ -110,7 +109,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
&batch.Job{},
// TODO: Can we have a much longer period here?
replicationcontroller.FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: jm.enqueueController,
UpdateFunc: func(old, cur interface{}) {
if job := cur.(*batch.Job); !IsJobFinished(job) {
@ -121,7 +120,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
},
)
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: jm.addPod,
UpdateFunc: jm.updatePod,
DeleteFunc: jm.deletePod,
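
The UpdateFunc above re-enqueues only jobs that are still running. A sketch of that guard in isolation, with the enqueue and finished-check callbacks as assumptions:

package sketch

import (
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/client/cache"
)

// jobHandlers re-enqueues a job on update only while it is unfinished, so
// completed jobs stop generating sync work.
func jobHandlers(enqueue func(interface{}), finished func(*batch.Job) bool) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if job := cur.(*batch.Job); !finished(job) {
				enqueue(job)
			}
		},
		DeleteFunc: enqueue,
	}
}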

View File

@ -25,7 +25,6 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -45,7 +44,7 @@ type NamespaceController struct {
// store that holds the namespaces
store cache.Store
// controller that observes the namespaces
controller *framework.Controller
controller *cache.Controller
// namespaces that have been queued up for processing by workers
queue workqueue.RateLimitingInterface
// list of preferred group versions and their corresponding resource set for namespace deletion
@ -95,7 +94,7 @@ func NewNamespaceController(
}
// configure the backing store/controller
store, controller := framework.NewInformer(
store, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options)
@ -106,7 +105,7 @@ func NewNamespaceController(
},
&api.Namespace{},
resyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*api.Namespace)
namespaceController.enqueueNamespace(namespace)

View File

@ -33,8 +33,7 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@ -136,13 +135,13 @@ type NodeController struct {
maximumGracePeriod time.Duration
recorder record.EventRecorder
// Pod framework and store
podController framework.ControllerInterface
podController cache.ControllerInterface
podStore cache.StoreToPodLister
// Node framework and store
nodeController *framework.Controller
nodeController *cache.Controller
nodeStore cache.StoreToNodeLister
// DaemonSet framework and store
daemonSetController *framework.Controller
daemonSetController *cache.Controller
daemonSetStore cache.StoreToDaemonSetLister
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator
@ -164,7 +163,7 @@ type NodeController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewNodeController(passing a SharedInformer), this
// will be nil
internalPodInformer framework.SharedIndexInformer
internalPodInformer cache.SharedIndexInformer
}
// NewNodeController returns a new node controller to sync instances from cloudprovider.
@ -172,7 +171,7 @@ type NodeController struct {
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
// currently, this should be handled as a fatal error.
func NewNodeController(
podInformer framework.SharedIndexInformer,
podInformer cache.SharedIndexInformer,
cloud cloudprovider.Interface,
kubeClient clientset.Interface,
podEvictionTimeout time.Duration,
@ -241,16 +240,16 @@ func NewNodeController(
nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
nc.computeZoneStateFunc = nc.ComputeZoneState
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nc.maybeDeleteTerminatingPod,
UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
})
nc.podStore.Indexer = podInformer.GetIndexer()
nc.podController = podInformer.GetController()
nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs {
nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*api.Node)
err := nc.cidrAllocator.AllocateOrOccupyCIDR(node)
@ -296,7 +295,7 @@ func NewNodeController(
}
}
nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
nc.nodeStore.Store, nc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Core().Nodes().List(options)
@ -310,7 +309,7 @@ func NewNodeController(
nodeEventHandlerFuncs,
)
nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer(
nc.daemonSetStore.Store, nc.daemonSetController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
@ -321,7 +320,7 @@ func NewNodeController(
},
&extensions.DaemonSet{},
controller.NoResyncPeriodFunc(),
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
)
if allocateNodeCIDRs {

View File

@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/errors"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -63,12 +62,12 @@ type PetSetController struct {
// podStoreSynced returns true if the pod store has synced at least once.
podStoreSynced func() bool
// Watches changes to all pods.
podController framework.ControllerInterface
podController cache.ControllerInterface
// A store of PetSets, populated by the psController.
psStore cache.StoreToPetSetLister
// Watches changes to all PetSets.
psController *framework.Controller
psController *cache.Controller
// A store of the 1 unhealthy pet blocking progress for a given ps
blockingPetStore *unhealthyPetTracker
@ -82,7 +81,7 @@ type PetSetController struct {
}
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
@ -98,7 +97,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
}
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
// lookup the petset and enqueue
AddFunc: psc.addPod,
// lookup current and old petset if labels changed
@ -109,7 +108,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
psc.podStore.Indexer = podInformer.GetIndexer()
psc.podController = podInformer.GetController()
psc.psStore.Store, psc.psController = framework.NewInformer(
psc.psStore.Store, psc.psController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
@ -120,7 +119,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
},
&apps.PetSet{},
petSetResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: psc.enqueuePetSet,
UpdateFunc: func(old, cur interface{}) {
oldPS := old.(*apps.PetSet)

View File

@ -33,7 +33,6 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@ -61,14 +60,14 @@ type HorizontalController struct {
// A store of HPA objects, populated by the controller.
store cache.Store
// Watches changes to all HPA objects.
controller *framework.Controller
controller *cache.Controller
}
var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) {
return framework.NewInformer(
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
@ -79,7 +78,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
},
&autoscaling.HorizontalPodAutoscaler{},
resyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
hpa := obj.(*autoscaling.HorizontalPodAutoscaler)
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@ -44,7 +43,7 @@ const (
type PodGCController struct {
kubeClient clientset.Interface
podStore cache.StoreToPodLister
podStoreSyncer *framework.Controller
podStoreSyncer *cache.Controller
deletePod func(namespace, name string) error
threshold int
}
@ -63,7 +62,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))
gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer(
gcc.podStore.Indexer, gcc.podStoreSyncer = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = terminatedSelector
@ -76,7 +75,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
},
&api.Pod{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{},
cache.ResourceEventHandlerFuncs{},
// We don't actually need to build an index for podStore here, but we build one for consistency.
// It will ensure that if people start making use of the podStore in more specific ways,
// they'll get the benefits they expect. It will also reserve the name for future refactorings.
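
The field-selector trick above narrows the informer at the API server rather than client side. A restated sketch (client is an assumption; the selector mirrors the one in this file):

package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// terminatedPods applies the same selector to both list and watch, so the
// informer's store only ever holds terminated pods.
func terminatedPods(client clientset.Interface) (cache.Indexer, *cache.Controller) {
	sel := fields.ParseSelectorOrDie(
		"status.phase!=" + string(api.PodPending) +
			",status.phase!=" + string(api.PodRunning) +
			",status.phase!=" + string(api.PodUnknown))
	return cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = sel
				return client.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = sel
				return client.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		0, // rely on the watch; no resync
		cache.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}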

View File

@ -36,8 +36,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -81,7 +80,7 @@ type ReplicaSetController struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewReplicaSetController(passing a SharedInformer), this
// will be nil
internalPodInformer framework.SharedIndexInformer
internalPodInformer cache.SharedIndexInformer
// A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them.
@ -95,11 +94,11 @@ type ReplicaSetController struct {
// A store of ReplicaSets, populated by the rsController
rsStore cache.StoreToReplicaSetLister
// Watches changes to all ReplicaSets
rsController *framework.Controller
rsController *cache.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
// Watches changes to all pods
podController framework.ControllerInterface
podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool
@ -115,7 +114,7 @@ type ReplicaSetController struct {
}
// NewReplicaSetController creates a new ReplicaSetController.
func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
func NewReplicaSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@ -126,7 +125,7 @@ func NewReplicaSetController(podInformer framework.SharedIndexInformer, kubeClie
}
// newReplicaSetController configures a replica set controller with the specified event recorder
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
@ -143,7 +142,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
garbageCollectorEnabled: garbageCollectorEnabled,
}
rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
rsc.rsStore.Store, rsc.rsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
@ -155,7 +154,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
&extensions.ReplicaSet{},
// TODO: Can we have a much longer period here?
FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: rsc.enqueueReplicaSet,
UpdateFunc: rsc.updateRS,
// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
@ -165,7 +164,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
},
)
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: rsc.addPod,
// This invokes the ReplicaSet for every pod change, e.g. host assignment. Though this might seem like
// overkill the most frequent pod update is status, and the associated ReplicaSet will only list from

View File

@ -34,8 +34,7 @@ import (
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
@ -86,7 +85,7 @@ type ReplicationManager struct {
// we have a personal informer, we must start it ourselves. If you start
// the controller using NewReplicationManager(passing a SharedInformer), this
// will be nil
internalPodInformer framework.SharedIndexInformer
internalPodInformer cache.SharedIndexInformer
// An rc is temporarily suspended after creating/deleting these many replicas.
// It resumes normal action after observing the watch events for them.
@ -100,11 +99,11 @@ type ReplicationManager struct {
// A store of replication controllers, populated by the rcController
rcStore cache.StoreToReplicationControllerLister
// Watches changes to all replication controllers
rcController *framework.Controller
rcController *cache.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
// Watches changes to all pods
podController framework.ControllerInterface
podController cache.ControllerInterface
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool
@ -120,7 +119,7 @@ type ReplicationManager struct {
}
// NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
@ -130,7 +129,7 @@ func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient
}
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
}
@ -147,7 +146,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
garbageCollectorEnabled: garbageCollectorEnabled,
}
rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@ -159,7 +158,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
&api.ReplicationController{},
// TODO: Can we have a much longer period here?
FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: rm.enqueueController,
UpdateFunc: rm.updateRC,
// This will enter the sync loop and no-op, because the controller has been deleted from the store.
@ -170,7 +169,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: rm.addPod,
// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill
// the most frequent pod update is status, and the associated rc will only list from local storage, so

View File

@ -27,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -90,18 +89,18 @@ func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func
type ReplenishmentControllerFactory interface {
// NewController returns a controller configured with the specified options.
// This method is NOT thread-safe.
NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error)
NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error)
}
// replenishmentControllerFactory implements ReplenishmentControllerFactory
type replenishmentControllerFactory struct {
kubeClient clientset.Interface
podInformer framework.SharedInformer
podInformer cache.SharedInformer
}
// NewReplenishmentControllerFactory returns a factory that knows how to build controllers
// to replenish resources when updated or deleted
func NewReplenishmentControllerFactory(podInformer framework.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
func NewReplenishmentControllerFactory(podInformer cache.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory {
return &replenishmentControllerFactory{
kubeClient: kubeClient,
podInformer: podInformer,
@ -112,8 +111,8 @@ func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface)
return NewReplenishmentControllerFactory(nil, kubeClient)
}
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
var result framework.ControllerInterface
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
var result cache.ControllerInterface
if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter())
}
@ -121,7 +120,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
switch options.GroupKind {
case api.Kind("Pod"):
if r.podInformer != nil {
r.podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
r.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: PodReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
})
@ -133,7 +132,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
result = r.podInformer
case api.Kind("Service"):
_, result = framework.NewInformer(
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
@ -144,13 +143,13 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
},
&api.Service{},
options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
UpdateFunc: ServiceReplenishmentUpdateFunc(options),
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("ReplicationController"):
_, result = framework.NewInformer(
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
@ -161,12 +160,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
},
&api.ReplicationController{},
options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("PersistentVolumeClaim"):
_, result = framework.NewInformer(
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
@ -177,12 +176,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
},
&api.PersistentVolumeClaim{},
options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("Secret"):
_, result = framework.NewInformer(
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
@ -193,12 +192,12 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
},
&api.Secret{},
options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
case api.Kind("ConfigMap"):
_, result = framework.NewInformer(
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options)
@ -209,7 +208,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
},
&api.ConfigMap{},
options.ResyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
},
)
@ -254,7 +253,7 @@ func IsUnhandledGroupKindError(err error) bool {
// returning the first success or failure it hits. If there are no hits either way, it returns an UnhandledGroupKind error
type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) {
func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.ControllerInterface, error) {
for _, factory := range f {
controller, err := factory.NewController(options)
if !IsUnhandledGroupKindError(err) {
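
The factory can hand back either the shared pod informer or a freshly built private informer because both satisfy cache.ControllerInterface. A sketch of the only thing the quota controller ultimately does with the result:

package sketch

import "k8s.io/kubernetes/pkg/client/cache"

// startReplenishment runs any replenishment controller; whether it is a
// shared informer or a private cache.Controller is hidden by the interface.
func startReplenishment(c cache.ControllerInterface, stopCh <-chan struct{}) {
	go c.Run(stopCh)
}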

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -60,7 +59,7 @@ type ResourceQuotaController struct {
// An index of resource quota objects by namespace
rqIndexer cache.Indexer
// Watches changes to all resource quota
rqController *framework.Controller
rqController *cache.Controller
// ResourceQuota objects that need to be synchronized
queue workqueue.RateLimitingInterface
// missingUsageQueue holds objects that are missing the initial usage information
@ -72,7 +71,7 @@ type ResourceQuotaController struct {
// knows how to calculate usage
registry quota.Registry
// controllers monitoring to notify for replenishment
replenishmentControllers []framework.ControllerInterface
replenishmentControllers []cache.ControllerInterface
}
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
@ -83,7 +82,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
resyncPeriod: options.ResyncPeriod,
registry: options.Registry,
replenishmentControllers: []framework.ControllerInterface{},
replenishmentControllers: []cache.ControllerInterface{},
}
if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
@ -92,7 +91,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
rq.syncHandler = rq.syncResourceQuotaFromKey
// build the controller that observes quota
rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
@ -103,7 +102,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
},
&api.ResourceQuota{},
rq.resyncPeriod(),
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: rq.addQuota,
UpdateFunc: func(old, cur interface{}) {
// We are only interested in observing updates to quota.spec to drive updates to quota.status.

View File

@ -80,7 +80,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
if err != nil {
return fmt.Errorf("error listing routes: %v", err)
}
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
// TODO (cjcullen): use pkg/client/cache.NewInformer to watch this
// and reduce the number of lists needed.
nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{})
if err != nil {

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -88,7 +87,7 @@ type ServiceController struct {
// A store of services, populated by the serviceController
serviceStore cache.StoreToServiceLister
// Watches changes to all services
serviceController *framework.Controller
serviceController *cache.Controller
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
nodeLister cache.StoreToNodeLister
@ -120,7 +119,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
},
workingQueue: workqueue.NewDelayingQueue(),
}
s.serviceStore.Store, s.serviceController = framework.NewInformer(
s.serviceStore.Store, s.serviceController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).List(options)
@ -131,7 +130,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
},
&api.Service{},
serviceSyncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: s.enqueueService,
UpdateFunc: func(old, cur interface{}) {
oldSvc, ok1 := old.(*api.Service)

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
@ -80,7 +79,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
// If we're maintaining a single account, we can scope the accounts we watch to just that name
accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name})
}
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
e.serviceAccounts, e.serviceAccountController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = accountSelector
@ -93,13 +92,13 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
},
&api.ServiceAccount{},
options.ServiceAccountResync,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
DeleteFunc: e.serviceAccountDeleted,
},
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
)
e.namespaces, e.namespaceController = framework.NewIndexerInformer(
e.namespaces, e.namespaceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Namespaces().List(options)
@ -110,7 +109,7 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
},
&api.Namespace{},
options.NamespaceResync,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.namespaceAdded,
UpdateFunc: e.namespaceUpdated,
},
@ -131,8 +130,8 @@ type ServiceAccountsController struct {
namespaces cache.Indexer
// Since we join two objects, we'll watch both of them with controllers.
serviceAccountController *framework.Controller
namespaceController *framework.Controller
serviceAccountController *cache.Controller
namespaceController *cache.Controller
}
// Runs controller loops and returns immediately

View File

@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/registry/secret"
"k8s.io/kubernetes/pkg/runtime"
@ -90,7 +89,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
}
e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
@ -101,7 +100,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
},
&api.ServiceAccount{},
options.ServiceAccountResync,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.queueServiceAccountSync,
UpdateFunc: e.queueServiceAccountUpdateSync,
DeleteFunc: e.queueServiceAccountSync,
@ -109,7 +108,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
)
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
e.secrets, e.secretController = framework.NewIndexerInformer(
e.secrets, e.secretController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector
@ -122,7 +121,7 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
},
&api.Secret{},
options.SecretResync,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: e.queueSecretSync,
UpdateFunc: e.queueSecretUpdateSync,
DeleteFunc: e.queueSecretSync,
@ -144,8 +143,8 @@ type TokensController struct {
secrets cache.Indexer
// Since we join two objects, we'll watch both of them with controllers.
serviceAccountController *framework.Controller
secretController *framework.Controller
serviceAccountController *cache.Controller
secretController *cache.Controller
// syncServiceAccountQueue handles service account events:
// * ensures a referenced token exists for service accounts which still exist

View File

@ -25,10 +25,10 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
@ -66,10 +66,10 @@ type AttachDetachController interface {
// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
kubeClient internalclientset.Interface,
podInformer framework.SharedInformer,
nodeInformer framework.SharedInformer,
pvcInformer framework.SharedInformer,
pvInformer framework.SharedInformer,
podInformer kcache.SharedInformer,
nodeInformer kcache.SharedInformer,
pvcInformer kcache.SharedInformer,
pvInformer kcache.SharedInformer,
cloud cloudprovider.Interface,
plugins []volume.VolumePlugin,
recorder record.EventRecorder) (AttachDetachController, error) {
@ -94,13 +94,13 @@ func NewAttachDetachController(
cloud: cloud,
}
podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.podAdd,
UpdateFunc: adc.podUpdate,
DeleteFunc: adc.podDelete,
})
nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.nodeAdd,
UpdateFunc: adc.nodeUpdate,
DeleteFunc: adc.nodeDelete,
@ -143,12 +143,12 @@ type attachDetachController struct {
// pvcInformer is the shared PVC informer used to fetch and store PVC
// objects from the API server. It is shared with other controllers and
// therefore the PVC objects in its store should be treated as immutable.
pvcInformer framework.SharedInformer
pvcInformer kcache.SharedInformer
// pvInformer is the shared PV informer used to fetch and store PV objects
// from the API server. It is shared with other controllers and therefore
// the PV objects in its store should be treated as immutable.
pvInformer framework.SharedInformer
pvInformer kcache.SharedInformer
// cloud provider used by volume host
cloud cloudprovider.Interface
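Shared informers keep the same consumption pattern after the move; only the handler type's package changes. A minimal sketch of attaching handlers to a shared pod informer; the `informers.NewSharedInformerFactory` call and `Pods().Informer()` accessor are assumed from `pkg/controller/informers` as of this commit, and the handler bodies are illustrative:

```go
package example

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	kcache "k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/controller/informers"
)

func watchSharedPods(c clientset.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(c, 10*time.Minute)
	podInformer := factory.Pods().Informer() // a kcache.SharedIndexInformer

	// Several controllers can hang handlers off the same informer; they share
	// one watch connection and one backing store.
	podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Printf("add: %s\n", obj.(*api.Pod).Name) },
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Printf("update: %s\n", newObj.(*api.Pod).Name) },
		DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
	})

	factory.Start(stopCh)
}
```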

View File

@ -21,7 +21,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
)

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
@ -48,7 +47,7 @@ type DesiredStateOfWorldPopulator interface {
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
loopSleepDuration time.Duration,
podInformer framework.SharedInformer,
podInformer kcache.SharedInformer,
desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
loopSleepDuration: loopSleepDuration,
@ -59,7 +58,7 @@ func NewDesiredStateOfWorldPopulator(
type desiredStateOfWorldPopulator struct {
loopSleepDuration time.Duration
podInformer framework.SharedInformer
podInformer kcache.SharedInformer
desiredStateOfWorld cache.DesiredStateOfWorld
}

View File

@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"

View File

@ -25,8 +25,8 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/strategicpatch"
)
@ -42,7 +42,7 @@ type NodeStatusUpdater interface {
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater(
kubeClient internalclientset.Interface,
nodeInformer framework.SharedInformer,
nodeInformer kcache.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{
actualStateOfWorld: actualStateOfWorld,
@ -53,7 +53,7 @@ func NewNodeStatusUpdater(
type nodeStatusUpdater struct {
kubeClient internalclientset.Interface
nodeInformer framework.SharedInformer
nodeInformer kcache.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld
}

View File

@ -29,7 +29,6 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"
@ -151,12 +150,12 @@ const createProvisionedPVInterval = 10 * time.Second
// PersistentVolumeController is a controller that synchronizes
// PersistentVolumeClaims and PersistentVolumes. It starts two
// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim
// cache.Controllers that watch PersistentVolume and PersistentVolumeClaim
// changes.
type PersistentVolumeController struct {
volumeController *framework.Controller
volumeController *cache.Controller
volumeSource cache.ListerWatcher
claimController *framework.Controller
claimController *cache.Controller
claimSource cache.ListerWatcher
classReflector *cache.Reflector
classSource cache.ListerWatcher
@ -192,7 +191,7 @@ type PersistentVolumeController struct {
}
// syncClaim is the main controller method to decide what to do with a claim.
// It's invoked by appropriate framework.Controller callbacks when a claim is
// It's invoked by appropriate cache.Controller callbacks when a claim is
// created, updated or periodically synced. We do not differentiate between
// these events.
// For easier readability, it was split into syncUnboundClaim and syncBoundClaim
@ -382,7 +381,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu
}
// syncVolume is the main controller method to decide what to do with a volume.
// It's invoked by appropriate framework.Controller callbacks when a volume is
// It's invoked by appropriate cache.Controller callbacks when a volume is
// created, updated or periodically synced. We do not differentiate between
// these events.
func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error {

View File

@ -30,7 +30,6 @@ import (
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/goroutinemap"
@ -65,7 +64,7 @@ func NewPersistentVolumeController(
controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
@ -120,22 +119,22 @@ func NewPersistentVolumeController(
}
controller.classSource = classSource
_, controller.volumeController = framework.NewIndexerInformer(
_, controller.volumeController = cache.NewIndexerInformer(
volumeSource,
&api.PersistentVolume{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: controller.addVolume,
UpdateFunc: controller.updateVolume,
DeleteFunc: controller.deleteVolume,
},
cache.Indexers{"accessmodes": accessModesIndexFunc},
)
_, controller.claimController = framework.NewInformer(
_, controller.claimController = cache.NewInformer(
claimSource,
&api.PersistentVolumeClaim{},
syncPeriod,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: controller.addClaim,
UpdateFunc: controller.updateClaim,
DeleteFunc: controller.deleteClaim,
@ -144,7 +143,7 @@ func NewPersistentVolumeController(
// This is just a cache of StorageClass instances, no special actions are
// needed when a class is created/deleted/updated.
controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
controller.classes = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
controller.classReflector = cache.NewReflector(
classSource,
&storage.StorageClass{},
@ -212,7 +211,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVo
return storeObjectUpdate(ctrl.claims, claim, "claim")
}
// addVolume is callback from framework.Controller watching PersistentVolume
// addVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
pv, ok := obj.(*api.PersistentVolume)
@ -247,7 +246,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
}
}
// updateVolume is callback from framework.Controller watching PersistentVolume
// updateVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
newVolume, ok := newObj.(*api.PersistentVolume)
@ -282,7 +281,7 @@ func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{})
}
}
// deleteVolume is callback from framework.Controller watching PersistentVolume
// deleteVolume is callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
_ = ctrl.volumes.store.Delete(obj)
@ -330,7 +329,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
}
}
// addClaim is callback from framework.Controller watching PersistentVolumeClaim
// addClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
// Store the new claim version in the cache and do not process it if this is
@ -360,7 +359,7 @@ func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
}
}
// updateClaim is callback from framework.Controller watching PersistentVolumeClaim
// updateClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
// Store the new claim version in the cache and do not process it if this is
@ -390,7 +389,7 @@ func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{})
}
}
// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim
// deleteClaim is callback from cache.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
_ = ctrl.claims.Delete(obj)
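`cache.NewIndexerInformer` likewise keeps its shape: it takes an extra `cache.Indexers` map and returns a queryable `cache.Indexer` instead of a plain store. A minimal sketch in the spirit of the "accessmodes" index above; the index name and annotation key are hypothetical:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func volumesByClass(volumeSource cache.ListerWatcher) (cache.Indexer, *cache.Controller) {
	// Hypothetical index: bucket PVs by a storage-class annotation.
	byClass := func(obj interface{}) ([]string, error) {
		pv, ok := obj.(*api.PersistentVolume)
		if !ok {
			return []string{}, nil
		}
		return []string{pv.Annotations["volume.beta.kubernetes.io/storage-class"]}, nil
	}
	indexer, controller := cache.NewIndexerInformer(
		volumeSource,
		&api.PersistentVolume{},
		0, // no periodic resync
		cache.ResourceEventHandlerFuncs{}, // handlers elided for brevity
		cache.Indexers{"byclass": byClass},
	)
	// While controller.Run is active, indexer.ByIndex("byclass", "fast")
	// returns every cached PV whose index key is "fast".
	return indexer, controller
}
```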

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/controller/framework"
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
)
// Test the real controller methods (add/update/delete claim/volume) with
@ -161,8 +161,8 @@ func TestControllerSync(t *testing.T) {
// Initialize the controller
client := &fake.Clientset{}
volumeSource := framework.NewFakePVControllerSource()
claimSource := framework.NewFakePVCControllerSource()
volumeSource := fcache.NewFakePVControllerSource()
claimSource := fcache.NewFakePVCControllerSource()
ctrl := newTestController(client, volumeSource, claimSource, nil, true)
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
for _, claim := range test.initialClaims {
@ -247,7 +247,7 @@ func storeVersion(t *testing.T, prefix string, c cache.Store, version string, ex
// TestControllerCache tests func storeObjectUpdate()
func TestControllerCache(t *testing.T) {
// Cache under test
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// Store new PV
storeVersion(t, "Step1", c, "1", true)
@ -264,7 +264,7 @@ func TestControllerCache(t *testing.T) {
}
func TestControllerCacheParsingError(t *testing.T) {
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// There must be something in the cache to compare with
storeVersion(t, "Step1", c, "1", true)
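The relocated key function behaves as before: it unwraps `cache.DeletedFinalStateUnknown` tombstones before falling back to the usual "namespace/name" key. A minimal sketch:

```go
package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func keyFuncDemo() {
	store := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "web-0"}}
	_ = store.Add(pod) // keyed as "default/web-0"

	// A tombstone delivered for a missed delete still yields a usable key.
	tombstone := cache.DeletedFinalStateUnknown{Key: "default/web-0", Obj: pod}
	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(tombstone)
	fmt.Println(key) // default/web-0
}
```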

View File

@ -38,8 +38,8 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/record"
fcache "k8s.io/kubernetes/pkg/client/testing/cache"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@ -125,8 +125,8 @@ type volumeReactor struct {
changedObjects []interface{}
changedSinceLastSync int
ctrl *PersistentVolumeController
volumeSource *framework.FakePVControllerSource
claimSource *framework.FakePVCControllerSource
volumeSource *fcache.FakePVControllerSource
claimSource *fcache.FakePVCControllerSource
lock sync.Mutex
errors []reactorError
}
@ -571,7 +571,7 @@ func (r *volumeReactor) addClaimEvent(claim *api.PersistentVolumeClaim) {
r.claimSource.Add(claim)
}
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *framework.FakePVControllerSource, claimSource *framework.FakePVCControllerSource, errors []reactorError) *volumeReactor {
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *fcache.FakePVControllerSource, claimSource *fcache.FakePVCControllerSource, errors []reactorError) *volumeReactor {
reactor := &volumeReactor{
volumes: make(map[string]*api.PersistentVolume),
claims: make(map[string]*api.PersistentVolumeClaim),
@ -586,13 +586,13 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController,
func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
if volumeSource == nil {
volumeSource = framework.NewFakePVControllerSource()
volumeSource = fcache.NewFakePVControllerSource()
}
if claimSource == nil {
claimSource = framework.NewFakePVCControllerSource()
claimSource = fcache.NewFakePVCControllerSource()
}
if classSource == nil {
classSource = framework.NewFakeControllerSource()
classSource = fcache.NewFakeControllerSource()
}
ctrl := NewPersistentVolumeController(
kubeClient,

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
kcache "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/wait"
@ -112,10 +111,10 @@ type KubeDNS struct {
domainPath []string
// endpointsController invokes registered callbacks when endpoints change.
endpointsController *kframework.Controller
endpointsController *kcache.Controller
// serviceController invokes registered callbacks when services change.
serviceController *kframework.Controller
serviceController *kcache.Controller
// Map of federation names that the cluster in which this kube-dns is running belongs to, to
// the corresponding domain names.
@ -188,7 +187,7 @@ func (kd *KubeDNS) GetCacheAsJSON() (string, error) {
func (kd *KubeDNS) setServicesStore() {
// Returns a cache.ListWatch that gets all changes to services.
kd.servicesStore, kd.serviceController = kframework.NewInformer(
kd.servicesStore, kd.serviceController = kcache.NewInformer(
&kcache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
return kd.kubeClient.Core().Services(kapi.NamespaceAll).List(options)
@ -199,7 +198,7 @@ func (kd *KubeDNS) setServicesStore() {
},
&kapi.Service{},
resyncPeriod,
kframework.ResourceEventHandlerFuncs{
kcache.ResourceEventHandlerFuncs{
AddFunc: kd.newService,
DeleteFunc: kd.removeService,
UpdateFunc: kd.updateService,
@ -209,7 +208,7 @@ func (kd *KubeDNS) setServicesStore() {
func (kd *KubeDNS) setEndpointsStore() {
// Returns a cache.ListWatch that gets all changes to endpoints.
kd.endpointsStore, kd.endpointsController = kframework.NewInformer(
kd.endpointsStore, kd.endpointsController = kcache.NewInformer(
&kcache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
return kd.kubeClient.Core().Endpoints(kapi.NamespaceAll).List(options)
@ -220,7 +219,7 @@ func (kd *KubeDNS) setEndpointsStore() {
},
&kapi.Endpoints{},
resyncPeriod,
kframework.ResourceEventHandlerFuncs{
kcache.ResourceEventHandlerFuncs{
AddFunc: kd.handleEndpointAdd,
UpdateFunc: func(oldObj, newObj interface{}) {
// TODO: Avoid unwanted updates.

View File

@ -21,13 +21,13 @@ import (
"testing"
"time"
"github.com/hashicorp/golang-lru"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"github.com/hashicorp/golang-lru"
)
func getResourceList(cpu, memory string) api.ResourceList {

View File

@ -19,6 +19,7 @@ package autoprovision
import (
"io"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"fmt"
@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)
func init() {
@ -42,7 +42,7 @@ func init() {
type provision struct {
*admission.Handler
client clientset.Interface
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
}
var _ = admission.WantsInformerFactory(&provision{})

View File

@ -28,7 +28,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
)

View File

@ -19,6 +19,7 @@ package exists
import (
"io"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"fmt"
@ -26,8 +27,7 @@ import (
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
)
func init() {
@ -42,7 +42,7 @@ func init() {
type exists struct {
*admission.Handler
client clientset.Interface
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
}
var _ = admission.WantsInformerFactory(&exists{})

View File

@ -27,7 +27,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
)

View File

@ -23,9 +23,9 @@ import (
lru "github.com/hashicorp/golang-lru"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
@ -52,7 +52,7 @@ type lifecycle struct {
*admission.Handler
client clientset.Interface
immortalNamespaces sets.String
namespaceInformer framework.SharedIndexInformer
namespaceInformer cache.SharedIndexInformer
// forceLiveLookupCache holds a list of entries for namespaces that we have a strong reason to believe are stale in our local cache.
// if a namespace is in this cache, then we will ignore our local state and always fetch latest from api server.
forceLiveLookupCache *lru.Cache

View File

@ -27,7 +27,7 @@ import (
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"

View File

@ -18,6 +18,7 @@ package predicates
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
)

View File

@ -29,7 +29,6 @@ import (
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/runtime"
@ -77,8 +76,8 @@ type ConfigFactory struct {
// Close this to stop all reflectors
StopEverything chan struct{}
scheduledPodPopulator *framework.Controller
nodePopulator *framework.Controller
scheduledPodPopulator *cache.Controller
nodePopulator *cache.Controller
schedulerCache schedulercache.Cache
@ -125,11 +124,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
// We construct this here instead of in CreateFromKeys because
// ScheduledPodLister is something we provide to plug in functions that
// they may need to call.
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
c.createAssignedNonTerminatedPodLW(),
&api.Pod{},
0,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: c.addPodToCache,
UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache,
@ -137,11 +136,11 @@ func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffini
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
c.createNodeLW(),
&api.Node{},
0,
framework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: c.addNodeToCache,
UpdateFunc: c.updateNodeInCache,
DeleteFunc: c.deleteNodeFromCache,
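The scheduler's pod cache is built with the stock namespace index (`cache.NamespaceIndex` backed by `cache.MetaNamespaceIndexFunc`), so per-namespace lookups go through `ByIndex`. A minimal sketch of reading such an indexer; the helper itself is illustrative:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func podsInNamespace(indexer cache.Indexer, namespace string) ([]*api.Pod, error) {
	// NamespaceIndex keys every object by its namespace, so ByIndex returns
	// all cached pods in the given namespace without a full scan.
	objs, err := indexer.ByIndex(cache.NamespaceIndex, namespace)
	if err != nil {
		return nil, err
	}
	pods := make([]*api.Pod, 0, len(objs))
	for _, obj := range objs {
		pods = append(pods, obj.(*api.Pod))
	}
	return pods, nil
}
```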

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
@ -192,7 +191,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
var ns string
var config framework.RCConfig
var controller *controllerframework.Controller
var controller *cache.Controller
var newPods cache.Store
var stopCh chan struct{}
var tracker *podTracker
@ -217,7 +216,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
stopCh = make(chan struct{})
tracker = newPodTracker()
newPods, controller = controllerframework.NewInformer(
newPods, controller = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector
@ -230,7 +229,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
tracker.remember(obj.(*api.Pod), ADD)
},

View File

@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@ -188,7 +187,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
// eLock is a lock protects the events
var eLock sync.Mutex
events := make([](*api.Event), 0)
_, controller := controllerframework.NewInformer(
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dtc.Client.Events(dtc.Namespace).List(options)
@ -199,7 +198,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
},
&api.Event{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eLock.Lock()
defer eLock.Unlock()
@ -215,7 +214,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
var uLock sync.Mutex
updateCount := 0
label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
_, updateController := controllerframework.NewInformer(
_, updateController := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
@ -228,7 +227,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) {
uLock.Lock()
defer uLock.Unlock()
@ -533,7 +532,7 @@ var _ = framework.KubeDescribe("Density", func() {
}
additionalPodsPrefix = "density-latency-pod"
latencyPodsStore, controller := controllerframework.NewInformer(
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
@ -546,7 +545,7 @@ var _ = framework.KubeDescribe("Density", func() {
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))

View File

@ -18,14 +18,15 @@ package e2e
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
"os"
"reflect"
"strconv"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)

View File

@ -29,10 +29,11 @@ import (
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"reflect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/errors"
"reflect"
)
const (

View File

@ -37,7 +37,6 @@ import (
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/client/cache"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@ -580,8 +579,8 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
stopCh := make(chan struct{})
newNode := make(chan *api.Node)
var controller *controllerframework.Controller
_, controller = controllerframework.NewInformer(
var controller *cache.Controller
_, controller = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector
@ -594,7 +593,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
},
&api.Node{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*api.Node)
Expect(ok).To(Equal(true))

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/sets"
@ -275,7 +274,7 @@ func (eq *endpointQueries) added(e *api.Endpoints) {
// blocks until it has finished syncing.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := controllerframework.NewInformer(
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return f.Client.Endpoints(f.Namespace.Name).List(options)
@ -286,7 +285,7 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
},
&api.Endpoints{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if e, ok := obj.(*api.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
controllerframework "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/labels"
@ -478,7 +477,7 @@ func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
// newInformerWatchPod creates an informer to check whether all pods are running.
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]unversioned.Time,
podType string) *controllerframework.Controller {
podType string) *cache.Controller {
ns := f.Namespace.Name
checkPodRunning := func(p *api.Pod) {
mutex.Lock()
@ -492,7 +491,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
}
}
_, controller := controllerframework.NewInformer(
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType})
@ -505,7 +504,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
},
&api.Pod{},
0,
controllerframework.ResourceEventHandlerFuncs{
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))

View File

@ -17,6 +17,8 @@ limitations under the License.
package objectmeta
import (
"testing"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api"
@ -27,7 +29,6 @@ import (
etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
"k8s.io/kubernetes/test/integration/framework"
"testing"
)
func TestIgnoreClusterName(t *testing.T) {

View File

@ -29,11 +29,11 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/client/restclient"
controllerframwork "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
@ -127,7 +127,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
return ret, nil
}
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, controllerframwork.SharedIndexInformer, clientset.Interface) {
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false
_, s := framework.RunAMaster(masterConfig)
@ -160,7 +160,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
// wait for the podInformer to observe the pods. Call this function before
// running the RS controller to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer controllerframwork.SharedIndexInformer, podNum int) {
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) == podNum {
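The poll above waits for an exact pod count; when only initial-sync completion matters, the shared informer's `HasSynced` is the usual signal. A minimal sketch with illustrative poll intervals:

```go
package example

import (
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/util/wait"
)

func waitForInformerSync(informer cache.SharedIndexInformer) error {
	// HasSynced reports whether the informer's initial LIST has been fully
	// processed; until then the indexer may be missing objects.
	return wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		return informer.HasSynced(), nil
	})
}
```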

View File

@ -28,11 +28,11 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/client/restclient"
controllerframwork "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/integration/framework"
@ -124,7 +124,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
return ret, nil
}
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, controllerframwork.SharedIndexInformer, clientset.Interface) {
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
masterConfig.EnableCoreControllers = false
_, s := framework.RunAMaster(masterConfig)
@ -157,7 +157,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
// wait for the podInformer to observe the pods. Call this function before
// running the RC manager to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer controllerframwork.SharedIndexInformer, podNum int) {
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) == podNum {

View File

@ -593,7 +593,6 @@ k8s.io/kubernetes/pkg/controller/deployment,asalkeld,0
k8s.io/kubernetes/pkg/controller/deployment/util,saad-ali,1
k8s.io/kubernetes/pkg/controller/disruption,fabioy,1
k8s.io/kubernetes/pkg/controller/endpoint,mwielgus,1
k8s.io/kubernetes/pkg/controller/framework,smarterclayton,1
k8s.io/kubernetes/pkg/controller/garbagecollector,rmmh,1
k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly,cjcullen,1
k8s.io/kubernetes/pkg/controller/gc,jdef,1
