Merge pull request #37238 from deads2k/controller-02-minor-fixes

Automatic merge from submit-queue

controller manager refactors

The controller manager needs some significant cleanup.  This starts us down the path by respecting parameters like `stopCh`, simplifying discovery checks, removing unnecessary parameters, preventing unnecessary fatals, and using our client builder.
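
To make the new flow concrete, here is a minimal, self-contained sketch of the two patterns this PR combines: poll discovery briefly so a controller manager started alongside the apiserver does not fail immediately, then gate each optional controller on a resource-set lookup and thread one `stop` channel through every `Run`. The `gvr` type, `connect` helper, and timings are simplified stand-ins for the real `schema.GroupVersionResource`, `clientBuilder.Client("controller-discovery")`, and `wait.PollImmediate`:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// gvr is a stand-in for schema.GroupVersionResource.
type gvr struct{ group, version, resource string }

// connect stands in for clientBuilder.Client("controller-discovery"); it
// fails twice before succeeding, like an apiserver that is still booting.
var attempts int

func connect() (map[gvr]bool, error) {
	attempts++
	if attempts < 3 {
		return nil, errors.New("connection refused")
	}
	// Flattened discovery output: the set of resources the apiserver serves.
	return map[gvr]bool{
		{"extensions", "v1beta1", "daemonsets"}: true,
	}, nil
}

// getAvailableResources mirrors the PR's helper: retry for a short window so
// a controller manager started at the same time as the apiserver waits
// instead of dying, then return the flattened resource set.
func getAvailableResources() (map[gvr]bool, error) {
	deadline := time.Now().Add(10 * time.Second)
	for {
		resources, err := connect()
		if err == nil {
			return resources, nil
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("failed to get api versions from server: %v", err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	stop := make(chan struct{}) // one stop channel instead of wait.NeverStop
	available, err := getAvailableResources()
	if err != nil {
		fmt.Println(err) // StartControllers now returns errors rather than glog.Fatalf
		return
	}
	var done []chan struct{}
	if available[gvr{"extensions", "v1beta1", "daemonsets"}] {
		fmt.Println("starting daemon set controller")
		d := make(chan struct{})
		done = append(done, d)
		go func() { <-stop; close(d) }() // Run(..., stop): exits when stop closes
	}
	close(stop) // closing the shared channel shuts every controller down
	for _, d := range done {
		<-d
	}
}
```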

@sttts @ncdc
Kubernetes Submit Queue 2016-11-30 20:08:19 -08:00 committed by GitHub
commit 66fe55f5ad
6 changed files with 183 additions and 203 deletions

View File

@@ -20,7 +20,6 @@ go_library(
     deps = [
         "//cmd/kube-controller-manager/app/options:go_default_library",
         "//pkg/api:go_default_library",
-        "//pkg/api/unversioned:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/apimachinery/registered:go_default_library",
         "//pkg/apis/batch:go_default_library",
@@ -31,6 +30,7 @@ go_library(
         "//pkg/client/leaderelection/resourcelock:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/client/restclient:go_default_library",
+        "//pkg/client/typed/discovery:go_default_library",
         "//pkg/client/typed/dynamic:go_default_library",
         "//pkg/client/unversioned/clientcmd:go_default_library",
         "//pkg/cloudprovider:go_default_library",

View File

@@ -33,7 +33,6 @@ import (
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/apis/batch"
@@ -43,6 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/client/typed/discovery"
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	"k8s.io/kubernetes/pkg/cloudprovider"
@@ -179,7 +179,7 @@ func Run(s *options.CMServer) error {
 			clientBuilder = rootClientBuilder
 		}
-		err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder)
+		err := StartControllers(s, rootClientBuilder, clientBuilder, stop)
 		glog.Fatalf("error running controllers: %v", err)
 		panic("unreachable")
 	}
@@ -222,18 +222,54 @@ func Run(s *options.CMServer) error {
 	panic("unreachable")
 }

-func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder) error {
-	client := func(serviceAccountName string) clientset.Interface {
-		return rootClientBuilder.ClientOrDie(serviceAccountName)
-	}
-	discoveryClient := client("controller-discovery").Discovery()
-	sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), nil, ResyncPeriod(s)())
+// TODO: In general, any controller checking this needs to be dynamic so
+// users don't have to restart their controller manager if they change the apiserver.
+func getAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
+	var discoveryClient discovery.DiscoveryInterface
+	// If apiserver is not running we should wait for some time and fail only then. This is particularly
+	// important when we start apiserver and controller manager at the same time.
+	err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
+		client, err := clientBuilder.Client("controller-discovery")
+		if err != nil {
+			glog.Errorf("Failed to get api versions from server: %v", err)
+			return false, nil
+		}
+		discoveryClient = client.Discovery()
+		return true, nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get api versions from server: %v", err)
+	}
+
+	resourceMap, err := discoveryClient.ServerResources()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get supported resources from server: %v", err)
+	}
+
+	allResources := map[schema.GroupVersionResource]bool{}
+	for _, apiResourceList := range resourceMap {
+		version, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
+		if err != nil {
+			return nil, err
+		}
+		for _, apiResource := range apiResourceList.APIResources {
+			allResources[version.WithResource(apiResource.Name)] = true
+		}
+	}
+
+	return allResources, nil
+}
+
+func StartControllers(s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) error {
+	sharedInformers := informers.NewSharedInformerFactory(rootClientBuilder.ClientOrDie("shared-informers"), nil, ResyncPeriod(s)())

 	// always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
 	if len(s.ServiceAccountKeyFile) > 0 {
 		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
 		if err != nil {
-			return fmt.Errorf("Error reading key for service account token controller: %v", err)
+			return fmt.Errorf("error reading key for service account token controller: %v", err)
 		} else {
 			var rootCA []byte
 			if s.RootCAFile != "" {
@@ -245,7 +281,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 					return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
 				}
 			} else {
-				rootCA = kubeconfig.CAData
+				rootCA = rootClientBuilder.ConfigOrDie("tokens-controller").CAData
 			}

 			go serviceaccountcontroller.NewTokensController(
@@ -254,13 +290,18 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
 					RootCA:         rootCA,
 				},
-			).Run(int(s.ConcurrentSATokenSyncs), wait.NeverStop)
+			).Run(int(s.ConcurrentSATokenSyncs), stop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 	}

-	go endpointcontroller.NewEndpointController(sharedInformers.Pods().Informer(), client("endpoint-controller")).
-		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
+	availableResources, err := getAvailableResources(clientBuilder)
+	if err != nil {
+		return err
+	}
+
+	go endpointcontroller.NewEndpointController(sharedInformers.Pods().Informer(), clientBuilder.ClientOrDie("endpoint-controller")).
+		Run(int(s.ConcurrentEndpointSyncs), stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	go replicationcontroller.NewReplicationManager(
@@ -270,16 +311,16 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		replicationcontroller.BurstReplicas,
 		int(s.LookupCacheSizeForRC),
 		s.EnableGarbageCollector,
-	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
+	).Run(int(s.ConcurrentRCSyncs), stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

-	go podgc.NewPodGC(client("pod-garbage-collector"), sharedInformers.Pods().Informer(),
-		int(s.TerminatedPodGCThreshold)).Run(wait.NeverStop)
+	go podgc.NewPodGC(clientBuilder.ClientOrDie("pod-garbage-collector"), sharedInformers.Pods().Informer(),
+		int(s.TerminatedPodGCThreshold)).Run(stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
 	if err != nil {
-		glog.Fatalf("Cloud provider could not be initialized: %v", err)
+		return fmt.Errorf("cloud provider could not be initialized: %v", err)
 	}

 	_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
@@ -292,17 +333,17 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 	}

 	nodeController, err := nodecontroller.NewNodeController(
 		sharedInformers.Pods(), sharedInformers.Nodes(), sharedInformers.DaemonSets(),
-		cloud, client("node-controller"),
+		cloud, clientBuilder.ClientOrDie("node-controller"),
 		s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration,
 		s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR,
 		int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
 	if err != nil {
-		glog.Fatalf("Failed to initialize nodecontroller: %v", err)
+		return fmt.Errorf("failed to initialize nodecontroller: %v", err)
 	}
 	nodeController.Run()
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

-	serviceController, err := servicecontroller.New(cloud, client("service-controller"), s.ClusterName)
+	serviceController, err := servicecontroller.New(cloud, clientBuilder.ClientOrDie("service-controller"), s.ClusterName)
 	if err != nil {
 		glog.Errorf("Failed to start service controller: %v", err)
 	} else {
@@ -316,7 +357,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 	} else if routes, ok := cloud.Routes(); !ok {
 		glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 	} else {
-		routeController := routecontroller.New(routes, client("route-controller"), s.ClusterName, clusterCIDR)
+		routeController := routecontroller.New(routes, clientBuilder.ClientOrDie("route-controller"), s.ClusterName, clusterCIDR)
 		routeController.Run(s.RouteReconciliationPeriod.Duration)
 		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 	}
@@ -324,7 +365,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
 	}

-	resourceQuotaControllerClient := client("resourcequota-controller")
+	resourceQuotaControllerClient := clientBuilder.ClientOrDie("resourcequota-controller")
 	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient, sharedInformers)
 	groupKindsToReplenish := []schema.GroupKind{
 		api.Kind("Pod"),
@@ -342,40 +383,20 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		ReplenishmentResyncPeriod: ResyncPeriod(s),
 		GroupKindsToReplenish:     groupKindsToReplenish,
 	}
-	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

-	// If apiserver is not running we should wait for some time and fail only then. This is particularly
-	// important when we start apiserver and controller manager at the same time.
-	var versionStrings []string
-	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
-		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
-			return true, nil
-		}
-		glog.Errorf("Failed to get api versions from server: %v", err)
-		return false, nil
-	})
-	if err != nil {
-		glog.Fatalf("Failed to get api versions from server: %v", err)
-	}
-	versions := &unversioned.APIVersions{Versions: versionStrings}
-
-	resourceMap, err := discoveryClient.ServerResources()
-	if err != nil {
-		glog.Fatalf("Failed to get supported resources from server: %v", err)
-	}
-
 	// TODO: should use a dynamic RESTMapper built from the discovery results.
 	restMapper := registered.RESTMapper()

 	// Find the list of namespaced resources via discovery that the namespace controller must manage
-	namespaceKubeClient := client("namespace-controller")
-	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
+	namespaceKubeClient := clientBuilder.ClientOrDie("namespace-controller")
+	namespaceClientPool := dynamic.NewClientPool(rootClientBuilder.ConfigOrDie("namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
 	// TODO: consider using a list-watch + cache here rather than polling
 	var gvrFn func() ([]schema.GroupVersionResource, error)
 	rsrcs, err := namespaceKubeClient.Discovery().ServerResources()
 	if err != nil {
-		glog.Fatalf("Failed to get group version resources: %v", err)
+		return fmt.Errorf("failed to get group version resources: %v", err)
 	}
 	for _, rsrcList := range rsrcs {
 		for ix := range rsrcList.APIResources {
@@ -388,125 +409,91 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 	if gvrFn == nil {
 		gvr, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
 		if err != nil {
-			glog.Fatalf("Failed to get resources: %v", err)
+			return fmt.Errorf("failed to get resources: %v", err)
 		}
 		gvrFn = func() ([]schema.GroupVersionResource, error) {
 			return gvr, nil
 		}
 	}
 	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, gvrFn, s.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes)
-	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
+	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

-	groupVersion := "extensions/v1beta1"
-	resources, found := resourceMap[groupVersion]
-	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "daemonsets") {
-			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsController(sharedInformers.DaemonSets(), sharedInformers.Pods(), sharedInformers.Nodes(), client("daemon-set-controller"), int(s.LookupCacheSizeForDaemonSet)).
-				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-		if containsResource(resources, "jobs") {
-			glog.Infof("Starting job controller")
-			go job.NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), client("job-controller")).
-				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-		if containsResource(resources, "deployments") {
-			glog.Infof("Starting deployment controller")
-			go deployment.NewDeploymentController(sharedInformers.Deployments(), sharedInformers.ReplicaSets(), sharedInformers.Pods(), client("deployment-controller")).
-				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-		if containsResource(resources, "replicasets") {
-			glog.Infof("Starting ReplicaSet controller")
-			go replicaset.NewReplicaSetController(sharedInformers.ReplicaSets(), sharedInformers.Pods(), client("replicaset-controller"), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS), s.EnableGarbageCollector).
-				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	}
-	groupVersion = "autoscaling/v1"
-	resources, found = resourceMap[groupVersion]
-	glog.Infof("Attempting to start horizontal pod autoscaler controller, full resource map %+v", resourceMap)
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "horizontalpodautoscalers") {
-			glog.Infof("Starting horizontal pod controller.")
-			hpaClient := client("horizontal-pod-autoscaler")
-			metricsClient := metrics.NewHeapsterMetricsClient(
-				hpaClient,
-				metrics.DefaultHeapsterNamespace,
-				metrics.DefaultHeapsterScheme,
-				metrics.DefaultHeapsterService,
-				metrics.DefaultHeapsterPort,
-			)
-			replicaCalc := podautoscaler.NewReplicaCalculator(metricsClient, hpaClient.Core())
-			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient.Autoscaling(), replicaCalc, s.HorizontalPodAutoscalerSyncPeriod.Duration).
-				Run(wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	}
-	groupVersion = "policy/v1beta1"
-	resources, found = resourceMap[groupVersion]
-	glog.Infof("Attempting to start disruption controller, full resource map %+v", resourceMap)
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "poddisruptionbudgets") {
-			glog.Infof("Starting disruption controller")
-			go disruption.NewDisruptionController(sharedInformers.Pods().Informer(), client("disruption-controller")).Run(wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	}
-	groupVersion = "apps/v1beta1"
-	resources, found = resourceMap[groupVersion]
-	glog.Infof("Attempting to start statefulset, full resource map %+v", resourceMap)
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "statefulsets") {
-			glog.Infof("Starting StatefulSet controller")
-			resyncPeriod := ResyncPeriod(s)()
-			go petset.NewStatefulSetController(
-				sharedInformers.Pods().Informer(),
-				client("statefulset-controller"),
-				resyncPeriod,
-			).Run(1, wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	}
-	groupVersion = "batch/v2alpha1"
-	resources, found = resourceMap[groupVersion]
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "cronjobs") {
-			glog.Infof("Starting cronjob controller")
-			// // TODO: this is a temp fix for allowing kubeClient list v2alpha1 sj, should switch to using clientset
-			kubeconfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
-			go cronjob.NewCronJobController(client("cronjob-controller")).
-				Run(wait.NeverStop)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	} else {
-		glog.Infof("Not starting %s apis", groupVersion)
-	}
+	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] {
+		go daemon.NewDaemonSetsController(sharedInformers.DaemonSets(), sharedInformers.Pods(), sharedInformers.Nodes(), clientBuilder.ClientOrDie("daemon-set-controller"), int(s.LookupCacheSizeForDaemonSet)).
+			Run(int(s.ConcurrentDaemonSetSyncs), stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"}] {
+		glog.Infof("Starting job controller")
+		go job.NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), clientBuilder.ClientOrDie("job-controller")).
+			Run(int(s.ConcurrentJobSyncs), stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
+		glog.Infof("Starting deployment controller")
+		go deployment.NewDeploymentController(sharedInformers.Deployments(), sharedInformers.ReplicaSets(), sharedInformers.Pods(), clientBuilder.ClientOrDie("deployment-controller")).
+			Run(int(s.ConcurrentDeploymentSyncs), stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"}] {
+		glog.Infof("Starting ReplicaSet controller")
+		go replicaset.NewReplicaSetController(sharedInformers.ReplicaSets(), sharedInformers.Pods(), clientBuilder.ClientOrDie("replicaset-controller"), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS), s.EnableGarbageCollector).
+			Run(int(s.ConcurrentRSSyncs), stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
+		glog.Infof("Starting horizontal pod autoscaler controller.")
+		hpaClient := clientBuilder.ClientOrDie("horizontal-pod-autoscaler")
+		metricsClient := metrics.NewHeapsterMetricsClient(
+			hpaClient,
+			metrics.DefaultHeapsterNamespace,
+			metrics.DefaultHeapsterScheme,
+			metrics.DefaultHeapsterService,
+			metrics.DefaultHeapsterPort,
+		)
+		replicaCalc := podautoscaler.NewReplicaCalculator(metricsClient, hpaClient.Core())
+		go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient.Autoscaling(), replicaCalc, s.HorizontalPodAutoscalerSyncPeriod.Duration).
+			Run(stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}] {
+		glog.Infof("Starting disruption controller")
+		go disruption.NewDisruptionController(sharedInformers.Pods().Informer(), clientBuilder.ClientOrDie("disruption-controller")).Run(stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}] {
+		glog.Infof("Starting StatefulSet controller")
+		resyncPeriod := ResyncPeriod(s)()
+		go petset.NewStatefulSetController(
+			sharedInformers.Pods().Informer(),
+			clientBuilder.ClientOrDie("statefulset-controller"),
+			resyncPeriod,
+		).Run(1, stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}
+	if availableResources[schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}] {
+		glog.Infof("Starting cronjob controller")
+		// TODO: this is a temp fix for allowing kubeClient list v2alpha1 sj, should switch to using clientset
+		cronjobConfig := rootClientBuilder.ConfigOrDie("cronjob-controller")
+		cronjobConfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
+		go cronjob.NewCronJobController(clientset.NewForConfigOrDie(cronjobConfig)).Run(stop)
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}

 	alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
 	if err != nil {
-		glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
+		return fmt.Errorf("an backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
 	}
 	params := persistentvolumecontroller.ControllerParameters{
-		KubeClient:                client("persistent-volume-binder"),
+		KubeClient:                clientBuilder.ClientOrDie("persistent-volume-binder"),
 		SyncPeriod:                s.PVClaimBinderSyncPeriod.Duration,
 		AlphaProvisioner:          alphaProvisioner,
 		VolumePlugins:             ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
@@ -515,64 +502,57 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 		EnableDynamicProvisioning: s.VolumeConfiguration.EnableDynamicProvisioning,
 	}
 	volumeController := persistentvolumecontroller.NewController(params)
-	volumeController.Run(wait.NeverStop)
+	volumeController.Run(stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	attachDetachController, attachDetachControllerErr :=
 		attachdetach.NewAttachDetachController(
-			client("attachdetach-controller"),
+			clientBuilder.ClientOrDie("attachdetach-controller"),
 			sharedInformers.Pods().Informer(),
 			sharedInformers.Nodes().Informer(),
 			sharedInformers.PersistentVolumeClaims().Informer(),
 			sharedInformers.PersistentVolumes().Informer(),
 			cloud,
-			ProbeAttachableVolumePlugins(s.VolumeConfiguration),
-			recorder)
+			ProbeAttachableVolumePlugins(s.VolumeConfiguration))
 	if attachDetachControllerErr != nil {
-		glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
+		return fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr)
 	}
-	go attachDetachController.Run(wait.NeverStop)
+	go attachDetachController.Run(stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

-	groupVersion = "certificates.k8s.io/v1alpha1"
-	resources, found = resourceMap[groupVersion]
-	glog.Infof("Attempting to start certificates, full resource map %+v", resourceMap)
-	if containsVersion(versions, groupVersion) && found {
-		glog.Infof("Starting %s apis", groupVersion)
-		if containsResource(resources, "certificatesigningrequests") {
-			glog.Infof("Starting certificate request controller")
-			resyncPeriod := ResyncPeriod(s)()
-			certController, err := certcontroller.NewCertificateController(
-				client("certificate-controller"),
-				resyncPeriod,
-				s.ClusterSigningCertFile,
-				s.ClusterSigningKeyFile,
-				s.ApproveAllKubeletCSRsForGroup,
-			)
-			if err != nil {
-				glog.Errorf("Failed to start certificate controller: %v", err)
-			} else {
-				go certController.Run(1, wait.NeverStop)
-			}
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
-		}
-	}
+	if availableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1alpha1", Resource: "certificatesigningrequests"}] {
+		glog.Infof("Starting certificate request controller")
+		resyncPeriod := ResyncPeriod(s)()
+		certController, err := certcontroller.NewCertificateController(
+			clientBuilder.ClientOrDie("certificate-controller"),
+			resyncPeriod,
+			s.ClusterSigningCertFile,
+			s.ClusterSigningKeyFile,
+			s.ApproveAllKubeletCSRsForGroup,
+		)
+		if err != nil {
+			glog.Errorf("Failed to start certificate controller: %v", err)
+		} else {
+			go certController.Run(1, stop)
+		}
+		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	}

 	go serviceaccountcontroller.NewServiceAccountsController(
 		sharedInformers.ServiceAccounts(), sharedInformers.Namespaces(),
-		client("service-account-controller"),
+		clientBuilder.ClientOrDie("service-account-controller"),
 		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
 	).Run(1, stop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	if s.EnableGarbageCollector {
-		gcClientset := client("generic-garbage-collector")
+		gcClientset := clientBuilder.ClientOrDie("generic-garbage-collector")
 		groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
 		if err != nil {
-			glog.Fatalf("Failed to get supported resources from server: %v", err)
+			return fmt.Errorf("failed to get supported resources from server: %v", err)
 		}
-		config := restclient.AddUserAgent(kubeconfig, "generic-garbage-collector")
+		config := rootClientBuilder.ConfigOrDie("generic-garbage-collector")
 		config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
 		metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
 		config.ContentConfig = dynamic.ContentConfig()
@@ -582,7 +562,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 			glog.Errorf("Failed to start the generic garbage collector: %v", err)
 		} else {
 			workers := int(s.ConcurrentGCSyncs)
-			go garbageCollector.Run(workers, wait.NeverStop)
+			go garbageCollector.Run(workers, stop)
 		}
 	}
@@ -590,22 +570,3 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 	select {}
 }
-
-func containsVersion(versions *unversioned.APIVersions, version string) bool {
-	for ix := range versions.Versions {
-		if versions.Versions[ix] == version {
-			return true
-		}
-	}
-	return false
-}
-
-func containsResource(resources *unversioned.APIResourceList, resourceName string) bool {
-	for ix := range resources.APIResources {
-		resource := resources.APIResources[ix]
-		if resource.Name == resourceName {
-			return true
-		}
-	}
-	return false
-}

View File

@@ -38,6 +38,7 @@ import (
 // ControllerClientBuilder allow syou to get clients and configs for controllers
 type ControllerClientBuilder interface {
 	Config(name string) (*restclient.Config, error)
+	ConfigOrDie(name string) *restclient.Config
 	Client(name string) (clientset.Interface, error)
 	ClientOrDie(name string) clientset.Interface
 }
@@ -50,7 +51,15 @@ type SimpleControllerClientBuilder struct {
 func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) {
 	clientConfig := *b.ClientConfig
-	return &clientConfig, nil
+	return restclient.AddUserAgent(&clientConfig, name), nil
+}
+
+func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
+	clientConfig, err := b.Config(name)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	return clientConfig
 }

 func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
@@ -58,7 +67,7 @@ func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface,
 	if err != nil {
 		return nil, err
 	}
-	return clientset.NewForConfig(restclient.AddUserAgent(clientConfig, name))
+	return clientset.NewForConfig(clientConfig)
 }

 func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
@@ -150,6 +159,14 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 	return clientConfig, nil
 }

+func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
+	clientConfig, err := b.Config(name)
+	if err != nil {
+		glog.Fatal(err)
+	}
+	return clientConfig
+}
+
 func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) {
 	clientConfig, err := b.Config(name)
 	if err != nil {
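
The new `ConfigOrDie` methods added in this file follow the existing `ClientOrDie` convention: same behavior as `Config`, but fatal on error, so call sites such as `rootClientBuilder.ConfigOrDie("namespace-controller")` stay one-liners. A dependency-free sketch of the shape, where `config` and `builder` are hypothetical stand-ins for `restclient.Config` and the real builders:

```go
package main

import (
	"fmt"
	"log"
)

// config is a stand-in for restclient.Config; only the field we care about here.
type config struct{ userAgent string }

// builder is a stand-in for SimpleControllerClientBuilder.
type builder struct{ base config }

// Config copies the base config and stamps a per-controller user agent,
// as SimpleControllerClientBuilder.Config now does via restclient.AddUserAgent.
func (b builder) Config(name string) (*config, error) {
	c := b.base // copy, so controllers cannot mutate each other's config
	c.userAgent = name
	return &c, nil
}

// ConfigOrDie matches the new interface method: Config, but fatal on error,
// mirroring the existing ClientOrDie convention.
func (b builder) ConfigOrDie(name string) *config {
	c, err := b.Config(name)
	if err != nil {
		log.Fatal(err)
	}
	return c
}

func main() {
	b := builder{}
	fmt.Println(b.ConfigOrDie("namespace-controller").userAgent) // namespace-controller
}
```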

View File

@@ -19,6 +19,7 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/client/cache:go_default_library",
         "//pkg/client/clientset_generated/release_1_5:go_default_library",
+        "//pkg/client/clientset_generated/release_1_5/typed/core/v1:go_default_library",
         "//pkg/client/record:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller/volume/attachdetach/cache:go_default_library",
@@ -42,7 +43,6 @@ go_test(
     library = "go_default_library",
     tags = ["automanaged"],
     deps = [
-        "//pkg/client/record:go_default_library",
         "//pkg/controller/informers:go_default_library",
        "//pkg/controller/volume/attachdetach/testing:go_default_library",
    ],

View File

@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	kcache "k8s.io/kubernetes/pkg/client/cache"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -76,8 +77,7 @@ func NewAttachDetachController(
 	pvcInformer kcache.SharedInformer,
 	pvInformer kcache.SharedInformer,
 	cloud cloudprovider.Interface,
-	plugins []volume.VolumePlugin,
-	recorder record.EventRecorder) (AttachDetachController, error) {
+	plugins []volume.VolumePlugin) (AttachDetachController, error) {
 	// TODO: The default resyncPeriod for shared informers is 12 hours, this is
 	// unacceptable for the attach/detach controller. For example, if a pod is
 	// skipped because the node it is scheduled to didn't set its annotation in
@@ -115,6 +115,11 @@ func NewAttachDetachController(
 		return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
 	}

+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+	recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "attachdetach"})
+
 	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
 	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
 	adc.attacherDetacher =
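
The `recorder` parameter removed from the signature above is now constructed inside the controller, as the added lines show: it derives a broadcaster and recorder from the `kubeClient` it already receives. A hypothetical, dependency-free sketch of that shape (the real code uses `record.NewBroadcaster`, `StartRecordingToSink`, and `v1core.EventSinkImpl`):

```go
package main

import "fmt"

// broadcaster is a stand-in for record.EventBroadcaster: it fans events out
// to every registered sink.
type broadcaster struct{ sinks []func(string) }

func (b *broadcaster) startLogging(log func(string)) { b.sinks = append(b.sinks, log) }

// newRecorder returns a stand-in for record.EventRecorder, stamped with a
// component name, the way NewRecorder stamps v1.EventSource.
func (b *broadcaster) newRecorder(component string) func(string) {
	return func(event string) {
		for _, sink := range b.sinks {
			sink("[" + component + "] " + event)
		}
	}
}

type attachDetachController struct{ record func(string) }

// newAttachDetachController builds its own broadcaster and recorder instead
// of taking one as a parameter, so callers (and tests) no longer supply one.
func newAttachDetachController() *attachDetachController {
	b := &broadcaster{}
	b.startLogging(func(s string) { fmt.Println(s) })
	return &attachDetachController{record: b.newRecorder("attachdetach")}
}

func main() {
	adc := newAttachDetachController()
	adc.record(`attached volume "vol-123" to node "worker-1"`)
}
```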

View File

@@ -20,7 +20,6 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
 )
@@ -33,7 +32,6 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
 	nodeInformer := informers.NewNodeInformer(fakeKubeClient, resyncPeriod)
 	pvcInformer := informers.NewPVCInformer(fakeKubeClient, resyncPeriod)
 	pvInformer := informers.NewPVInformer(fakeKubeClient, resyncPeriod)
-	fakeRecorder := &record.FakeRecorder{}

 	// Act
 	_, err := NewAttachDetachController(
@@ -43,8 +41,7 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
 		pvcInformer,
 		pvInformer,
 		nil, /* cloud */
-		nil, /* plugins */
-		fakeRecorder)
+		nil /* plugins */)

 	// Assert
 	if err != nil {