Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00
This is a manual reversion of #20702.
I can't revert it through GitHub, which says: "Sorry, this pull request couldn't be reverted automatically. It may have already been reverted, or the content may have changed since it was merged."
Reverts commit 0c191e787b
This commit is contained in:
parent 60cc5d10b7
commit b1743a6887
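For reference, a manual reversion like this is typically produced locally when the automatic revert fails; a minimal sketch, assuming a checkout of the target branch and that 0c191e787b is the squashed merge commit of #20702:

    git revert --no-commit 0c191e787b    # apply the inverse of the merged change; stops on conflicts
    # resolve any conflicts by hand, stage the files, then:
    git commit

The resulting commit is what is shown in the diff below.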
@@ -174,16 +174,16 @@ func Run(s *options.CMServer) error {

func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *client.Config, stop <-chan struct{}) error {
go endpointcontroller.NewEndpointController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")), ResyncPeriod(s)).
Run(s.EndpointControllerOptions.ConcurrentEndpointSyncs, util.NeverStop)
Run(s.ConcurrentEndpointSyncs, util.NeverStop)

go replicationcontroller.NewReplicationManager(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")),
ResyncPeriod(s),
replicationcontroller.BurstReplicas,
).Run(s.ReplicationControllerOptions.ConcurrentRCSyncs, util.NeverStop)
).Run(s.ConcurrentRCSyncs, util.NeverStop)

if s.GarbageCollectorOptions.TerminatedPodGCThreshold > 0 {
go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.GarbageCollectorOptions.TerminatedPodGCThreshold).
if s.TerminatedPodGCThreshold > 0 {
go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
Run(util.NeverStop)
}
@@ -193,40 +193,32 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
}

nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
s.NodeControllerOptions.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.NodeControllerOptions.DeletingPodsQps, s.NodeControllerOptions.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.NodeControllerOptions.DeletingPodsQps, s.NodeControllerOptions.DeletingPodsBurst),
s.NodeControllerOptions.NodeMonitorGracePeriod, s.NodeControllerOptions.NodeStartupGracePeriod, s.NodeControllerOptions.NodeMonitorPeriod,
&s.NodeControllerOptions.ClusterCIDR, s.NodeControllerOptions.AllocateNodeCIDRs,
)
s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)

serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
if err := serviceController.Run(s.ServiceControllerOptions.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}

// TODO: Figure out what the relation between route controller and node controller should be.
if s.NodeControllerOptions.AllocateNodeCIDRs {
if s.AllocateNodeCIDRs {
if cloud == nil {
glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
} else if routes, ok := cloud.Routes(); !ok {
glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
} else {
routeController := routecontroller.New(
routes,
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")),
s.ClusterName,
&s.NodeControllerOptions.ClusterCIDR,
)
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
routeController.Run(s.NodeSyncPeriod)
}
} else {
glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.NodeControllerOptions.AllocateNodeCIDRs)
glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
}

go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
controller.StaticResyncPeriodFunc(s.ResourceQuotaControllerOptions.ResourceQuotaSyncPeriod)).Run(s.ResourceQuotaControllerOptions.ConcurrentResourceQuotaSyncs, util.NeverStop)
controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)

// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -248,10 +240,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Fatalf("Failed to get supported resources from server: %v", err)
}

namespacecontroller.NewNamespaceController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")),
versions,
s.NamespaceControllerOptions.NamespaceSyncPeriod).Run()
namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod).Run()

groupVersion := "extensions/v1beta1"
resources, found := resourceMap[groupVersion]
@@ -268,47 +257,43 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
metrics.DefaultHeapsterService,
metrics.DefaultHeapsterPort,
)
// TODO: rename controller/podautoscaler into controller/horizontal
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.PodAutoscalerOptions.HorizontalPodAutoscalerSyncPeriod)
Run(s.HorizontalPodAutoscalerSyncPeriod)
}

if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
Run(s.DaemonControllerOptions.ConcurrentDSCSyncs, util.NeverStop)
Run(s.ConcurrentDSCSyncs, util.NeverStop)
}

if containsResource(resources, "jobs") {
glog.Infof("Starting job controller")
go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), ResyncPeriod(s)).
Run(s.JobControllerOptions.ConcurrentJobSyncs, util.NeverStop)
Run(s.ConcurrentJobSyncs, util.NeverStop)
}

if containsResource(resources, "deployments") {
glog.Infof("Starting deployment controller")
go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
Run(s.DeploymentControllerOptions.ConcurrentDeploymentSyncs, util.NeverStop)
Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
}
}

volumePlugins := ProbeRecyclableVolumePlugins(s.PersistentVolumeControllerOptions.VolumeConfigFlags)
provisioner, err := NewVolumeProvisioner(cloud, s.PersistentVolumeControllerOptions.VolumeConfigFlags)
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}

pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod,
)
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
pvclaimBinder.Run()

pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod,
s.PersistentVolumeControllerOptions.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
ProbeRecyclableVolumePlugins(s.PersistentVolumeControllerOptions.VolumeConfigFlags),
s.PVClaimBinderSyncPeriod,
s.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
ProbeRecyclableVolumePlugins(s.VolumeConfigFlags),
cloud,
)
if err != nil {
@@ -317,7 +302,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
pvRecycler.Run()

if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
@@ -338,8 +323,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
rootCA = kubeconfig.CAData
}

if len(s.ServiceAccountControllerOptions.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountControllerOptions.ServiceAccountKeyFile)
if len(s.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
if err != nil {
glog.Errorf("Error reading key for service account token controller: %v", err)
} else {
@@ -26,20 +26,6 @@ import (

"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/leaderelection"
daemonoptions "k8s.io/kubernetes/pkg/controller/daemon/options"
deploymentoptions "k8s.io/kubernetes/pkg/controller/deployment/options"
endpointoptions "k8s.io/kubernetes/pkg/controller/endpoint/options"
gcoptions "k8s.io/kubernetes/pkg/controller/gc/options"
joboptions "k8s.io/kubernetes/pkg/controller/job/options"
namespaceoptions "k8s.io/kubernetes/pkg/controller/namespace/options"
nodeoptions "k8s.io/kubernetes/pkg/controller/node/options"
pvoptions "k8s.io/kubernetes/pkg/controller/persistentvolume/options"
hpaoptions "k8s.io/kubernetes/pkg/controller/podautoscaler/options"
replicationoptions "k8s.io/kubernetes/pkg/controller/replication/options"
resourcequotaoptions "k8s.io/kubernetes/pkg/controller/resourcequota/options"
serviceoptions "k8s.io/kubernetes/pkg/controller/service/options"
serviceaccountoptions "k8s.io/kubernetes/pkg/controller/serviceaccount/options"

"k8s.io/kubernetes/pkg/master/ports"

"github.com/spf13/pflag"
@@ -47,107 +33,158 @@ import (

// CMServer is the main context object for the controller manager.
type CMServer struct {
Address net.IP
CloudConfigFile string
CloudProvider string
ClusterName string
EnableProfiling bool
KubeAPIBurst int
KubeAPIQPS float32
Kubeconfig string
Master string
MinResyncPeriod time.Duration
Port int
RootCAFile string
Port int
Address net.IP
CloudProvider string
CloudConfigFile string
ConcurrentEndpointSyncs int
ConcurrentRCSyncs int
ConcurrentDSCSyncs int
ConcurrentJobSyncs int
ConcurrentResourceQuotaSyncs int
ConcurrentDeploymentSyncs int
ServiceSyncPeriod time.Duration
NodeSyncPeriod time.Duration
ResourceQuotaSyncPeriod time.Duration
NamespaceSyncPeriod time.Duration
PVClaimBinderSyncPeriod time.Duration
VolumeConfigFlags VolumeConfigFlags
TerminatedPodGCThreshold int
HorizontalPodAutoscalerSyncPeriod time.Duration
DeploymentControllerSyncPeriod time.Duration
MinResyncPeriod time.Duration
RegisterRetryCount int
NodeMonitorGracePeriod time.Duration
NodeStartupGracePeriod time.Duration
NodeMonitorPeriod time.Duration
NodeStatusUpdateRetry int
PodEvictionTimeout time.Duration
DeletingPodsQps float32
DeletingPodsBurst int
ServiceAccountKeyFile string
RootCAFile string

DaemonControllerOptions daemonoptions.DaemonControllerOptions
DeploymentControllerOptions deploymentoptions.DeploymentControllerOptions
EndpointControllerOptions endpointoptions.EndpointControllerOptions
GarbageCollectorOptions gcoptions.GarbageCollectorOptions
JobControllerOptions joboptions.JobControllerOptions
LeaderElection componentconfig.LeaderElectionConfiguration
NamespaceControllerOptions namespaceoptions.NamespaceControllerOptions
NodeControllerOptions nodeoptions.NodeControllerOptions
PersistentVolumeControllerOptions pvoptions.PersistentVolumeControllerOptions
PodAutoscalerOptions hpaoptions.PodAutoscalerOptions
ReplicationControllerOptions replicationoptions.ReplicationControllerOptions
ResourceQuotaControllerOptions resourcequotaoptions.ResourceQuotaControllerOptions
ServiceControllerOptions serviceoptions.ServiceControllerOptions
ServiceAccountControllerOptions serviceaccountoptions.ServiceAccountControllerOptions
ClusterName string
ClusterCIDR net.IPNet
AllocateNodeCIDRs bool
EnableProfiling bool

// TODO: split into different rates for different components (?)
NodeSyncPeriod time.Duration
Master string
Kubeconfig string
KubeAPIQPS float32
KubeAPIBurst int

// deprecated
DeploymentControllerSyncPeriod time.Duration
RegisterRetryCount int
LeaderElection componentconfig.LeaderElectionConfiguration
}

// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
type VolumeConfigFlags struct {
PersistentVolumeRecyclerMaximumRetry int
PersistentVolumeRecyclerMinimumTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathNFS string
PersistentVolumeRecyclerIncrementTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathHostPath string
PersistentVolumeRecyclerMinimumTimeoutHostPath int
PersistentVolumeRecyclerIncrementTimeoutHostPath int
EnableHostPathProvisioning bool
}
// NewCMServer creates a new CMServer with a default config.
func NewCMServer() *CMServer {
s := CMServer{
Address: net.ParseIP("0.0.0.0"),
ClusterName: "kubernetes",
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
MinResyncPeriod: 12 * time.Hour,
Port: ports.ControllerManagerPort,

NodeSyncPeriod: 10 * time.Second,

LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
DeploymentControllerOptions: deploymentoptions.NewDeploymentControllerOptions(),
DaemonControllerOptions: daemonoptions.NewDaemonControllerOptions(),
EndpointControllerOptions: endpointoptions.NewEndpointControllerOptions(),
GarbageCollectorOptions: gcoptions.NewGarbageCollectorOptions(),
JobControllerOptions: joboptions.NewJobControllerOptions(),
NamespaceControllerOptions: namespaceoptions.NewNamespaceControllerOptions(),
NodeControllerOptions: nodeoptions.NewNodeControllerOptions(),
PersistentVolumeControllerOptions: pvoptions.NewPersistentVolumeControllerOptions(),
PodAutoscalerOptions: hpaoptions.NewPodAutoscalerOptions(),
ReplicationControllerOptions: replicationoptions.NewReplicationControllerOptions(),
ResourceQuotaControllerOptions: resourcequotaoptions.NewResourceQuotaControllerOptions(),
ServiceControllerOptions: serviceoptions.NewServiceControllerOptions(),
ServiceAccountControllerOptions: serviceaccountoptions.NewServiceAccountControllerOptions(),
Port: ports.ControllerManagerPort,
Address: net.ParseIP("0.0.0.0"),
ConcurrentEndpointSyncs: 5,
ConcurrentRCSyncs: 5,
ConcurrentDSCSyncs: 2,
ConcurrentJobSyncs: 5,
ConcurrentResourceQuotaSyncs: 5,
ConcurrentDeploymentSyncs: 5,
ServiceSyncPeriod: 5 * time.Minute,
NodeSyncPeriod: 10 * time.Second,
ResourceQuotaSyncPeriod: 5 * time.Minute,
NamespaceSyncPeriod: 5 * time.Minute,
PVClaimBinderSyncPeriod: 10 * time.Minute,
HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
DeploymentControllerSyncPeriod: 30 * time.Second,
MinResyncPeriod: 12 * time.Hour,
RegisterRetryCount: 10,
PodEvictionTimeout: 5 * time.Minute,
NodeMonitorGracePeriod: 40 * time.Second,
NodeStartupGracePeriod: 60 * time.Second,
NodeMonitorPeriod: 5 * time.Second,
ClusterName: "kubernetes",
TerminatedPodGCThreshold: 12500,
VolumeConfigFlags: VolumeConfigFlags{
// default values here
PersistentVolumeRecyclerMaximumRetry: 3,
PersistentVolumeRecyclerMinimumTimeoutNFS: 300,
PersistentVolumeRecyclerIncrementTimeoutNFS: 30,
PersistentVolumeRecyclerMinimumTimeoutHostPath: 60,
PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
EnableHostPathProvisioning: false,
},
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
}
return &s
}
// AddFlags adds flags for a specific CMServer to the specified FlagSet
func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.IPVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.DurationVar(&s.MinResyncPeriod, "min-resync-period", s.MinResyncPeriod, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")

fs.IPVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more reponsive replica management, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more reponsive deployments, but more CPU (and network) load")
fs.DurationVar(&s.ServiceSyncPeriod, "service-sync-period", s.ServiceSyncPeriod, "The period for syncing services with their external load balancers")
fs.DurationVar(&s.NodeSyncPeriod, "node-sync-period", s.NodeSyncPeriod, ""+
"The period for syncing nodes from cloudprovider. Longer periods will result in "+
"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
s.LeaderElection.AddFlags(fs)
s.DaemonControllerOptions.AddFlags(fs)
s.DeploymentControllerOptions.AddFlags(fs)
s.EndpointControllerOptions.AddFlags(fs)
s.GarbageCollectorOptions.AddFlags(fs)
s.JobControllerOptions.AddFlags(fs)
s.NamespaceControllerOptions.AddFlags(fs)
s.NodeControllerOptions.AddFlags(fs)
s.PersistentVolumeControllerOptions.AddFlags(fs)
s.PodAutoscalerOptions.AddFlags(fs)
s.ReplicationControllerOptions.AddFlags(fs)
s.ResourceQuotaControllerOptions.AddFlags(fs)
s.ServiceControllerOptions.AddFlags(fs)
s.ServiceAccountControllerOptions.AddFlags(fs)
fs.DurationVar(&s.DeploymentControllerSyncPeriod, "deployment-controller-sync-period", 0, "Period for syncing the deployments.")
fs.MarkDeprecated("deployment-controller-sync-period", "This flag is currently no-op and will be deleted.")
fs.IntVar(&s.RegisterRetryCount, "register-retry-count", 0, ""+
fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
fs.DurationVar(&s.NamespaceSyncPeriod, "namespace-sync-period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
fs.DurationVar(&s.MinResyncPeriod, "min-resync-period", s.MinResyncPeriod, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfigFlags.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.IntVar(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod, "Period for syncing the deployments.")
fs.DurationVar(&s.PodEvictionTimeout, "pod-eviction-timeout", s.PodEvictionTimeout, "The grace period for deleting pods on failed nodes.")
fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", s.NodeMonitorGracePeriod,
"Amount of time which we allow running Node to be unresponsive before marking it unhealty. "+
"Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
"where N means number of retries allowed for kubelet to post node status.")
fs.DurationVar(&s.NodeStartupGracePeriod, "node-startup-grace-period", s.NodeStartupGracePeriod,
"Amount of time which we allow starting Node to be unresponsive before marking it unhealty.")
fs.DurationVar(&s.NodeMonitorPeriod, "node-monitor-period", s.NodeMonitorPeriod,
"The period for syncing NodeStatus in NodeController.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.IPNetVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
leaderelection.BindFlags(&s.LeaderElection, fs)
}
@@ -23,7 +23,7 @@ import (

"fmt"

"k8s.io/kubernetes/pkg/controller/persistentvolume/options"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"

// Cloud providers
_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
@@ -128,13 +128,13 @@ func (s *CMServer) Run(_ []string) error {
}()

endpoints := s.createEndpointController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")))
go endpoints.Run(s.EndpointControllerOptions.ConcurrentEndpointSyncs, util.NeverStop)
go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

go replicationcontroller.NewReplicationManager(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas).
Run(s.ReplicationControllerOptions.ConcurrentRCSyncs, util.NeverStop)
Run(s.ConcurrentRCSyncs, util.NeverStop)

if s.GarbageCollectorOptions.TerminatedPodGCThreshold > 0 {
go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.GarbageCollectorOptions.TerminatedPodGCThreshold).
if s.TerminatedPodGCThreshold > 0 {
go gc.New(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold).
Run(util.NeverStop)
}
@@ -148,37 +148,32 @@ func (s *CMServer) Run(_ []string) error {
}

nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
s.NodeControllerOptions.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.NodeControllerOptions.DeletingPodsQps, s.NodeControllerOptions.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.NodeControllerOptions.DeletingPodsQps, s.NodeControllerOptions.DeletingPodsBurst),
s.NodeControllerOptions.NodeMonitorGracePeriod, s.NodeControllerOptions.NodeStartupGracePeriod, s.NodeControllerOptions.NodeMonitorPeriod,
(*net.IPNet)(&s.NodeControllerOptions.ClusterCIDR), s.NodeControllerOptions.AllocateNodeCIDRs)
s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)

nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")),
s.NodeControllerOptions.NodeMonitorPeriod, time.Now)
nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod, time.Now)
if err := nodeStatusUpdaterController.Run(util.NeverStop); err != nil {
glog.Fatalf("Failed to start node status update controller: %v", err)
}

serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
if err := serviceController.Run(s.ServiceControllerOptions.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}

if s.NodeControllerOptions.AllocateNodeCIDRs {
if s.AllocateNodeCIDRs {
routes, ok := cloud.Routes()
if !ok {
glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
}
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.NodeControllerOptions.ClusterCIDR))
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
routeController.Run(s.NodeSyncPeriod)
}

go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(
client.AddUserAgent(kubeconfig, "resource-quota-controller")),
controller.StaticResyncPeriodFunc(s.ResourceQuotaControllerOptions.ResourceQuotaSyncPeriod),
).Run(s.ResourceQuotaControllerOptions.ConcurrentResourceQuotaSyncs, util.NeverStop)
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)

// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -200,9 +195,7 @@ func (s *CMServer) Run(_ []string) error {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}

namespaceController := namespacecontroller.NewNamespaceController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")),
&unversioned.APIVersions{}, s.NamespaceControllerOptions.NamespaceSyncPeriod)
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
namespaceController.Run()

groupVersion := "extensions/v1beta1"
@@ -221,52 +214,54 @@ func (s *CMServer) Run(_ []string) error {
metrics.DefaultHeapsterPort,
)
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.PodAutoscalerOptions.HorizontalPodAutoscalerSyncPeriod)
Run(s.HorizontalPodAutoscalerSyncPeriod)
}

if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod).
Run(s.DaemonControllerOptions.ConcurrentDSCSyncs, util.NeverStop)
Run(s.ConcurrentDSCSyncs, util.NeverStop)
}

if containsResource(resources, "jobs") {
glog.Infof("Starting job controller")
go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
Run(s.JobControllerOptions.ConcurrentJobSyncs, util.NeverStop)
Run(s.ConcurrentJobSyncs, util.NeverStop)
}

if containsResource(resources, "deployments") {
glog.Infof("Starting deployment controller")
go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
Run(s.DeploymentControllerOptions.ConcurrentDeploymentSyncs, util.NeverStop)
Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
}
}

volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.PersistentVolumeControllerOptions.VolumeConfigFlags)
provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.PersistentVolumeControllerOptions.VolumeConfigFlags)
volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}

pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod)
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod,
)
pvclaimBinder.Run()

pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod,
s.PersistentVolumeControllerOptions.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.PersistentVolumeControllerOptions.VolumeConfigFlags), cloud)
s.PVClaimBinderSyncPeriod,
s.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags),
cloud,
)
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()

if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(
persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))),
s.PersistentVolumeControllerOptions.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
@@ -287,8 +282,8 @@ func (s *CMServer) Run(_ []string) error {
rootCA = kubeconfig.CAData
}

if len(s.ServiceAccountControllerOptions.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountControllerOptions.ServiceAccountKeyFile)
if len(s.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
if err != nil {
glog.Errorf("Error reading key for service account token controller: %v", err)
} else {
@@ -48,15 +48,15 @@ func Test_nodeWithUpdatedStatus(t *testing.T) {

cm := cmoptions.NewCMServer()
kubecfg := kubeletoptions.NewKubeletServer()
assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeControllerOptions.NodeMonitorGracePeriod) // sanity check for defaults
assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeMonitorGracePeriod) // sanity check for defaults

n := testNode(0, api.ConditionTrue, "KubeletReady")
su := NewStatusUpdater(nil, cm.NodeControllerOptions.NodeMonitorPeriod, func() time.Time { return now })
su := NewStatusUpdater(nil, cm.NodeMonitorPeriod, func() time.Time { return now })
_, updated, err := su.nodeWithUpdatedStatus(n)
assert.NoError(t, err)
assert.False(t, updated, "no update expected b/c kubelet updated heartbeat just now")

n = testNode(-cm.NodeControllerOptions.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
n = testNode(-cm.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
n2, updated, err := su.nodeWithUpdatedStatus(n)
assert.NoError(t, err)
assert.True(t, updated, "update expected b/c kubelet's update is older than DefaultNodeMonitorGracePeriod")
@@ -61,15 +61,14 @@ kube-controller-manager
--cloud-provider="": The provider for cloud services. Empty string for no provider.
--cluster-cidr=<nil>: CIDR Range for Pods in cluster.
--cluster-name="kubernetes": The instance prefix for the cluster
--concurrent-daemonset-controller-syncs=2: The number of deamon sets that are allowed to sync concurrently.
--concurrent-deployment-syncs=5: The number of deployment objects that are allowed to sync concurrently. Larger number = more reponsive deployments, but more CPU (and network) load
--concurrent-endpoint-syncs=5: The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load
--concurrent-job-syncs=5: The number of job objects that are allowed to sync concurrently.
--concurrent-resource-quota-syncs=5: The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load
--concurrent_rc_syncs=5: The number of replication controllers that are allowed to sync concurrently. Larger number = more reponsive replica management, but more CPU (and network) load
--deleting-pods-burst=10: Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.
--deleting-pods-qps=0.1: Number of nodes per second on which pods are deleted in case of node failure.
--enable-hostpath-provisioner[=false]: Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.
--deployment-controller-sync-period=30s: Period for syncing the deployments.
--enable-hostpath-provisioner[=false]: Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--horizontal-pod-autoscaler-sync-period=30s: The period for syncing the number of pods in horizontal pod autoscaler.
--kube-api-burst=30: Burst to use while talking with kubernetes apiserver

@@ -91,12 +90,11 @@ kube-controller-manager
--port=10252: The port that the controller-manager's http service runs on
--profiling[=true]: Enable profiling via web interface host:port/debug/pprof/
--pv-recycler-increment-timeout-nfs=30: the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod
--pv-recycler-maximum-retry=3: Maximum number of attempts to recycle or delete a persistent volume
--pv-recycler-minimum-timeout-hostpath=60: The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-minimum-timeout-hostpath=60: The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-minimum-timeout-nfs=300: The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod
--pv-recycler-pod-template-filepath-hostpath="": The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-pod-template-filepath-nfs="": The file path to a pod definition used as a template for NFS persistent volume recycling
--pv-recycler-timeout-increment-hostpath=30: the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-timeout-increment-hostpath=30: the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.
--pvclaimbinder-sync-period=10m0s: The period for syncing persistent volumes and persistent volume claims
--resource-quota-sync-period=5m0s: The period for syncing quota usage status in the system
--root-ca-file="": If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
@@ -45,10 +45,8 @@ cluster-dns
cluster-domain
cluster-name
cluster-tag
concurrent-daemonset-controller-syncs
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-job-syncs
concurrent-resource-quota-syncs
config-sync-period
configure-cbr0
@@ -16,11 +16,7 @@ limitations under the License.

package componentconfig

import (
"k8s.io/kubernetes/pkg/api/unversioned"

"github.com/spf13/pflag"
)
import "k8s.io/kubernetes/pkg/api/unversioned"

type KubeProxyConfiguration struct {
unversioned.TypeMeta
@@ -345,24 +341,3 @@ type LeaderElectionConfiguration struct {
// leader election is enabled.
RetryPeriod unversioned.Duration `json:"retryPeriod"`
}

// AddFlags binds the common LeaderElectionCLIConfig flags to a flagset
func (l *LeaderElectionConfiguration) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+
"Start a leader election client and gain leadership before "+
"executing the main loop. Enable this when running replicated "+
"components for high availability.")
fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+
"The duration that non-leader candidates will wait after observing a leadership "+
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
"slot. This is effectively the maximum duration that a leader can be stopped "+
"before it is replaced by another candidate. This is only applicable if leader "+
"election is enabled.")
fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+
"The interval between attempts by the acting master to renew a leadership slot "+
"before it stops leading. This must be less than or equal to the lease duration. "+
"This is only applicable if leader election is enabled.")
fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+
"The duration the clients should wait between attempting acquisition and renewal "+
"of a leadership. This is only applicable if leader election is enabled.")
}
@@ -65,6 +65,7 @@ import (
"k8s.io/kubernetes/pkg/util/wait"

"github.com/golang/glog"
"github.com/spf13/pflag"
)

const (
@@ -340,3 +341,24 @@ func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfigur
RetryPeriod: unversioned.Duration{DefaultRetryPeriod},
}
}

// BindFlags binds the common LeaderElectionCLIConfig flags to a flagset
func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) {
fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+
"Start a leader election client and gain leadership before "+
"executing the main loop. Enable this when running replicated "+
"components for high availability.")
fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+
"The duration that non-leader candidates will wait after observing a leadership "+
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
"slot. This is effectively the maximum duration that a leader can be stopped "+
"before it is replaced by another candidate. This is only applicable if leader "+
"election is enabled.")
fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+
"The interval between attempts by the acting master to renew a leadership slot "+
"before it stops leading. This must be less than or equal to the lease duration. "+
"This is only applicable if leader election is enabled.")
fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+
"The duration the clients should wait between attempting acquisition and renewal "+
"of a leadership. This is only applicable if leader election is enabled.")
}
@@ -1,35 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
"github.com/spf13/pflag"
)

type DaemonControllerOptions struct {
ConcurrentDSCSyncs int
}

func NewDaemonControllerOptions() DaemonControllerOptions {
return DaemonControllerOptions{
ConcurrentDSCSyncs: 2,
}
}

func (o *DaemonControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&o.ConcurrentDSCSyncs, "concurrent-daemonset-controller-syncs", o.ConcurrentDSCSyncs, "The number of deamon sets that are allowed to sync concurrently.")
}
@@ -1,36 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
"github.com/spf13/pflag"
)

type DeploymentControllerOptions struct {
ConcurrentDeploymentSyncs int
}

func NewDeploymentControllerOptions() DeploymentControllerOptions {
return DeploymentControllerOptions{
ConcurrentDeploymentSyncs: 5,
}
}

func (o *DeploymentControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&o.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", o.ConcurrentDeploymentSyncs,
"The number of deployment objects that are allowed to sync concurrently. Larger number = more reponsive deployments, but more CPU (and network) load")
}
@@ -1,36 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
"github.com/spf13/pflag"
)

type EndpointControllerOptions struct {
ConcurrentEndpointSyncs int
}

func NewEndpointControllerOptions() EndpointControllerOptions {
return EndpointControllerOptions{
ConcurrentEndpointSyncs: 5,
}
}

func (o *EndpointControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&o.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", o.ConcurrentEndpointSyncs,
"The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
}
@ -1,37 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "github.com/spf13/pflag"
)

type GarbageCollectorOptions struct {
    TerminatedPodGCThreshold int
}

func NewGarbageCollectorOptions() GarbageCollectorOptions {
    return GarbageCollectorOptions{
        TerminatedPodGCThreshold: 12500,
    }
}

func (o *GarbageCollectorOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", o.TerminatedPodGCThreshold,
        "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. "+
            "If <= 0, the terminated pod garbage collector is disabled.")
}
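The terminated-pod-gc-threshold help text states that a value of zero or less disables the collector. The sketch below illustrates that guard under stated assumptions: startTerminatedPodGC is a hypothetical stand-in for launching the collector and is not a function from this change.

package main

import "fmt"

type GarbageCollectorOptions struct {
    TerminatedPodGCThreshold int
}

func NewGarbageCollectorOptions() GarbageCollectorOptions {
    return GarbageCollectorOptions{TerminatedPodGCThreshold: 12500}
}

// startTerminatedPodGC is a hypothetical placeholder for starting the
// terminated pod garbage collector with the configured threshold.
func startTerminatedPodGC(threshold int) {
    fmt.Printf("terminated pod GC enabled, threshold=%d\n", threshold)
}

func main() {
    o := NewGarbageCollectorOptions()
    // Per the flag help text, a non-positive threshold leaves the collector disabled.
    if o.TerminatedPodGCThreshold > 0 {
        startTerminatedPodGC(o.TerminatedPodGCThreshold)
    }
}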
@ -1,35 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "github.com/spf13/pflag"
)

type JobControllerOptions struct {
    ConcurrentJobSyncs int
}

func NewJobControllerOptions() JobControllerOptions {
    return JobControllerOptions{
        ConcurrentJobSyncs: 5,
    }
}

func (o *JobControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentJobSyncs, "concurrent-job-syncs", o.ConcurrentJobSyncs, "The number of job objects that are allowed to sync concurrently.")
}
@ -1,37 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "time"

    "github.com/spf13/pflag"
)

type NamespaceControllerOptions struct {
    NamespaceSyncPeriod time.Duration
}

func NewNamespaceControllerOptions() NamespaceControllerOptions {
    return NamespaceControllerOptions{
        NamespaceSyncPeriod: 5 * time.Minute,
    }
}

func (o *NamespaceControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&o.NamespaceSyncPeriod, "namespace-sync-period", o.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
}
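Duration-typed options such as NamespaceSyncPeriod are registered with pflag's DurationVar, so on the command line they accept Go duration syntax (for example 30s, 10m, 1h30m). A minimal sketch, assuming a standalone FlagSet; the flag value shown is illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/spf13/pflag"
)

func main() {
    syncPeriod := 5 * time.Minute // default from NewNamespaceControllerOptions above
    fs := pflag.NewFlagSet("example", pflag.ExitOnError)
    fs.DurationVar(&syncPeriod, "namespace-sync-period", syncPeriod,
        "The period for syncing namespace life-cycle updates")
    // Duration flags parse Go duration strings such as 30s, 10m, or 1h30m.
    fs.Parse([]string{"--namespace-sync-period=10m"})
    fmt.Println(syncPeriod) // 10m0s
}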
@ -1,67 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "net"
    "time"

    "github.com/spf13/pflag"
)

type NodeControllerOptions struct {
    AllocateNodeCIDRs      bool
    ClusterCIDR            net.IPNet
    DeletingPodsBurst      int
    DeletingPodsQps        float32
    NodeMonitorGracePeriod time.Duration
    NodeMonitorPeriod      time.Duration
    NodeStartupGracePeriod time.Duration
    PodEvictionTimeout     time.Duration
}

func NewNodeControllerOptions() NodeControllerOptions {
    return NodeControllerOptions{
        AllocateNodeCIDRs:      false,
        DeletingPodsBurst:      10,
        DeletingPodsQps:        0.1,
        NodeMonitorGracePeriod: 40 * time.Second,
        NodeMonitorPeriod:      5 * time.Second,
        NodeStartupGracePeriod: 60 * time.Second,
        PodEvictionTimeout:     5 * time.Minute,
    }
}

func (o *NodeControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.BoolVar(&o.AllocateNodeCIDRs, "allocate-node-cidrs", o.AllocateNodeCIDRs, "Should CIDRs for Pods be allocated and set on the cloud provider.")
    fs.IPNetVar(&o.ClusterCIDR, "cluster-cidr", o.ClusterCIDR, "CIDR Range for Pods in cluster.")
    fs.IntVar(&o.DeletingPodsBurst, "deleting-pods-burst", o.DeletingPodsBurst,
        "Number of nodes on which pods are deleted in a burst in case of node failure. For more details look into RateLimiter.")
    fs.Float32Var(&o.DeletingPodsQps, "deleting-pods-qps", o.DeletingPodsQps,
        "Number of nodes per second on which pods are deleted in case of node failure.")
    fs.DurationVar(&o.NodeMonitorGracePeriod, "node-monitor-grace-period", o.NodeMonitorGracePeriod,
        "Amount of time which we allow a running Node to be unresponsive before marking it unhealthy. "+
            "Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
            "where N means number of retries allowed for kubelet to post node status.")
    fs.DurationVar(&o.NodeMonitorPeriod, "node-monitor-period", o.NodeMonitorPeriod,
        "The period for syncing NodeStatus in NodeController.")
    fs.DurationVar(&o.NodeStartupGracePeriod, "node-startup-grace-period", o.NodeStartupGracePeriod,
        "Amount of time which we allow a starting Node to be unresponsive before marking it unhealthy.")
    fs.DurationVar(&o.PodEvictionTimeout, "pod-eviction-timeout", o.PodEvictionTimeout,
        "The grace period for deleting pods on failed nodes.")
}
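NodeControllerOptions mixes several flag types: a bool, a net.IPNet bound with pflag's IPNetVar, an int, a float32, and several durations. The sketch below shows how the CIDR and QPS flags parse, assuming a standalone FlagSet; the CIDR and QPS values are illustrative, not defaults from this change.

package main

import (
    "fmt"
    "net"

    "github.com/spf13/pflag"
)

func main() {
    var clusterCIDR net.IPNet
    var deletingPodsQps float32 = 0.1 // default from NewNodeControllerOptions above

    fs := pflag.NewFlagSet("example", pflag.ExitOnError)
    fs.IPNetVar(&clusterCIDR, "cluster-cidr", clusterCIDR, "CIDR Range for Pods in cluster.")
    fs.Float32Var(&deletingPodsQps, "deleting-pods-qps", deletingPodsQps,
        "Number of nodes per second on which pods are deleted in case of node failure.")

    // The values below are illustrative; pflag parses the CIDR string into a net.IPNet.
    fs.Parse([]string{"--cluster-cidr=10.244.0.0/16", "--deleting-pods-qps=0.5"})
    fmt.Println(clusterCIDR.String(), deletingPodsQps) // 10.244.0.0/16 0.5
}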
@ -1,39 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "time"

    "github.com/spf13/pflag"
)

type PodAutoscalerOptions struct {
    HorizontalPodAutoscalerSyncPeriod time.Duration
}

func NewPodAutoscalerOptions() PodAutoscalerOptions {
    return PodAutoscalerOptions{
        HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
    }
}

func (o *PodAutoscalerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&o.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", o.HorizontalPodAutoscalerSyncPeriod,
        "The period for syncing the number of pods in horizontal pod autoscaler.",
    )
}
@ -1,35 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "github.com/spf13/pflag"
)

type ReplicationControllerOptions struct {
    ConcurrentRCSyncs int
}

func NewReplicationControllerOptions() ReplicationControllerOptions {
    return ReplicationControllerOptions{
        ConcurrentRCSyncs: 5,
    }
}

func (o *ReplicationControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentRCSyncs, "concurrent_rc_syncs", o.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
}
@ -1,41 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "time"

    "github.com/spf13/pflag"
)

type ResourceQuotaControllerOptions struct {
    ConcurrentResourceQuotaSyncs int
    ResourceQuotaSyncPeriod      time.Duration
}

func NewResourceQuotaControllerOptions() ResourceQuotaControllerOptions {
    return ResourceQuotaControllerOptions{
        ConcurrentResourceQuotaSyncs: 5,
        ResourceQuotaSyncPeriod:      5 * time.Minute,
    }
}

func (o *ResourceQuotaControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", o.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
    fs.DurationVar(&o.ResourceQuotaSyncPeriod, "resource-quota-sync-period", o.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
}
@ -1,37 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "time"

    "github.com/spf13/pflag"
)

type ServiceControllerOptions struct {
    ServiceSyncPeriod time.Duration
}

func NewServiceControllerOptions() ServiceControllerOptions {
    return ServiceControllerOptions{
        ServiceSyncPeriod: 5 * time.Minute,
    }
}

func (o *ServiceControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&o.ServiceSyncPeriod, "service-sync-period", o.ServiceSyncPeriod, "The period for syncing services with their external load balancers")
}
@ -1,33 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "github.com/spf13/pflag"
)

type ServiceAccountControllerOptions struct {
    ServiceAccountKeyFile string
}

func NewServiceAccountControllerOptions() ServiceAccountControllerOptions {
    return ServiceAccountControllerOptions{}
}

func (o *ServiceAccountControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&o.ServiceAccountKeyFile, "service-account-private-key-file", o.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
}
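Taken together, these per-controller option types are meant to be composed into a single server configuration whose AddFlags delegates to each of them. The sketch below illustrates that composition with a hypothetical ControllerManagerOptions aggregate; the aggregate type, its field names, and the abbreviated option structs are assumptions made only so the example compiles on its own, not code from this change.

package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

// The two option types below mirror the files shown above; they are repeated
// here in abbreviated form only so this sketch is self-contained.
type DeploymentControllerOptions struct{ ConcurrentDeploymentSyncs int }
type EndpointControllerOptions struct{ ConcurrentEndpointSyncs int }

func (o *DeploymentControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", o.ConcurrentDeploymentSyncs, "")
}

func (o *EndpointControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", o.ConcurrentEndpointSyncs, "")
}

// ControllerManagerOptions is a hypothetical aggregate (not from the source)
// showing the composition this layout enables: one flag set, with each
// controller owning its own options and flag registration.
type ControllerManagerOptions struct {
    Deployment DeploymentControllerOptions
    Endpoint   EndpointControllerOptions
}

func (o *ControllerManagerOptions) AddFlags(fs *pflag.FlagSet) {
    o.Deployment.AddFlags(fs)
    o.Endpoint.AddFlags(fs)
}

func main() {
    o := ControllerManagerOptions{
        Deployment: DeploymentControllerOptions{ConcurrentDeploymentSyncs: 5},
        Endpoint:   EndpointControllerOptions{ConcurrentEndpointSyncs: 5},
    }
    fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)
    o.AddFlags(fs)
    fs.Parse([]string{"--concurrent-endpoint-syncs=20"})
    fmt.Println(o.Deployment.ConcurrentDeploymentSyncs, o.Endpoint.ConcurrentEndpointSyncs) // 5 20
}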
@ -21,6 +21,7 @@ import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/apis/componentconfig"
    "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
    "k8s.io/kubernetes/pkg/client/leaderelection"
    "k8s.io/kubernetes/plugin/pkg/scheduler/factory"

    "github.com/spf13/pflag"
@ -65,5 +66,5 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
    fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
    fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
    fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
    s.LeaderElection.AddFlags(fs)
    leaderelection.BindFlags(&s.LeaderElection, fs)
}