move controller manager to component config
@@ -125,7 +125,7 @@ func Run(s *options.CMServer) error {
 	mux.Handle("/metrics", prometheus.Handler())

 	server := &http.Server{
-		Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
+		Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
 		Handler: mux,
 	}
 	glog.Fatal(server.ListenAndServe())
@@ -193,14 +193,16 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)
 	}

+	// this cidr has been validated already
+	_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
 	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
-		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+		s.PodEvictionTimeout.Duration, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
-	nodeController.Run(s.NodeSyncPeriod)
+		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
+	nodeController.Run(s.NodeSyncPeriod.Duration)

 	serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
-	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
+	if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
 		glog.Errorf("Failed to start service controller: %v", err)
 	}

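The `.Duration` selectors in this hunk (and throughout the rest of the diff) come from the component-config types wrapping time.Duration in the serializable unversioned.Duration type instead of using the bare value. A rough, self-contained sketch of why call sites now unwrap the field, assuming unversioned.Duration is essentially a struct embedding time.Duration with string-based JSON marshaling (the diff itself does not show that type):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration embeds time.Duration so callers can reach the raw value through the
// promoted field, e.g. s.NodeSyncPeriod.Duration in the hunk above.
type Duration struct {
	time.Duration
}

// MarshalJSON writes the duration in its human-readable form ("10s", "5m0s").
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.Duration.String())
}

// UnmarshalJSON parses the human-readable form back into a time.Duration.
func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

func main() {
	nodeSyncPeriod := Duration{10 * time.Second}
	out, _ := json.Marshal(nodeSyncPeriod)
	fmt.Println(string(out))                 // "10s"
	fmt.Println(nodeSyncPeriod.Duration * 2) // 20s, operating on the bare time.Duration
}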
@@ -210,8 +212,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		} else if routes, ok := cloud.Routes(); !ok {
 			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
 		} else {
-			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
-			routeController.Run(s.NodeSyncPeriod)
+			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
+			routeController.Run(s.NodeSyncPeriod.Duration)
 		}
 	} else {
 		glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
@@ -219,7 +221,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig

 	go resourcequotacontroller.NewResourceQuotaController(
 		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
-		controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+		controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
@@ -241,7 +243,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}

-	namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod)
+	namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod.Duration)
 	go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)

 	groupVersion := "extensions/v1beta1"
@@ -260,13 +262,13 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 			metrics.DefaultHeapsterPort,
 		)
 		podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
-			Run(s.HorizontalPodAutoscalerSyncPeriod)
+			Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
 	}

 	if containsResource(resources, "daemonsets") {
 		glog.Infof("Starting daemon set controller")
 		go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
-			Run(s.ConcurrentDSCSyncs, wait.NeverStop)
+			Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
 	}

 	if containsResource(resources, "jobs") {
@@ -288,20 +290,20 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		}
 	}

-	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
-	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
+	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
+	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
 	if err != nil {
 		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
 	}

-	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
+	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
 	pvclaimBinder.Run()

 	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
 		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
-		s.PVClaimBinderSyncPeriod,
-		s.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
-		ProbeRecyclableVolumePlugins(s.VolumeConfigFlags),
+		s.PVClaimBinderSyncPeriod.Duration,
+		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
 		cloud,
 	)
 	if err != nil {
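This hunk swaps the flat VolumeConfigFlags bag for componentconfig.VolumeConfiguration, which nests the recycler settings one level deeper (s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry). A rough local sketch of the shape implied by those field accesses; the exact field types and tags in componentconfig may differ:

package main

import "fmt"

type PersistentVolumeRecyclerConfiguration struct {
	MaximumRetry                int
	MinimumTimeoutNFS           int
	IncrementTimeoutNFS         int
	PodTemplateFilePathNFS      string
	MinimumTimeoutHostPath      int
	IncrementTimeoutHostPath    int
	PodTemplateFilePathHostPath string
}

type VolumeConfiguration struct {
	EnableHostPathProvisioning            bool
	PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration
}

func main() {
	// What used to be a flat set of PersistentVolumeRecycler* fields on VolumeConfigFlags
	// now reads as a nested path, mirroring the accessors in the hunk above.
	cfg := VolumeConfiguration{
		EnableHostPathProvisioning: false,
		PersistentVolumeRecyclerConfiguration: PersistentVolumeRecyclerConfiguration{
			MaximumRetry:      3,
			MinimumTimeoutNFS: 300,
		},
	}
	fmt.Println(cfg.PersistentVolumeRecyclerConfiguration.MaximumRetry) // 3
}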
@@ -310,7 +312,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	pvRecycler.Run()

 	if provisioner != nil {
-		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
+		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, volumePlugins, provisioner, cloud)
 		if err != nil {
 			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
 		}
@@ -21,9 +21,9 @@ limitations under the License.
 package options

 import (
-	"net"
 	"time"

+	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/client/leaderelection"
 	"k8s.io/kubernetes/pkg/master/ports"
@@ -33,107 +33,55 @@ import (

 // CMServer is the main context object for the controller manager.
 type CMServer struct {
-	Port int
-	Address net.IP
-	CloudProvider string
-	CloudConfigFile string
-	ConcurrentEndpointSyncs int
-	ConcurrentRCSyncs int
-	ConcurrentRSSyncs int
-	ConcurrentDSCSyncs int
-	ConcurrentJobSyncs int
-	ConcurrentResourceQuotaSyncs int
-	ConcurrentDeploymentSyncs int
-	ConcurrentNamespaceSyncs int
-	ServiceSyncPeriod time.Duration
-	NodeSyncPeriod time.Duration
-	ResourceQuotaSyncPeriod time.Duration
-	NamespaceSyncPeriod time.Duration
-	PVClaimBinderSyncPeriod time.Duration
-	VolumeConfigFlags VolumeConfigFlags
-	TerminatedPodGCThreshold int
-	HorizontalPodAutoscalerSyncPeriod time.Duration
-	DeploymentControllerSyncPeriod time.Duration
-	MinResyncPeriod time.Duration
-	RegisterRetryCount int
-	NodeMonitorGracePeriod time.Duration
-	NodeStartupGracePeriod time.Duration
-	NodeMonitorPeriod time.Duration
-	NodeStatusUpdateRetry int
-	PodEvictionTimeout time.Duration
-	DeletingPodsQps float32
-	DeletingPodsBurst int
-	ServiceAccountKeyFile string
-	RootCAFile string
+	componentconfig.KubeControllerManagerConfiguration

-	ClusterName string
-	ClusterCIDR net.IPNet
-	AllocateNodeCIDRs bool
-	EnableProfiling bool
-
-	Master string
-	Kubeconfig string
-	KubeAPIQPS float32
-	KubeAPIBurst int
-
-	LeaderElection componentconfig.LeaderElectionConfiguration
-}
-
-// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
-// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
-// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
-// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
-type VolumeConfigFlags struct {
-	PersistentVolumeRecyclerMaximumRetry int
-	PersistentVolumeRecyclerMinimumTimeoutNFS int
-	PersistentVolumeRecyclerPodTemplateFilePathNFS string
-	PersistentVolumeRecyclerIncrementTimeoutNFS int
-	PersistentVolumeRecyclerPodTemplateFilePathHostPath string
-	PersistentVolumeRecyclerMinimumTimeoutHostPath int
-	PersistentVolumeRecyclerIncrementTimeoutHostPath int
-	EnableHostPathProvisioning bool
+	Master string
+	Kubeconfig string
 }

 // NewCMServer creates a new CMServer with a default config.
 func NewCMServer() *CMServer {
 	s := CMServer{
-		Port: ports.ControllerManagerPort,
-		Address: net.ParseIP("0.0.0.0"),
-		ConcurrentEndpointSyncs: 5,
-		ConcurrentRCSyncs: 5,
-		ConcurrentRSSyncs: 5,
-		ConcurrentDSCSyncs: 2,
-		ConcurrentJobSyncs: 5,
-		ConcurrentResourceQuotaSyncs: 5,
-		ConcurrentDeploymentSyncs: 5,
-		ConcurrentNamespaceSyncs: 2,
-		ServiceSyncPeriod: 5 * time.Minute,
-		NodeSyncPeriod: 10 * time.Second,
-		ResourceQuotaSyncPeriod: 5 * time.Minute,
-		NamespaceSyncPeriod: 5 * time.Minute,
-		PVClaimBinderSyncPeriod: 10 * time.Minute,
-		HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
-		DeploymentControllerSyncPeriod: 30 * time.Second,
-		MinResyncPeriod: 12 * time.Hour,
-		RegisterRetryCount: 10,
-		PodEvictionTimeout: 5 * time.Minute,
-		NodeMonitorGracePeriod: 40 * time.Second,
-		NodeStartupGracePeriod: 60 * time.Second,
-		NodeMonitorPeriod: 5 * time.Second,
-		ClusterName: "kubernetes",
-		TerminatedPodGCThreshold: 12500,
-		VolumeConfigFlags: VolumeConfigFlags{
-			// default values here
-			PersistentVolumeRecyclerMaximumRetry: 3,
-			PersistentVolumeRecyclerMinimumTimeoutNFS: 300,
-			PersistentVolumeRecyclerIncrementTimeoutNFS: 30,
-			PersistentVolumeRecyclerMinimumTimeoutHostPath: 60,
-			PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
-			EnableHostPathProvisioning: false,
+		KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
+			Port: ports.ControllerManagerPort,
+			Address: "0.0.0.0",
+			ConcurrentEndpointSyncs: 5,
+			ConcurrentRCSyncs: 5,
+			ConcurrentRSSyncs: 5,
+			ConcurrentDaemonSetSyncs: 2,
+			ConcurrentJobSyncs: 5,
+			ConcurrentResourceQuotaSyncs: 5,
+			ConcurrentDeploymentSyncs: 5,
+			ConcurrentNamespaceSyncs: 2,
+			ServiceSyncPeriod: unversioned.Duration{5 * time.Minute},
+			NodeSyncPeriod: unversioned.Duration{10 * time.Second},
+			ResourceQuotaSyncPeriod: unversioned.Duration{5 * time.Minute},
+			NamespaceSyncPeriod: unversioned.Duration{5 * time.Minute},
+			PVClaimBinderSyncPeriod: unversioned.Duration{10 * time.Minute},
+			HorizontalPodAutoscalerSyncPeriod: unversioned.Duration{30 * time.Second},
+			DeploymentControllerSyncPeriod: unversioned.Duration{30 * time.Second},
+			MinResyncPeriod: unversioned.Duration{12 * time.Hour},
+			RegisterRetryCount: 10,
+			PodEvictionTimeout: unversioned.Duration{5 * time.Minute},
+			NodeMonitorGracePeriod: unversioned.Duration{40 * time.Second},
+			NodeStartupGracePeriod: unversioned.Duration{60 * time.Second},
+			NodeMonitorPeriod: unversioned.Duration{5 * time.Second},
+			ClusterName: "kubernetes",
+			TerminatedPodGCThreshold: 12500,
+			VolumeConfiguration: componentconfig.VolumeConfiguration{
+				EnableHostPathProvisioning: false,
+				PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
+					MaximumRetry: 3,
+					MinimumTimeoutNFS: 300,
+					IncrementTimeoutNFS: 30,
+					MinimumTimeoutHostPath: 60,
+					IncrementTimeoutHostPath: 30,
+				},
+			},
+			KubeAPIQPS: 20.0,
+			KubeAPIBurst: 30,
+			LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
 		},
-		KubeAPIQPS: 20.0,
-		KubeAPIBurst: 30,
-		LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
 	}
 	return &s
 }
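The struct rewrite above is the core of the move: CMServer now embeds componentconfig.KubeControllerManagerConfiguration, and Go field promotion keeps expressions such as &s.Port and s.ClusterName compiling unchanged elsewhere in the package. A minimal, self-contained illustration of that mechanism, using placeholder type names rather than the real componentconfig definitions:

package main

import "fmt"

type ControllerManagerConfiguration struct {
	Port        int
	Address     string
	ClusterName string
}

type Server struct {
	ControllerManagerConfiguration // embedded: its fields are promoted onto Server

	Master     string
	Kubeconfig string
}

func main() {
	s := Server{
		ControllerManagerConfiguration: ControllerManagerConfiguration{
			Port:        10252,
			Address:     "0.0.0.0",
			ClusterName: "kubernetes",
		},
	}
	// Both spellings refer to the same field, which is why flag-binding code can
	// keep taking &s.Port without caring where the field is declared.
	fmt.Println(s.Port, s.ControllerManagerConfiguration.Port, s.ClusterName)
}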
@@ -141,7 +89,7 @@ func NewCMServer() *CMServer {
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
-	fs.IPVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
+	fs.Var(componentconfig.IPVar{&s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
 	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
 	fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
 	fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
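With Address now a plain string on the configuration struct, the --address flag is bound through a pflag.Value adapter instead of fs.IPVar. A hedged sketch of how such an adapter can validate the input while still storing a serialization-friendly string; the real componentconfig.IPVar may differ in details:

package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

// IPVar wraps a *string so the flag machinery validates the value as an IP
// address while the config struct keeps a plain string field.
type IPVar struct {
	Val *string
}

func (v IPVar) Set(s string) error {
	if net.ParseIP(s) == nil {
		return fmt.Errorf("%q is not a valid IP address", s)
	}
	*v.Val = s
	return nil
}

func (v IPVar) String() string {
	if v.Val == nil {
		return ""
	}
	return *v.Val
}

func (v IPVar) Type() string { return "ip" }

func main() {
	address := "0.0.0.0"
	fs := pflag.NewFlagSet("controller-manager", pflag.ContinueOnError)
	fs.Var(IPVar{&address}, "address", "The IP address to serve on")
	_ = fs.Parse([]string{"--address=127.0.0.1"})
	fmt.Println(address) // 127.0.0.1
}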
@@ -150,42 +98,42 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
 	fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
 	fs.IntVar(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
-	fs.DurationVar(&s.ServiceSyncPeriod, "service-sync-period", s.ServiceSyncPeriod, "The period for syncing services with their external load balancers")
-	fs.DurationVar(&s.NodeSyncPeriod, "node-sync-period", s.NodeSyncPeriod, ""+
+	fs.DurationVar(&s.ServiceSyncPeriod.Duration, "service-sync-period", s.ServiceSyncPeriod.Duration, "The period for syncing services with their external load balancers")
+	fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", s.NodeSyncPeriod.Duration, ""+
 		"The period for syncing nodes from cloudprovider. Longer periods will result in "+
 		"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
-	fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
-	fs.DurationVar(&s.NamespaceSyncPeriod, "namespace-sync-period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
-	fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
-	fs.DurationVar(&s.MinResyncPeriod, "min-resync-period", s.MinResyncPeriod, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
-	fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
-	fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
-	fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
-	fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
-	fs.BoolVar(&s.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfigFlags.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
+	fs.DurationVar(&s.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
+	fs.DurationVar(&s.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
+	fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
+	fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
+	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
+	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
+	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
+	fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
+	fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
 	fs.IntVar(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
-	fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod, "The period for syncing the number of pods in horizontal pod autoscaler.")
-	fs.DurationVar(&s.DeploymentControllerSyncPeriod, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod, "Period for syncing the deployments.")
-	fs.DurationVar(&s.PodEvictionTimeout, "pod-eviction-timeout", s.PodEvictionTimeout, "The grace period for deleting pods on failed nodes.")
+	fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
+	fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
+	fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
 	fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
 	fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
 	fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
 		"The number of retries for initial node registration. Retry interval equals node-sync-period.")
 	fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
-	fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", s.NodeMonitorGracePeriod,
+	fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration,
 		"Amount of time which we allow running Node to be unresponsive before marking it unhealty. "+
 		"Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
 		"where N means number of retries allowed for kubelet to post node status.")
-	fs.DurationVar(&s.NodeStartupGracePeriod, "node-startup-grace-period", s.NodeStartupGracePeriod,
+	fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration,
 		"Amount of time which we allow starting Node to be unresponsive before marking it unhealty.")
-	fs.DurationVar(&s.NodeMonitorPeriod, "node-monitor-period", s.NodeMonitorPeriod,
+	fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration,
 		"The period for syncing NodeStatus in NodeController.")
 	fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
 	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
 	fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
-	fs.IPNetVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
+	fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
 	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
 	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
 	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
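Similarly, --cluster-cidr is now bound as a plain string flag, so validation moves to the point of use, as the net.ParseCIDR call in the StartControllers hunk near the top of this diff shows. A small stand-alone illustration of that pattern:

package main

import (
	"fmt"
	"net"
)

func main() {
	clusterCIDRFlag := "10.244.0.0/16" // value as it would arrive from the string flag

	// Validation happens where the value is consumed; an invalid flag value
	// surfaces as an error here rather than being rejected by the flag machinery.
	_, clusterCIDR, err := net.ParseCIDR(clusterCIDRFlag)
	if err != nil {
		fmt.Println("invalid --cluster-cidr:", err)
		return
	}

	// clusterCIDR is the *net.IPNet handed to the node and route controllers.
	fmt.Println(clusterCIDR.String(), clusterCIDR.Contains(net.ParseIP("10.244.1.7")))
}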
@@ -23,9 +23,8 @@ import (

 	"fmt"

-	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
-
 	// Cloud providers
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers"

 	// Volume plugins
@@ -45,7 +44,7 @@ import (
 )

 // ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
-func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.VolumePlugin {
+func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
 	allPlugins := []volume.VolumePlugin{}

 	// The list of plugins to probe is decided by this binary, not
@@ -53,27 +52,27 @@ func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.Volu
 	// initialized later.

 	// Each plugin can make use of VolumeConfig. The single arg to this func contains *all* enumerated
-	// CLI flags meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
+	// options meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
 	// for a specific plugin and pass that instance to the plugin's ProbeVolumePlugins(config) func.

 	// HostPath recycling is for testing and development purposes only!
 	hostPathConfig := volume.VolumeConfig{
-		RecyclerMinimumTimeout: flags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
-		RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
+		RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath,
+		RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath,
 		RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
 	}
-	if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, &hostPathConfig); err != nil {
-		glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, err)
+	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
+		glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
 	}
 	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)

 	nfsConfig := volume.VolumeConfig{
-		RecyclerMinimumTimeout: flags.PersistentVolumeRecyclerMinimumTimeoutNFS,
-		RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutNFS,
+		RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS,
+		RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS,
 		RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
 	}
-	if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, &nfsConfig); err != nil {
-		glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, err)
+	if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
+		glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
 	}
 	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

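The two knobs copied into volume.VolumeConfig above are described by the flag help text in options.go as a minimum ActiveDeadlineSeconds plus an increment "per Gi". The exact upstream formula is not part of this diff, so the following is only an assumed illustration of how such values would combine:

package main

import "fmt"

// recyclerActiveDeadlineSeconds is an assumed combination of the two recycler
// settings: start from the configured minimum and add the per-Gi increment.
func recyclerActiveDeadlineSeconds(minimumTimeout, timeoutIncrement, volumeSizeGi int) int {
	return minimumTimeout + timeoutIncrement*volumeSizeGi
}

func main() {
	// Using the NFS defaults from NewCMServer (MinimumTimeoutNFS=300, IncrementTimeoutNFS=30).
	fmt.Println(recyclerActiveDeadlineSeconds(300, 30, 8)) // 540 seconds for an 8Gi volume
}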
@@ -88,9 +87,9 @@ func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.Volu
 // The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many.
 // We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues.
 // Not all cloudproviders have provisioning capability, which is the reason for the bool in the return to tell the caller to expect one or not.
-func NewVolumeProvisioner(cloud cloudprovider.Interface, flags options.VolumeConfigFlags) (volume.ProvisionableVolumePlugin, error) {
+func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
 	switch {
-	case cloud == nil && flags.EnableHostPathProvisioning:
+	case cloud == nil && config.EnableHostPathProvisioning:
 		return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
 	case cloud != nil && aws.ProviderName == cloud.ProviderName():
 		return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())