modify cloud-controller-manager config struct to adapt to option changes

stewart-yu 2018-05-16 16:26:48 +08:00
parent bbb48fd068
commit 14f7b959ff
2 changed files with 65 additions and 55 deletions


@@ -17,25 +17,39 @@ limitations under the License.
package app
import (
"time"
apiserver "k8s.io/apiserver/pkg/server"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
"k8s.io/kubernetes/pkg/apis/componentconfig"
)
// ExtraConfig is part of Config; you can also place your custom config here.
type ExtraConfig struct {
NodeStatusUpdateFrequency time.Duration
}
// Config is the main context object for the cloud controller manager.
type Config struct {
Generic genericcontrollermanager.Config
Extra ExtraConfig
ComponentConfig componentconfig.CloudControllerManagerConfiguration
SecureServing *apiserver.SecureServingInfo
// TODO: remove deprecated insecure serving
InsecureServing *genericcontrollermanager.InsecureServingInfo
Authentication apiserver.AuthenticationInfo
Authorization apiserver.AuthorizationInfo
// the general kube client
Client *clientset.Clientset
// the client only used for leader election
LeaderElectionClient *clientset.Clientset
// the rest config for the master
Kubeconfig *restclient.Config
// the event sink
EventRecorder record.EventRecorder
}
type completedConfig struct {
Generic genericcontrollermanager.CompletedConfig
Extra *ExtraConfig
*Config
}
// CompletedConfig is the same as Config, just to swap the private object.
@@ -46,10 +60,6 @@ type CompletedConfig struct {
// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
func (c *Config) Complete() *CompletedConfig {
cc := completedConfig{
c.Generic.Complete(),
&c.Extra,
}
cc := completedConfig{c}
return &CompletedConfig{&cc}
}
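The key structural change in this file: completedConfig no longer carries separate Generic and Extra members, it just embeds *Config, so every Config field is promoted onto the completed object. A minimal, self-contained sketch of that embedding pattern (the type and field names below are illustrative stand-ins, not the real k8s.io types):

```go
package main

import (
	"fmt"
	"time"
)

// Config stands in for the flattened cloud-controller-manager config.
type Config struct {
	NodeStatusUpdateFrequency time.Duration
}

// completedConfig embeds *Config, so every Config field is promoted.
type completedConfig struct {
	*Config
}

// CompletedConfig mirrors the "swap private object" wrapper from the diff.
type CompletedConfig struct {
	*completedConfig
}

// Complete fills in any required fields and wraps the config.
func (c *Config) Complete() *CompletedConfig {
	cc := completedConfig{c}
	return &CompletedConfig{&cc}
}

func main() {
	c := &Config{NodeStatusUpdateFrequency: 10 * time.Second}
	done := c.Complete()
	// Fields are reachable directly on the completed object,
	// with no .Generic or .Extra hop in between.
	fmt.Println(done.NodeStatusUpdateFrequency)
}
```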


@@ -85,13 +85,13 @@ the cloud specific control loops shipped with Kubernetes.`,
func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration {
return func() time.Duration {
factor := rand.Float64() + 1
return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor)
return time.Duration(float64(c.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor)
}
}
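resyncPeriod now reads MinResyncPeriod straight off ComponentConfig and scales it by a random factor in [1, 2). A stand-alone sketch of that jitter calculation (the 12h base value is only an example, not the configured default):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResync scales a base period by a random factor in [1, 2),
// mirroring the resyncPeriod helper in the diff.
func jitteredResync(min time.Duration) time.Duration {
	factor := rand.Float64() + 1
	return time.Duration(float64(min.Nanoseconds()) * factor)
}

func main() {
	// Example only: 12h stands in for GenericComponent.MinResyncPeriod.
	fmt.Println(jitteredResync(12 * time.Hour))
}
```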
// Run runs the ExternalCMServer. This should never exit.
func Run(c *cloudcontrollerconfig.CompletedConfig) error {
cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider.Name, c.Generic.ComponentConfig.CloudProvider.CloudConfigFile)
cloud, err := cloudprovider.InitCloudProvider(c.ComponentConfig.CloudProvider.Name, c.ComponentConfig.CloudProvider.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
@@ -100,7 +100,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
}
if cloud.HasClusterID() == false {
if c.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true {
if c.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true {
glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
} else {
glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
@@ -109,38 +109,38 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
// setup /configz endpoint
if cz, err := configz.New("componentconfig"); err == nil {
cz.Set(c.Generic.ComponentConfig)
cz.Set(c.ComponentConfig)
} else {
glog.Errorf("unable to register configz: %c", err)
}
// Start the controller manager HTTP server
stopCh := make(chan struct{})
if c.Generic.SecureServing != nil {
handler := genericcontrollermanager.NewBaseHandler(&c.Generic)
handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic)
if err := c.Generic.SecureServing.Serve(handler, 0, stopCh); err != nil {
if c.SecureServing != nil {
handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging)
handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication)
if err := c.SecureServing.Serve(handler, 0, stopCh); err != nil {
return err
}
}
if c.Generic.InsecureServing != nil {
handler := genericcontrollermanager.NewBaseHandler(&c.Generic)
handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic)
if err := c.Generic.InsecureServing.Serve(handler, 0, stopCh); err != nil {
if c.InsecureServing != nil {
handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging)
handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication)
if err := c.InsecureServing.Serve(handler, 0, stopCh); err != nil {
return err
}
}
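Both servers now receive only the pieces they actually need: NewBaseHandler gets the Debugging config and BuildHandlerChain gets the Authorization and Authentication infos instead of the whole Generic config. A rough net/http sketch of the base-handler-plus-chain shape (the function names, middleware body, and listen address are illustrative, not the real genericcontrollermanager API):

```go
package main

import (
	"fmt"
	"net/http"
)

// newBaseHandler stands in for NewBaseHandler: it returns the innermost
// mux serving endpoints such as healthz and debug handlers.
func newBaseHandler() http.Handler {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	return mux
}

// buildHandlerChain stands in for BuildHandlerChain: it wraps the base
// handler with authn/authz-style middleware and returns the outer handler.
func buildHandlerChain(base http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Authentication/authorization checks would run here before
		// delegating to the wrapped handler.
		base.ServeHTTP(w, r)
	})
}

func main() {
	handler := buildHandlerChain(newBaseHandler())
	// Example address only; both the secure and insecure servers in the
	// diff serve this same chained handler.
	if err := http.ListenAndServe("127.0.0.1:10253", handler); err != nil {
		fmt.Println(err)
	}
}
```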
run := func(stop <-chan struct{}) {
rootClientBuilder := controller.SimpleControllerClientBuilder{
ClientConfig: c.Generic.Kubeconfig,
ClientConfig: c.Kubeconfig,
}
var clientBuilder controller.ControllerClientBuilder
if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials {
if c.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials {
clientBuilder = controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
CoreClient: c.Generic.Client.CoreV1(),
AuthenticationClient: c.Generic.Client.AuthenticationV1(),
ClientConfig: restclient.AnonymousClientConfig(c.Kubeconfig),
CoreClient: c.Client.CoreV1(),
AuthenticationClient: c.Client.AuthenticationV1(),
Namespace: "kube-system",
}
} else {
@@ -152,7 +152,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
}
}
if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect {
if !c.ComponentConfig.GenericComponent.LeaderElection.LeaderElect {
run(nil)
panic("unreachable")
}
@@ -166,13 +166,13 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
id = id + "_" + string(uuid.NewUUID())
// Lock required for leader election
rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock,
rl, err := resourcelock.New(c.ComponentConfig.GenericComponent.LeaderElection.ResourceLock,
"kube-system",
"cloud-controller-manager",
c.Generic.LeaderElectionClient.CoreV1(),
c.LeaderElectionClient.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: c.Generic.EventRecorder,
EventRecorder: c.EventRecorder,
})
if err != nil {
glog.Fatalf("error creating lock: %v", err)
@@ -181,9 +181,9 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
// Try and become the leader and start cloud controller manager loops
leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration,
RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration,
RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration,
LeaseDuration: c.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration,
RenewDeadline: c.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration,
RetryPeriod: c.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
@@ -213,16 +213,16 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde
nodeController := cloudcontrollers.NewCloudNodeController(
sharedInformers.Core().V1().Nodes(),
client("cloud-node-controller"), cloud,
c.Generic.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
c.Extra.NodeStatusUpdateFrequency)
c.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
c.ComponentConfig.NodeStatusUpdateFrequency.Duration)
nodeController.Run(stop)
time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
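NodeStatusUpdateFrequency moves from ExtraConfig, where it was a plain time.Duration, into ComponentConfig, where it is a wrapped duration (metav1.Duration style), hence the trailing .Duration selector above. A small stdlib-only sketch of that wrapper pattern (Duration and componentConfig here are stand-ins, not the real apimachinery/componentconfig types):

```go
package main

import (
	"fmt"
	"time"
)

// Duration mimics the metav1.Duration shape: a struct embedding
// time.Duration so the value can round-trip through the component
// configuration API.
type Duration struct {
	time.Duration
}

// componentConfig stands in for the relevant slice of
// CloudControllerManagerConfiguration after the option move.
type componentConfig struct {
	NodeStatusUpdateFrequency Duration
}

func main() {
	cfg := componentConfig{NodeStatusUpdateFrequency: Duration{5 * time.Minute}}
	// The controller now reads
	// c.ComponentConfig.NodeStatusUpdateFrequency.Duration
	// instead of the old c.Extra.NodeStatusUpdateFrequency field.
	fmt.Println(cfg.NodeStatusUpdateFrequency.Duration)
}
```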
// Start the PersistentVolumeLabelController
pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud)
go pvlController.Run(5, stop)
time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
// Start the service controller
serviceController, err := servicecontroller.New(
@@ -230,34 +230,34 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde
client("service-controller"),
sharedInformers.Core().V1().Services(),
sharedInformers.Core().V1().Nodes(),
c.Generic.ComponentConfig.KubeCloudShared.ClusterName,
c.ComponentConfig.KubeCloudShared.ClusterName,
)
if err != nil {
glog.Errorf("Failed to start service controller: %v", err)
} else {
go serviceController.Run(stop, int(c.Generic.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
go serviceController.Run(stop, int(c.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
}
// If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller
if c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
if c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
if routes, ok := cloud.Routes(); !ok {
glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
} else {
var clusterCIDR *net.IPNet
if len(strings.TrimSpace(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
_, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)
if len(strings.TrimSpace(c.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
_, clusterCIDR, err = net.ParseCIDR(c.ComponentConfig.KubeCloudShared.ClusterCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
}
}
routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR)
go routeController.Run(stop, c.Generic.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR)
go routeController.Run(stop, c.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
}
} else {
glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
}
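The route controller only gets a cluster CIDR when the configured string is non-empty and parses; a parse failure is merely logged and the controller still starts with a nil CIDR. A stand-alone sketch of that net.ParseCIDR handling (the CIDR value is an example, not a default):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Example value standing in for KubeCloudShared.ClusterCIDR.
	raw := "10.244.0.0/16"

	var clusterCIDR *net.IPNet
	if len(strings.TrimSpace(raw)) != 0 {
		var err error
		_, clusterCIDR, err = net.ParseCIDR(raw)
		if err != nil {
			// The diff only warns here; the route controller is still
			// started with a nil CIDR if parsing fails.
			fmt.Printf("unsuccessful parsing of cluster CIDR %v: %v\n", raw, err)
		}
	}
	fmt.Println(clusterCIDR)
}
```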
// If apiserver is not running we should wait for some time and fail only then. This is particularly