Refactor getComponentCommand() for readability

commit b7b0822654
parent be1996ee64
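In short, the commit splits the old map-plus-if-chain builder getComponentCommand(component, cfg) into a shared getComponentBaseCommand plus one small builder per component: getAPIServerCommand, getControllerManagerCommand, getSchedulerCommand and getProxyCommand. A minimal, self-contained sketch of that shape (simplified names and flags for illustration only, not the kubeadm code itself):

package main

import "fmt"

// baseCommand plays the role of getComponentBaseCommand: the pieces every
// component command shares (binary name, log level).
func baseCommand(component string) []string {
	return []string{"kube-" + component, "--v=2"}
}

// schedulerCommand plays the role of getSchedulerCommand: component-specific
// flags appended onto the shared base.
func schedulerCommand() []string {
	return append(baseCommand("scheduler"),
		"--address=127.0.0.1",
		"--leader-elect",
	)
}

func main() {
	fmt.Println(schedulerCommand())
	// Prints: [kube-scheduler --v=2 --address=127.0.0.1 --leader-elect]
}

The per-component functions in the diff below follow exactly this append-onto-base pattern, with the apiserver and controller-manager builders also handling their conditional flags (etcd, cloud provider, pod CIDR) inline.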
@@ -43,7 +43,7 @@ func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration, architecture st
         Containers: []api.Container{{
             Name: kubeProxy,
             Image: images.GetCoreImage(images.KubeProxyImage, cfg, envParams["hyperkube_image"]),
-            Command: append(getComponentCommand("proxy", cfg), "--kubeconfig=/run/kubeconfig"),
+            Command: append(getProxyCommand(cfg), "--kubeconfig=/run/kubeconfig"),
             SecurityContext: &api.SecurityContext{Privileged: &privilegedTrue},
             VolumeMounts: []api.VolumeMount{
                 {
@@ -60,7 +60,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeAPIServer: componentPod(api.Container{
             Name: kubeAPIServer,
             Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, envParams["hyperkube_image"]),
-            Command: getComponentCommand(apiServer, cfg),
+            Command: getAPIServerCommand(cfg),
             VolumeMounts: []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
             LivenessProbe: componentProbe(8080, "/healthz"),
             Resources: componentResources("250m"),
@@ -68,7 +68,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeControllerManager: componentPod(api.Container{
             Name: kubeControllerManager,
             Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, envParams["hyperkube_image"]),
-            Command: getComponentCommand(controllerManager, cfg),
+            Command: getControllerManagerCommand(cfg),
             VolumeMounts: []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
             LivenessProbe: componentProbe(10252, "/healthz"),
             Resources: componentResources("200m"),
@@ -76,7 +76,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
         kubeScheduler: componentPod(api.Container{
             Name: kubeScheduler,
             Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, envParams["hyperkube_image"]),
-            Command: getComponentCommand(scheduler, cfg),
+            Command: getSchedulerCommand(cfg),
             LivenessProbe: componentProbe(10251, "/healthz"),
             Resources: componentResources("100m"),
         }),
@@ -221,88 +221,100 @@ func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
     }
 }
 
-func getComponentCommand(component string, cfg *kubeadmapi.MasterConfiguration) (command []string) {
-    baseFlags := map[string][]string{
-        apiServer: {
-            "--insecure-bind-address=127.0.0.1",
-            "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
-            "--service-cluster-ip-range=" + cfg.Networking.ServiceSubnet,
-            "--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
-            "--client-ca-file=" + pkiDir + "/ca.pem",
-            "--tls-cert-file=" + pkiDir + "/apiserver.pem",
-            "--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
-            "--token-auth-file=" + pkiDir + "/tokens.csv",
-            fmt.Sprintf("--secure-port=%d", cfg.API.BindPort),
-            "--allow-privileged",
-        },
-        controllerManager: {
-            "--address=127.0.0.1",
-            "--leader-elect",
-            "--master=127.0.0.1:8080",
-            "--cluster-name=" + DefaultClusterName,
-            "--root-ca-file=" + pkiDir + "/ca.pem",
-            "--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
-            "--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
-            "--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
-            "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
-        },
-        scheduler: {
-            "--address=127.0.0.1",
-            "--leader-elect",
-            "--master=127.0.0.1:8080",
-        },
-        proxy: {},
-    }
-
+func getComponentBaseCommand(component string) (command []string) {
     envParams := kubeadmapi.GetEnvParams()
     if envParams["hyperkube_image"] != "" {
         command = []string{"/hyperkube", component}
     } else {
-        command = []string{"/usr/local/bin/kube-" + component}
+        command = []string{"kube-" + component}
     }
 
     command = append(command, envParams["component_loglevel"])
-    command = append(command, baseFlags[component]...)
+    return
+}
 
-    if component == apiServer {
-        // Use first address we are given
-        if len(cfg.API.AdvertiseAddresses) > 0 {
-            command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
-        }
-        // Check if the user decided to use an external etcd cluster
-        if len(cfg.Etcd.Endpoints) > 0 {
-            command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
-        } else {
-            command = append(command, "--etcd-servers=http://127.0.0.1:2379")
-        }
+func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
+    command = append(getComponentBaseCommand(apiServer),
+        "--insecure-bind-address=127.0.0.1",
+        "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
+        "--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
+        "--service-account-key-file="+pkiDir+"/apiserver-key.pem",
+        "--client-ca-file="+pkiDir+"/ca.pem",
+        "--tls-cert-file="+pkiDir+"/apiserver.pem",
+        "--tls-private-key-file="+pkiDir+"/apiserver-key.pem",
+        "--token-auth-file="+pkiDir+"/tokens.csv",
+        fmt.Sprintf("--secure-port=%d", cfg.API.BindPort),
+        "--allow-privileged",
+    )
 
-        // Is etcd secured?
-        if cfg.Etcd.CAFile != "" {
-            command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
-        }
-        if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
-            etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
-            etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
-            command = append(command, etcdClientFileArg, etcdKeyFileArg)
-        }
+    // Use first address we are given
+    if len(cfg.API.AdvertiseAddresses) > 0 {
+        command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
+    }
 
-    if component == controllerManager {
-        if cfg.CloudProvider != "" {
-            command = append(command, "--cloud-provider="+cfg.CloudProvider)
+    // Check if the user decided to use an external etcd cluster
+    if len(cfg.Etcd.Endpoints) > 0 {
+        command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
+    } else {
+        command = append(command, "--etcd-servers=http://127.0.0.1:2379")
+    }
 
-            // Only append the --cloud-config option if there's a such file
-            // TODO(phase1+) this won't work unless it's in one of the few directories we bind-mount
-            if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
-                command = append(command, "--cloud-config="+DefaultCloudConfigPath)
-            }
-        }
-        // Let the controller-manager allocate Node CIDRs for the Pod network.
-        // Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
-        if cfg.Networking.PodSubnet != "" {
-            command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
-        }
+    // Is etcd secured?
+    if cfg.Etcd.CAFile != "" {
+        command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
+    }
+    if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
+        etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
+        etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
+        command = append(command, etcdClientFileArg, etcdKeyFileArg)
+    }
 
+    return
+}
+
+func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
+    command = append(getComponentBaseCommand(controllerManager),
+        "--address=127.0.0.1",
+        "--leader-elect",
+        "--master=127.0.0.1:8080",
+        "--cluster-name="+DefaultClusterName,
+        "--root-ca-file="+pkiDir+"/ca.pem",
+        "--service-account-private-key-file="+pkiDir+"/apiserver-key.pem",
+        "--cluster-signing-cert-file="+pkiDir+"/ca.pem",
+        "--cluster-signing-key-file="+pkiDir+"/ca-key.pem",
+        "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
+    )
+
+    if cfg.CloudProvider != "" {
+        command = append(command, "--cloud-provider="+cfg.CloudProvider)
+
+        // Only append the --cloud-config option if there's a such file
+        // TODO(phase1+) this won't work unless it's in one of the few directories we bind-mount
+        if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
+            command = append(command, "--cloud-config="+DefaultCloudConfigPath)
+        }
+    }
+
+    // Let the controller-manager allocate Node CIDRs for the Pod network.
+    // Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
+    if cfg.Networking.PodSubnet != "" {
+        command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
+    }
+
+    return
+}
+
+func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
+    command = append(getComponentBaseCommand(scheduler),
+        "--address=127.0.0.1",
+        "--leader-elect",
+        "--master=127.0.0.1:8080",
+    )
+
+    return
+}
+
+func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
+    command = getComponentBaseCommand(proxy)
 
     return
 }
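A side effect of the split is that each command builder can be exercised on its own. As a rough illustration only (this test is not part of the commit, and it assumes it sits in the same package as the functions above):

package master

import (
	"strings"
	"testing"
)

// Hypothetical test sketch: the shared base builder no longer needs a full
// MasterConfiguration, so it can be checked in isolation.
func TestGetComponentBaseCommand(t *testing.T) {
	cmd := strings.Join(getComponentBaseCommand(scheduler), " ")
	if !strings.Contains(cmd, scheduler) {
		t.Errorf("base command %q does not mention the component", cmd)
	}
}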