2018-02-13 00:47:56 +00:00
|
|
|
package cluster
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"strconv"
|
2018-02-14 20:58:35 +00:00
|
|
|
"strings"
|
2018-02-13 00:47:56 +00:00
|
|
|
|
|
|
|
"github.com/rancher/rke/hosts"
|
|
|
|
"github.com/rancher/rke/pki"
|
|
|
|
"github.com/rancher/rke/services"
|
|
|
|
"github.com/rancher/types/apis/management.cattle.io/v3"
|
|
|
|
)
|
|
|
|
|
2018-02-14 20:58:35 +00:00
|
|
|
const (
	// EtcdPathPrefix is the default etcd key prefix passed to the
	// kube-apiserver via --etcd-prefix when the cluster-managed etcd is
	// used (external etcd supplies its own path instead).
	EtcdPathPrefix = "/registry"
)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig) (v3.RKEPlan, error) {
|
|
|
|
clusterPlan := v3.RKEPlan{}
|
2018-02-20 11:51:57 +00:00
|
|
|
myCluster, _ := ParseCluster(ctx, rkeConfig, "", "", nil, nil, nil)
|
2018-02-13 00:47:56 +00:00
|
|
|
// rkeConfig.Nodes are already unique. But they don't have role flags. So I will use the parsed cluster.Hosts to make use of the role flags.
|
|
|
|
uniqHosts := hosts.GetUniqueHostList(myCluster.EtcdHosts, myCluster.ControlPlaneHosts, myCluster.WorkerHosts)
|
|
|
|
for _, host := range uniqHosts {
|
|
|
|
clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host))
|
|
|
|
}
|
|
|
|
return clusterPlan, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host) v3.RKEConfigNodePlan {
|
2018-02-24 13:08:46 +00:00
|
|
|
processes := map[string]v3.Process{}
|
2018-02-13 00:47:56 +00:00
|
|
|
portChecks := []v3.PortCheck{}
|
|
|
|
// Everybody gets a sidecar and a kubelet..
|
2018-02-24 13:08:46 +00:00
|
|
|
processes[services.SidekickContainerName] = myCluster.BuildSidecarProcess()
|
|
|
|
processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host)
|
|
|
|
processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess()
|
2018-02-13 00:47:56 +00:00
|
|
|
|
|
|
|
portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
|
|
|
|
// Do we need an nginxProxy for this one ?
|
|
|
|
if host.IsWorker && !host.IsControl {
|
2018-02-24 13:08:46 +00:00
|
|
|
processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess()
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
if host.IsControl {
|
2018-02-24 13:08:46 +00:00
|
|
|
processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess()
|
|
|
|
processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess()
|
|
|
|
processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess()
|
2018-02-13 00:47:56 +00:00
|
|
|
|
|
|
|
portChecks = append(portChecks, BuildPortChecksFromPortList(host, ControlPlanePortList, ProtocolTCP)...)
|
|
|
|
}
|
|
|
|
if host.IsEtcd {
|
2018-02-24 13:08:46 +00:00
|
|
|
processes[services.EtcdContainerName] = myCluster.BuildEtcdProcess(host, nil)
|
2018-02-13 00:47:56 +00:00
|
|
|
|
|
|
|
portChecks = append(portChecks, BuildPortChecksFromPortList(host, EtcdPortList, ProtocolTCP)...)
|
|
|
|
}
|
|
|
|
return v3.RKEConfigNodePlan{
|
|
|
|
Address: host.Address,
|
|
|
|
Processes: processes,
|
|
|
|
PortChecks: portChecks,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildKubeAPIProcess() v3.Process {
|
2018-02-14 20:58:35 +00:00
|
|
|
// check if external etcd is used
|
|
|
|
etcdConnectionString := services.GetEtcdConnString(c.EtcdHosts)
|
|
|
|
etcdPathPrefix := EtcdPathPrefix
|
|
|
|
etcdClientCert := pki.GetCertPath(pki.KubeNodeCertName)
|
|
|
|
etcdClientKey := pki.GetKeyPath(pki.KubeNodeCertName)
|
|
|
|
etcdCAClientCert := pki.GetCertPath(pki.CACertName)
|
|
|
|
if len(c.Services.Etcd.ExternalURLs) > 0 {
|
|
|
|
etcdConnectionString = strings.Join(c.Services.Etcd.ExternalURLs, ",")
|
|
|
|
etcdPathPrefix = c.Services.Etcd.Path
|
|
|
|
etcdClientCert = pki.GetCertPath(pki.EtcdClientCertName)
|
|
|
|
etcdClientKey = pki.GetKeyPath(pki.EtcdClientCertName)
|
|
|
|
etcdCAClientCert = pki.GetCertPath(pki.EtcdClientCACertName)
|
|
|
|
}
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
Command := []string{
|
|
|
|
"/opt/rke/entrypoint.sh",
|
|
|
|
"kube-apiserver",
|
|
|
|
}
|
2018-03-14 23:37:04 +00:00
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"insecure-bind-address": "127.0.0.1",
|
|
|
|
"bind-address": "0.0.0.0",
|
|
|
|
"insecure-port": "0",
|
|
|
|
"secure-port": "6443",
|
|
|
|
"cloud-provider": "",
|
|
|
|
"allow_privileged": "true",
|
|
|
|
"kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
|
|
|
|
"service-cluster-ip-range": c.Services.KubeAPI.ServiceClusterIPRange,
|
|
|
|
"admission-control": "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
|
|
|
|
"storage-backend": "etcd3",
|
|
|
|
"client-ca-file": pki.GetCertPath(pki.CACertName),
|
|
|
|
"tls-cert-file": pki.GetCertPath(pki.KubeAPICertName),
|
|
|
|
"tls-private-key-file": pki.GetKeyPath(pki.KubeAPICertName),
|
|
|
|
"kubelet-client-certificate": pki.GetCertPath(pki.KubeAPICertName),
|
|
|
|
"kubelet-client-key": pki.GetKeyPath(pki.KubeAPICertName),
|
|
|
|
"service-account-key-file": pki.GetKeyPath(pki.KubeAPICertName),
|
|
|
|
}
|
|
|
|
|
2018-02-14 20:58:35 +00:00
|
|
|
args := []string{
|
|
|
|
"--etcd-cafile=" + etcdCAClientCert,
|
|
|
|
"--etcd-certfile=" + etcdClientCert,
|
|
|
|
"--etcd-keyfile=" + etcdClientKey,
|
|
|
|
"--etcd-servers=" + etcdConnectionString,
|
|
|
|
"--etcd-prefix=" + etcdPathPrefix,
|
|
|
|
}
|
2018-02-13 00:47:56 +00:00
|
|
|
|
|
|
|
if c.Authorization.Mode == services.RBACAuthorizationMode {
|
2018-03-14 23:37:04 +00:00
|
|
|
CommandArgs["authorization-mode"] = "Node,RBAC"
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
if c.Services.KubeAPI.PodSecurityPolicy {
|
2018-03-14 23:37:04 +00:00
|
|
|
CommandArgs["runtime-config"] = "extensions/v1beta1/podsecuritypolicy=true"
|
|
|
|
CommandArgs["admission-control"] = CommandArgs["admission-control"] + ",PodSecurityPolicy"
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VolumesFrom := []string{
|
|
|
|
services.SidekickContainerName,
|
|
|
|
}
|
|
|
|
Binds := []string{
|
2018-02-13 18:30:15 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
2018-03-14 23:37:04 +00:00
|
|
|
// Override args if they exist, add additional args
|
2018-02-13 00:47:56 +00:00
|
|
|
for arg, value := range c.Services.KubeAPI.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.KubeAPI.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
Command = append(Command, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.KubeAPI.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.GetHealthCheckURL(true, services.KubeAPIPort),
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.KubeAPIContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Command: Command,
|
|
|
|
Args: args,
|
|
|
|
VolumesFrom: VolumesFrom,
|
|
|
|
Binds: Binds,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
Image: c.Services.KubeAPI.Image,
|
|
|
|
HealthCheck: healthCheck,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildKubeControllerProcess() v3.Process {
|
2018-03-14 23:37:04 +00:00
|
|
|
Command := []string{
|
|
|
|
"/opt/rke/entrypoint.sh",
|
2018-02-13 00:47:56 +00:00
|
|
|
"kube-controller-manager",
|
|
|
|
}
|
2018-03-14 23:37:04 +00:00
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"address": "0.0.0.0",
|
|
|
|
"cloud-provider": "",
|
|
|
|
"leader-elect": "true",
|
|
|
|
"kubeconfig": pki.GetConfigPath(pki.KubeControllerCertName),
|
|
|
|
"enable-hostpath-provisioner": "false",
|
|
|
|
"node-monitor-grace-period": "40s",
|
|
|
|
"pod-eviction-timeout": "5m0s",
|
|
|
|
"v": "2",
|
|
|
|
"allocate-node-cidrs": "true",
|
|
|
|
"cluster-cidr": c.ClusterCIDR,
|
|
|
|
"service-cluster-ip-range": c.Services.KubeController.ServiceClusterIPRange,
|
|
|
|
"service-account-private-key-file": pki.GetKeyPath(pki.KubeAPICertName),
|
|
|
|
"root-ca-file": pki.GetCertPath(pki.CACertName),
|
|
|
|
}
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
args := []string{}
|
|
|
|
if c.Authorization.Mode == services.RBACAuthorizationMode {
|
|
|
|
args = append(args, "--use-service-account-credentials=true")
|
|
|
|
}
|
|
|
|
VolumesFrom := []string{
|
|
|
|
services.SidekickContainerName,
|
|
|
|
}
|
|
|
|
Binds := []string{
|
2018-02-13 18:30:15 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range c.Services.KubeController.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.KubeController.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
Command = append(Command, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.KubeController.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.GetHealthCheckURL(false, services.KubeControllerPort),
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.KubeControllerContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Command: Command,
|
|
|
|
Args: args,
|
|
|
|
VolumesFrom: VolumesFrom,
|
|
|
|
Binds: Binds,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
Image: c.Services.KubeController.Image,
|
|
|
|
HealthCheck: healthCheck,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildKubeletProcess(host *hosts.Host) v3.Process {
|
|
|
|
|
2018-03-14 23:37:04 +00:00
|
|
|
Command := []string{
|
|
|
|
"/opt/rke/entrypoint.sh",
|
2018-02-13 00:47:56 +00:00
|
|
|
"kubelet",
|
2018-03-14 23:37:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"v": "2",
|
|
|
|
"address": "0.0.0.0",
|
|
|
|
"cadvisor-port": "0",
|
|
|
|
"read-only-port": "0",
|
|
|
|
"cluster-domain": c.ClusterDomain,
|
|
|
|
"pod-infra-container-image": c.Services.Kubelet.InfraContainerImage,
|
|
|
|
"cgroups-per-qos": "True",
|
|
|
|
"enforce-node-allocatable": "",
|
|
|
|
"hostname-override": host.HostnameOverride,
|
|
|
|
"cluster-dns": c.ClusterDNSServer,
|
|
|
|
"network-plugin": "cni",
|
|
|
|
"cni-conf-dir": "/etc/cni/net.d",
|
|
|
|
"cni-bin-dir": "/opt/cni/bin",
|
|
|
|
"resolv-conf": "/etc/resolv.conf",
|
|
|
|
"allow-privileged": "true",
|
|
|
|
"cloud-provider": "",
|
|
|
|
"kubeconfig": pki.GetConfigPath(pki.KubeNodeCertName),
|
|
|
|
"client-ca-file": pki.GetCertPath(pki.CACertName),
|
|
|
|
"anonymous-auth": "false",
|
|
|
|
"volume-plugin-dir": "/var/lib/kubelet/volumeplugins",
|
|
|
|
"require-kubeconfig": "True",
|
|
|
|
"fail-swap-on": strconv.FormatBool(c.Services.Kubelet.FailSwapOn),
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VolumesFrom := []string{
|
|
|
|
services.SidekickContainerName,
|
|
|
|
}
|
|
|
|
Binds := []string{
|
2018-02-13 18:30:15 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
|
|
|
"/etc/cni:/etc/cni:ro,z",
|
|
|
|
"/opt/cni:/opt/cni:ro,z",
|
2018-02-09 20:18:58 +00:00
|
|
|
"/var/lib/cni:/var/lib/cni:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
"/etc/resolv.conf:/etc/resolv.conf",
|
2018-03-05 08:42:49 +00:00
|
|
|
"/sys:/sys:rprivate",
|
2018-03-06 02:49:20 +00:00
|
|
|
host.DockerInfo.DockerRootDir + ":" + host.DockerInfo.DockerRootDir + ":rw,rprivate,z",
|
2018-02-13 18:30:15 +00:00
|
|
|
"/var/lib/kubelet:/var/lib/kubelet:shared,z",
|
2018-03-05 08:42:49 +00:00
|
|
|
"/var/run:/var/run:rw,rprivate",
|
|
|
|
"/run:/run:rprivate",
|
2018-02-13 00:47:56 +00:00
|
|
|
"/etc/ceph:/etc/ceph",
|
2018-03-05 08:42:49 +00:00
|
|
|
"/dev:/host/dev,rprivate",
|
2018-02-13 18:30:15 +00:00
|
|
|
"/var/log/containers:/var/log/containers:z",
|
|
|
|
"/var/log/pods:/var/log/pods:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range c.Services.Kubelet.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.Kubelet.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
Command = append(Command, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.Kubelet.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.GetHealthCheckURL(true, services.KubeletPort),
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.KubeletContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Command: Command,
|
|
|
|
VolumesFrom: VolumesFrom,
|
|
|
|
Binds: Binds,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
Image: c.Services.Kubelet.Image,
|
|
|
|
PidMode: "host",
|
|
|
|
Privileged: true,
|
|
|
|
HealthCheck: healthCheck,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildKubeProxyProcess() v3.Process {
|
2018-03-14 23:37:04 +00:00
|
|
|
Command := []string{
|
|
|
|
"/opt/rke/entrypoint.sh",
|
2018-02-13 00:47:56 +00:00
|
|
|
"kube-proxy",
|
|
|
|
}
|
2018-03-14 23:37:04 +00:00
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"v": "2",
|
|
|
|
"healthz-bind-address": "0.0.0.0",
|
|
|
|
"kubeconfig": pki.GetConfigPath(pki.KubeProxyCertName),
|
|
|
|
}
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
VolumesFrom := []string{
|
|
|
|
services.SidekickContainerName,
|
|
|
|
}
|
|
|
|
Binds := []string{
|
2018-02-13 18:30:15 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range c.Services.Kubeproxy.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.Kubeproxy.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
Command = append(Command, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.Kubeproxy.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.GetHealthCheckURL(false, services.KubeproxyPort),
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.KubeproxyContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Command: Command,
|
|
|
|
VolumesFrom: VolumesFrom,
|
|
|
|
Binds: Binds,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
PidMode: "host",
|
|
|
|
Privileged: true,
|
|
|
|
HealthCheck: healthCheck,
|
|
|
|
Image: c.Services.Kubeproxy.Image,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildProxyProcess() v3.Process {
|
|
|
|
nginxProxyEnv := ""
|
|
|
|
for i, host := range c.ControlPlaneHosts {
|
|
|
|
nginxProxyEnv += fmt.Sprintf("%s", host.InternalAddress)
|
|
|
|
if i < (len(c.ControlPlaneHosts) - 1) {
|
|
|
|
nginxProxyEnv += ","
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Env := []string{fmt.Sprintf("%s=%s", services.NginxProxyEnvName, nginxProxyEnv)}
|
|
|
|
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.NginxProxyContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Env: Env,
|
2018-02-15 03:25:36 +00:00
|
|
|
Args: Env,
|
2018-02-13 00:47:56 +00:00
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
HealthCheck: v3.HealthCheck{},
|
|
|
|
Image: c.SystemImages.NginxProxy,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildSchedulerProcess() v3.Process {
|
2018-03-14 23:37:04 +00:00
|
|
|
Command := []string{
|
|
|
|
"/opt/rke/entrypoint.sh",
|
2018-02-13 00:47:56 +00:00
|
|
|
"kube-scheduler",
|
|
|
|
}
|
2018-03-14 23:37:04 +00:00
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"leader-elect": "true",
|
|
|
|
"v": "2",
|
|
|
|
"address": "0.0.0.0",
|
|
|
|
"kubeconfig": pki.GetConfigPath(pki.KubeSchedulerCertName),
|
|
|
|
}
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
VolumesFrom := []string{
|
|
|
|
services.SidekickContainerName,
|
|
|
|
}
|
|
|
|
Binds := []string{
|
2018-02-13 18:30:15 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range c.Services.Scheduler.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.Scheduler.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
Command = append(Command, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.Scheduler.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.GetHealthCheckURL(false, services.SchedulerPort),
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.SchedulerContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
Command: Command,
|
|
|
|
Binds: Binds,
|
|
|
|
VolumesFrom: VolumesFrom,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
Image: c.Services.Scheduler.Image,
|
|
|
|
HealthCheck: healthCheck,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildSidecarProcess() v3.Process {
|
|
|
|
return v3.Process{
|
2018-02-24 13:08:46 +00:00
|
|
|
Name: services.SidekickContainerName,
|
2018-02-13 00:47:56 +00:00
|
|
|
NetworkMode: "none",
|
|
|
|
Image: c.SystemImages.KubernetesServicesSidecar,
|
|
|
|
HealthCheck: v3.HealthCheck{},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Cluster) BuildEtcdProcess(host *hosts.Host, etcdHosts []*hosts.Host) v3.Process {
|
|
|
|
nodeName := pki.GetEtcdCrtName(host.InternalAddress)
|
|
|
|
initCluster := ""
|
|
|
|
if len(etcdHosts) == 0 {
|
|
|
|
initCluster = services.GetEtcdInitialCluster(c.EtcdHosts)
|
|
|
|
} else {
|
|
|
|
initCluster = services.GetEtcdInitialCluster(etcdHosts)
|
|
|
|
}
|
|
|
|
|
|
|
|
clusterState := "new"
|
|
|
|
if host.ExistingEtcdCluster {
|
|
|
|
clusterState = "existing"
|
|
|
|
}
|
2018-03-14 23:37:04 +00:00
|
|
|
args := []string{
|
|
|
|
"/usr/local/bin/etcd",
|
2018-02-13 00:47:56 +00:00
|
|
|
"--peer-client-cert-auth",
|
|
|
|
"--client-cert-auth",
|
2018-03-14 23:37:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
CommandArgs := map[string]string{
|
|
|
|
"name": "etcd-" + host.HostnameOverride,
|
|
|
|
"data-dir": "/var/lib/rancher/etcd",
|
|
|
|
"advertise-client-urls": "https://" + host.InternalAddress + ":2379,https://" + host.InternalAddress + ":4001",
|
|
|
|
"listen-client-urls": "https://0.0.0.0:2379",
|
|
|
|
"initial-advertise-peer-urls": "https://" + host.InternalAddress + ":2380",
|
|
|
|
"listen-peer-urls": "https://0.0.0.0:2380",
|
|
|
|
"initial-cluster-token": "etcd-cluster-1",
|
|
|
|
"initial-cluster": initCluster,
|
|
|
|
"initial-cluster-state": clusterState,
|
|
|
|
"trusted-ca-file": pki.GetCertPath(pki.CACertName),
|
|
|
|
"peer-trusted-ca-file": pki.GetCertPath(pki.CACertName),
|
|
|
|
"cert-file": pki.GetCertPath(nodeName),
|
|
|
|
"key-file": pki.GetKeyPath(nodeName),
|
|
|
|
"peer-cert-file": pki.GetCertPath(nodeName),
|
|
|
|
"peer-key-file": pki.GetKeyPath(nodeName),
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Binds := []string{
|
2018-02-23 01:44:51 +00:00
|
|
|
"/var/lib/etcd:/var/lib/rancher/etcd:z",
|
2018-02-13 00:47:56 +00:00
|
|
|
"/etc/kubernetes:/etc/kubernetes:z",
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
for arg, value := range c.Services.Etcd.ExtraArgs {
|
2018-03-14 23:37:04 +00:00
|
|
|
if _, ok := c.Services.Etcd.ExtraArgs[arg]; ok {
|
|
|
|
CommandArgs[arg] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for arg, value := range CommandArgs {
|
2018-02-13 00:47:56 +00:00
|
|
|
cmd := fmt.Sprintf("--%s=%s", arg, value)
|
|
|
|
args = append(args, cmd)
|
|
|
|
}
|
2018-03-16 03:20:42 +00:00
|
|
|
|
|
|
|
Binds = append(Binds, c.Services.Etcd.ExtraBinds...)
|
|
|
|
|
2018-02-13 00:47:56 +00:00
|
|
|
healthCheck := v3.HealthCheck{
|
|
|
|
URL: services.EtcdHealthCheckURL,
|
|
|
|
}
|
|
|
|
return v3.Process{
|
2018-02-26 21:43:24 +00:00
|
|
|
Name: services.EtcdContainerName,
|
|
|
|
Args: args,
|
|
|
|
Binds: Binds,
|
|
|
|
NetworkMode: "host",
|
|
|
|
RestartPolicy: "always",
|
|
|
|
Image: c.Services.Etcd.Image,
|
|
|
|
HealthCheck: healthCheck,
|
2018-02-13 00:47:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BuildPortChecksFromPortList(host *hosts.Host, portList []string, proto string) []v3.PortCheck {
|
|
|
|
portChecks := []v3.PortCheck{}
|
|
|
|
for _, port := range portList {
|
|
|
|
intPort, _ := strconv.Atoi(port)
|
|
|
|
portChecks = append(portChecks, v3.PortCheck{
|
|
|
|
Address: host.Address,
|
|
|
|
Port: intPort,
|
|
|
|
Protocol: proto,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return portChecks
|
|
|
|
}
|