rancher/rke commit: Add default cluster config file and return service container pointer
Add more generic functions to go services
Add x509 authentication
@@ -10,6 +10,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
 	"github.com/urfave/cli"
 )
@@ -19,8 +20,13 @@ func ClusterCommand() cli.Command {
 		cli.StringFlag{
 			Name:   "cluster-file",
 			Usage:  "Specify an alternate cluster YAML file (default: cluster.yml)",
+			Value:  "cluster.yml",
 			EnvVar: "CLUSTER_FILE",
 		},
+		cli.BoolFlag{
+			Name:  "force-crts",
+			Usage: "Force rotating the Kubernetes components certificates",
+		},
 	}
 	return cli.Command{
 		Name: "cluster",
@@ -42,14 +48,12 @@ func clusterUp(ctx *cli.Context) error {
 	logrus.Infof("Building up Kubernetes cluster")
 	clusterFile, err := resolveClusterFile(ctx)
 	if err != nil {
-		logrus.Errorf("Failed to bring cluster up: %v", err)
-		return err
+		return fmt.Errorf("Failed to bring cluster up: %v", err)
 	}
 	logrus.Debugf("Parsing cluster file [%v]", clusterFile)
 	servicesLookup, k8shosts, err := parseClusterFile(clusterFile)
 	if err != nil {
-		logrus.Errorf("Failed to parse the cluster file: %v", err)
-		return err
+		return fmt.Errorf("Failed to parse the cluster file: %v", err)
 	}
 	for i := range k8shosts {
 		// Set up socket tunneling
@@ -60,36 +64,39 @@ func clusterUp(ctx *cli.Context) error {
 		}
 	}
 	etcdHosts, cpHosts, workerHosts := hosts.DivideHosts(k8shosts)
+	KubernetesServiceIP, err := services.GetKubernetesServiceIp(servicesLookup.Services.KubeAPI.ServiceClusterIPRange)
+	clusterDomain := servicesLookup.Services.Kubelet.ClusterDomain
+	if err != nil {
+		return err
+	}
+	err = pki.StartCertificatesGeneration(ctx, cpHosts, workerHosts, clusterDomain, KubernetesServiceIP)
+	if err != nil {
+		return fmt.Errorf("[Certificates] Failed to generate Kubernetes certificates: %v", err)
+	}
 	err = services.RunEtcdPlane(etcdHosts, servicesLookup.Services.Etcd)
 	if err != nil {
-		logrus.Errorf("[Etcd] Failed to bring up Etcd Plane: %v", err)
-		return err
+		return fmt.Errorf("[Etcd] Failed to bring up Etcd Plane: %v", err)
 	}
 	err = services.RunControlPlane(cpHosts, etcdHosts, servicesLookup.Services)
 	if err != nil {
-		logrus.Errorf("[ControlPlane] Failed to bring up Control Plane: %v", err)
-		return err
+		return fmt.Errorf("[ControlPlane] Failed to bring up Control Plane: %v", err)
 	}
 	err = services.RunWorkerPlane(cpHosts, workerHosts, servicesLookup.Services)
 	if err != nil {
-		logrus.Errorf("[WorkerPlane] Failed to bring up Worker Plane: %v", err)
-		return err
+		return fmt.Errorf("[WorkerPlane] Failed to bring up Worker Plane: %v", err)
 	}
 	return nil
 }
 
 func resolveClusterFile(ctx *cli.Context) (string, error) {
 	clusterFile := ctx.String("cluster-file")
 	if len(clusterFile) == 0 {
 		clusterFile = "cluster.yml"
 	}
 	fp, err := filepath.Abs(clusterFile)
 	if err != nil {
 		return "", fmt.Errorf("failed to lookup current directory name: %v", err)
 	}
 	file, err := os.Open(fp)
 	if err != nil {
-		return "", fmt.Errorf("Can not find cluster.yml: %v", err)
+		return "", fmt.Errorf("Can not find cluster configuration file: %v", err)
 	}
 	defer file.Close()
 	buf, err := ioutil.ReadAll(file)
@@ -101,33 +108,32 @@ func resolveClusterFile(ctx *cli.Context) (string, error) {
 	return clusterFile, nil
 }
 
-func parseClusterFile(clusterFile string) (services.Container, []hosts.Host, error) {
-	logrus.Debugf("cluster file: \n%s", clusterFile)
+func parseClusterFile(clusterFile string) (*services.Container, []hosts.Host, error) {
 	// parse hosts
 	k8shosts := hosts.Hosts{}
 	err := yaml.Unmarshal([]byte(clusterFile), &k8shosts)
 	if err != nil {
-		return services.Container{}, nil, err
+		return nil, nil, err
 	}
 	for i, host := range k8shosts.Hosts {
 		if len(host.Hostname) == 0 {
-			return services.Container{}, nil, fmt.Errorf("Hostname for host (%d) is not provided", i+1)
+			return nil, nil, fmt.Errorf("Hostname for host (%d) is not provided", i+1)
 		} else if len(host.User) == 0 {
-			return services.Container{}, nil, fmt.Errorf("User for host (%d) is not provided", i+1)
+			return nil, nil, fmt.Errorf("User for host (%d) is not provided", i+1)
 		} else if len(host.Role) == 0 {
-			return services.Container{}, nil, fmt.Errorf("Role for host (%d) is not provided", i+1)
+			return nil, nil, fmt.Errorf("Role for host (%d) is not provided", i+1)
 		}
 		for _, role := range host.Role {
-			if role != services.ETCDRole && role != services.MasterRole && role != services.WorkerRole {
-				return services.Container{}, nil, fmt.Errorf("Role [%s] for host (%d) is not recognized", role, i+1)
+			if role != services.ETCDRole && role != services.ControlRole && role != services.WorkerRole {
+				return nil, nil, fmt.Errorf("Role [%s] for host (%d) is not recognized", role, i+1)
			}
 		}
 	}
 	// parse services
-	k8sPlanes := services.Container{}
-	err = yaml.Unmarshal([]byte(clusterFile), &k8sPlanes)
+	var servicesContainer services.Container
+	err = yaml.Unmarshal([]byte(clusterFile), &servicesContainer)
 	if err != nil {
-		return services.Container{}, nil, err
+		return nil, nil, err
 	}
-	return k8sPlanes, k8shosts.Hosts, nil
+	return &servicesContainer, k8shosts.Hosts, nil
 }
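Note: parseClusterFile now returns a *services.Container, so callers can tell "nothing parsed" (nil) apart from an empty value without copying the struct. A minimal sketch of driving the parser directly, as if written inside the same command package; the yaml keys hosts/hostname/user/role are inferred from the validation logic above (the hosts.Hosts struct itself is not shown in this diff), and the host names are made up:

	func exampleParse() error {
		clusterFile := `
	hosts:
	  - hostname: control-0
	    user: ubuntu
	    role: [controlplane, etcd]
	  - hostname: worker-0
	    user: ubuntu
	    role: [worker]
	`
		servicesLookup, k8shosts, err := parseClusterFile(clusterFile)
		if err != nil {
			return fmt.Errorf("Failed to parse the cluster file: %v", err)
		}
		fmt.Printf("parsed %d hosts, services: %+v\n", len(k8shosts), servicesLookup)
		return nil
	}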
docker/docker.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/hosts"
+)
+
+func DoRunContainer(imageCfg *container.Config, hostCfg *container.HostConfig, containerName string, host *hosts.Host, plane string) error {
+	isRunning, err := IsContainerRunning(host, containerName)
+	if err != nil {
+		return err
+	}
+	if isRunning {
+		logrus.Infof("[%s] Container %s is already running on host [%s]", plane, containerName, host.Hostname)
+		return nil
+	}
+	logrus.Debugf("[%s] Pulling Image on host [%s]", plane, host.Hostname)
+	err = PullImage(host, imageCfg.Image)
+	if err != nil {
+		return err
+	}
+	logrus.Infof("[%s] Successfully pulled %s image on host [%s]", plane, containerName, host.Hostname)
+	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, containerName)
+	if err != nil {
+		return fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, host.Hostname, err)
+	}
+	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
+		return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, host.Hostname, err)
+	}
+	logrus.Debugf("[%s] Successfully started %s container: %s", plane, containerName, resp.ID)
+	logrus.Infof("[%s] Successfully started %s container on host [%s]", plane, containerName, host.Hostname)
+	return nil
+}
+
+func IsContainerRunning(host *hosts.Host, containerName string) (bool, error) {
+	logrus.Debugf("Checking if container %s is running on host [%s]", containerName, host.Hostname)
+	containers, err := host.DClient.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err != nil {
+		return false, fmt.Errorf("Can't get Docker containers for host [%s]: %v", host.Hostname, err)
+	}
+	for _, container := range containers {
+		if container.Names[0] == "/"+containerName {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func PullImage(host *hosts.Host, containerImage string) error {
+	out, err := host.DClient.ImagePull(context.Background(), containerImage, types.ImagePullOptions{})
+	if err != nil {
+		return fmt.Errorf("Can't pull Docker image %s for host [%s]: %v", containerImage, host.Hostname, err)
+	}
+	defer out.Close()
+	if logrus.GetLevel() == logrus.DebugLevel {
+		io.Copy(os.Stdout, out)
+	} else {
+		io.Copy(ioutil.Discard, out)
+	}
+
+	return nil
+}
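DoRunContainer is the generic entry point the service planes now share: it skips the run if a container with that name is already up, pulls the image, then creates and starts the container. A minimal usage sketch (the host value and image tag below are assumed, not taken from this commit):

	imageCfg := &container.Config{Image: "quay.io/coreos/etcd:latest"} // assumed image
	hostCfg := &container.HostConfig{
		RestartPolicy: container.RestartPolicy{Name: "always"},
	}
	// host is a hosts.Host whose DClient was set up by TunnelUp.
	if err := docker.DoRunContainer(imageCfg, hostCfg, "etcd", &host, "etcd"); err != nil {
		return err
	}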
@@ -44,7 +44,7 @@ func (d *dialer) Dial(network, addr string) (net.Conn, error) {
 	}
 }
 
 func (h *Host) TunnelUp(ctx *cli.Context) error {
-	logrus.Infof("[SSH] Start tunnel for host [%s]", h.Hostname)
+	logrus.Infof("[ssh] Start tunnel for host [%s]", h.Hostname)
 
 	dialer := &dialer{
 		host: h,
@@ -1,8 +1,8 @@
 package hosts
 
 import (
-	"github.com/docker/docker/client"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/client"
 )
 
 type Hosts struct {
package/certdownloader/Dockerfile (new file, 6 lines)
@@ -0,0 +1,6 @@
+FROM alpine:3.4
+
+RUN apk update && apk add bash
+
+COPY entrypoint.sh /tmp/entrypoint.sh
+CMD ["/tmp/entrypoint.sh"]
package/certdownloader/entrypoint.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
+#!/bin/bash -x
+
+SSL_CRTS_DIR=/etc/kubernetes/ssl
+mkdir -p $SSL_CRTS_DIR
+
+for i in $(env | grep -o KUBE_.*=); do
+  name="$(echo "$i" | cut -f1 -d"=" | tr '[:upper:]' '[:lower:]' | tr '_' '-').pem"
+  env=$(echo "$i" | cut -f1 -d"=")
+  value=$(echo "${!env}")
+  if [ ! -f $SSL_CRTS_DIR/$name ] || [ "$FORCE_DEPLOY" == "true" ]; then
+    echo "$value" > $SSL_CRTS_DIR/$name
+  fi
+done
+
+for i in $(env | grep -o KUBECFG_.*=); do
+  name="$(echo "$i" | cut -f1 -d"=" | tr '[:upper:]' '[:lower:]' | tr '_' '-').yaml"
+  env=$(echo "$i" | cut -f1 -d"=")
+  value=$(echo "${!env}")
+  if [ ! -f $SSL_CRTS_DIR/$name ]; then
+    echo "$value" > $SSL_CRTS_DIR/$name
+  fi
+done
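The entrypoint derives each target file name from the environment variable's name: lowercase it, replace '_' with '-', then append .pem for KUBE_* entries or .yaml for KUBECFG_* entries. A Go sketch of the same mangling (the helper name is ours, for illustration only):

	// envToFileName("KUBE_CA_KEY", ".pem")         == "kube-ca-key.pem"
	// envToFileName("KUBECFG_KUBE_PROXY", ".yaml") == "kubecfg-kube-proxy.yaml"
	func envToFileName(envName, ext string) string {
		return strings.ToLower(strings.Replace(envName, "_", "-", -1)) + ext
	}

These names line up exactly with the *Path constants in pki/constants.go below.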
pki/constants.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package pki
+
+const (
+	CrtDownloaderImage     = "husseingalal/crt-downloader:latest"
+	CrtDownloaderContainer = "cert-deployer"
+
+	CACertName    = "kube-ca"
+	CACertENVName = "KUBE_CA"
+	CAKeyENVName  = "KUBE_CA_KEY"
+	CACertPath    = "/etc/kubernetes/ssl/kube-ca.pem"
+	CAKeyPath     = "/etc/kubernetes/ssl/kube-ca-key.pem"
+
+	KubeAPICertName    = "kube-apiserver"
+	KubeAPICertENVName = "KUBE_API"
+	KubeAPIKeyENVName  = "KUBE_API_KEY"
+	KubeAPICertPath    = "/etc/kubernetes/ssl/kube-api.pem"
+	KubeAPIKeyPath     = "/etc/kubernetes/ssl/kube-api-key.pem"
+
+	KubeControllerName          = "kube-controller-manager"
+	KubeControllerCommonName    = "system:kube-controller-manager"
+	KubeControllerCertENVName   = "KUBE_CONTROLLER_MANAGER"
+	KubeControllerKeyENVName    = "KUBE_CONTROLLER_MANAGER_KEY"
+	KubeControllerConfigENVName = "KUBECFG_CONTROLLER_MANAGER"
+	KubeControllerCertPath      = "/etc/kubernetes/ssl/kube-controller-manager.pem"
+	KubeControllerKeyPath       = "/etc/kubernetes/ssl/kube-controller-manager-key.pem"
+	KubeControllerConfigPath    = "/etc/kubernetes/ssl/kubecfg-controller-manager.yaml"
+
+	KubeSchedulerName          = "kube-scheduler"
+	KubeSchedulerCommonName    = "system:kube-scheduler"
+	KubeSchedulerCertENVName   = "KUBE_SCHEDULER"
+	KubeSchedulerKeyENVName    = "KUBE_SCHEDULER_KEY"
+	KubeSchedulerConfigENVName = "KUBECFG_SCHEDULER"
+	KubeSchedulerCertPath      = "/etc/kubernetes/ssl/kube-scheduler.pem"
+	KubeSchedulerKeyPath       = "/etc/kubernetes/ssl/kube-scheduler-key.pem"
+	KubeSchedulerConfigPath    = "/etc/kubernetes/ssl/kubecfg-scheduler.yaml"
+
+	KubeProxyName          = "kube-proxy"
+	KubeProxyCommonName    = "system:kube-proxy"
+	KubeProxyCertENVName   = "KUBE_PROXY"
+	KubeProxyKeyENVName    = "KUBE_PROXY_KEY"
+	KubeProxyConfigENVName = "KUBECFG_KUBE_PROXY"
+	KubeProxyCertPath      = "/etc/kubernetes/ssl/kube-proxy.pem"
+	KubeProxyKeyPath       = "/etc/kubernetes/ssl/kube-proxy-key.pem"
+	KubeProxyConfigPath    = "/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml"
+
+	KubeNodeName             = "kube-node"
+	KubeNodeCommonName       = "kube-node"
+	KubeNodeOrganizationName = "system:nodes"
+	KubeNodeCertENVName      = "KUBE_NODE"
+	KubeNodeKeyENVName       = "KUBE_NODE_KEY"
+	KubeNodeConfigENVName    = "KUBECFG_KUBE_NODE"
+	KubeNodeCertPath         = "/etc/kubernetes/ssl/kube-node.pem"
+	KubeNodeKeyPath          = "/etc/kubernetes/ssl/kube-node-key.pem"
+	KubeNodeConfigPath       = "/etc/kubernetes/ssl/kubecfg-kube-node.yaml"
+)
pki/deploy.go (new file, 129 lines)
@@ -0,0 +1,129 @@
+package pki
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/docker"
+	"github.com/rancher/rke/hosts"
+	"k8s.io/client-go/util/cert"
+)
+
+func convertCrtToENV(name string, certificate *x509.Certificate) string {
+	encodedCrt := cert.EncodeCertPEM(certificate)
+	return name + "=" + string(encodedCrt)
+}
+
+func convertKeyToENV(name string, key *rsa.PrivateKey) string {
+	encodedKey := cert.EncodePrivateKeyPEM(key)
+	return name + "=" + string(encodedKey)
+}
+
+func convertConfigToENV(name string, config string) string {
+	return name + "=" + config
+}
+
+func deployCertificatesOnMasters(cpHosts []hosts.Host, crtMap map[string]CertificatePKI, forceDeploy bool) error {
+	forceDeployEnv := "FORCE_DEPLOY=false"
+	if forceDeploy {
+		forceDeployEnv = "FORCE_DEPLOY=true"
+	}
+	env := []string{
+		forceDeployEnv,
+		convertCrtToENV(CACertENVName, crtMap[CACertName].certificate),
+		convertKeyToENV(CAKeyENVName, crtMap[CACertName].key),
+		convertCrtToENV(KubeAPICertENVName, crtMap[KubeAPICertName].certificate),
+		convertKeyToENV(KubeAPIKeyENVName, crtMap[KubeAPICertName].key),
+		convertCrtToENV(KubeControllerCertENVName, crtMap[KubeControllerName].certificate),
+		convertKeyToENV(KubeControllerKeyENVName, crtMap[KubeControllerName].key),
+		convertConfigToENV(KubeControllerConfigENVName, crtMap[KubeControllerName].config),
+		convertCrtToENV(KubeSchedulerCertENVName, crtMap[KubeSchedulerName].certificate),
+		convertKeyToENV(KubeSchedulerKeyENVName, crtMap[KubeSchedulerName].key),
+		convertConfigToENV(KubeSchedulerConfigENVName, crtMap[KubeSchedulerName].config),
+		convertCrtToENV(KubeProxyCertENVName, crtMap[KubeProxyName].certificate),
+		convertKeyToENV(KubeProxyKeyENVName, crtMap[KubeProxyName].key),
+		convertConfigToENV(KubeProxyConfigENVName, crtMap[KubeProxyName].config),
+		convertCrtToENV(KubeNodeCertENVName, crtMap[KubeNodeName].certificate),
+		convertKeyToENV(KubeNodeKeyENVName, crtMap[KubeNodeName].key),
+		convertConfigToENV(KubeNodeConfigENVName, crtMap[KubeNodeName].config),
+	}
+	for _, host := range cpHosts {
+		err := doRunDeployer(&host, env)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func deployCertificatesOnWorkers(workerHosts []hosts.Host, crtMap map[string]CertificatePKI, forceDeploy bool) error {
+	forceDeployEnv := "FORCE_DEPLOY=false"
+	if forceDeploy {
+		forceDeployEnv = "FORCE_DEPLOY=true"
+	}
+	env := []string{
+		forceDeployEnv,
+		convertCrtToENV(CACertENVName, crtMap[CACertName].certificate),
+		convertCrtToENV(KubeProxyCertENVName, crtMap[KubeProxyName].certificate),
+		convertKeyToENV(KubeProxyKeyENVName, crtMap[KubeProxyName].key),
+		convertConfigToENV(KubeProxyConfigENVName, crtMap[KubeProxyName].config),
+		convertCrtToENV(KubeNodeCertENVName, crtMap[KubeNodeName].certificate),
+		convertKeyToENV(KubeNodeKeyENVName, crtMap[KubeNodeName].key),
+		convertConfigToENV(KubeNodeConfigENVName, crtMap[KubeNodeName].config),
+	}
+	for _, host := range workerHosts {
+		err := doRunDeployer(&host, env)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func doRunDeployer(host *hosts.Host, containerEnv []string) error {
+	logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.Hostname)
+	err := docker.PullImage(host, CrtDownloaderImage)
+	if err != nil {
+		return err
+	}
+	imageCfg := &container.Config{
+		Image: CrtDownloaderImage,
+		Env:   containerEnv,
+	}
+	hostCfg := &container.HostConfig{
+		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
+		},
+		Privileged:    true,
+		RestartPolicy: container.RestartPolicy{Name: "never"},
+	}
+	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, CrtDownloaderContainer)
+	if err != nil {
+		return fmt.Errorf("Failed to create Certificates deployer container on host [%s]: %v", host.Hostname, err)
+	}
+
+	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
+		return fmt.Errorf("Failed to start Certificates deployer container on host [%s]: %v", host.Hostname, err)
+	}
+	logrus.Debugf("[certificates] Successfully started Certificate deployer container: %s", resp.ID)
+	for {
+		isDeployerRunning, err := docker.IsContainerRunning(host, CrtDownloaderContainer)
+		if err != nil {
+			return err
+		}
+		if isDeployerRunning {
+			time.Sleep(5 * time.Second)
+			continue
+		}
+		if err := host.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
+			return fmt.Errorf("Failed to delete Certificates deployer container on host[%s]: %v", host.Hostname, err)
+		}
+		return nil
+	}
+}
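The deployer ships every certificate, key, and kubeconfig to each host as plain environment variables on the cert-deployer container, then polls until that container exits and removes it. A sketch of what one env entry looks like (PEM body abbreviated):

	// Inside the pki package; caCrt is the *x509.Certificate generated in pki.go below.
	entry := convertCrtToENV(CACertENVName, caCrt)
	// entry == "KUBE_CA=-----BEGIN CERTIFICATE-----\nMIIC...\n-----END CERTIFICATE-----\n"
	// entrypoint.sh (above) turns KUBE_CA into /etc/kubernetes/ssl/kube-ca.pem.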
pki/kubeconfig.go (new file, 23 lines)
@@ -0,0 +1,23 @@
+package pki
+
+func getKubeConfigX509(kubernetesURL string, componentName string, caPath string, crtPath string, keyPath string) string {
+	return `apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    api-version: v1
+    certificate-authority: ` + caPath + `
+    server: "` + kubernetesURL + `"
+  name: "local"
+contexts:
+- context:
+    cluster: "local"
+    user: "` + componentName + `"
+  name: "Default"
+current-context: "Default"
+users:
+- name: "` + componentName + `"
+  user:
+    client-certificate: ` + crtPath + `
+    client-key: ` + keyPath + ``
+}
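For example, the controller-manager kubeconfig that ends up at /etc/kubernetes/ssl/kubecfg-controller-manager.yaml is rendered with the constants from pki/constants.go; the endpoint below is an assumed control-plane address, matching the "https://"+cpHosts[0].IP+":6443" pattern used in pki.go:

	cfg := getKubeConfigX509(
		"https://10.0.0.10:6443",   // assumed: first control host, API on 6443
		KubeControllerName,         // "kube-controller-manager"
		CACertPath,                 // "/etc/kubernetes/ssl/kube-ca.pem"
		KubeControllerCertPath,
		KubeControllerKeyPath,
	)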
pki/pki.go (new file, 192 lines)
@@ -0,0 +1,192 @@
+package pki
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"net"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/rancher/rke/hosts"
+	"github.com/urfave/cli"
+	"k8s.io/client-go/util/cert"
+)
+
+type CertificatePKI struct {
+	certificate *x509.Certificate
+	key         *rsa.PrivateKey
+	config      string
+}
+
+// StartCertificatesGeneration ...
+func StartCertificatesGeneration(ctx *cli.Context, cpHosts []hosts.Host, workerHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) error {
+	forceDeploy := ctx.Bool("force-crts")
+	logrus.Infof("[certificates] Generating kubernetes certificates")
+	certs, err := generateCerts(cpHosts, clusterDomain, KubernetesServiceIP)
+	if err != nil {
+		return err
+	}
+	err = deployCertificatesOnMasters(cpHosts, certs, forceDeploy)
+	if err != nil {
+		return err
+	}
+	err = deployCertificatesOnWorkers(workerHosts, certs, forceDeploy)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func generateCerts(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
+	certs := make(map[string]CertificatePKI)
+	// generate CA certificate and key
+	logrus.Infof("[certificates] Generating CA kubernetes certificates")
+	caCrt, caKey, err := generateCACertAndKey()
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] CA Certificate: %s", string(cert.EncodeCertPEM(caCrt)))
+	certs[CACertName] = CertificatePKI{
+		certificate: caCrt,
+		key:         caKey,
+	}
+
+	// generate API certificate and key
+	logrus.Infof("[certificates] Generating Kubernetes API server certificates")
+	kubeAPIAltNames := getAltNames(cpHosts, clusterDomain, KubernetesServiceIP)
+	kubeAPICrt, kubeAPIKey, err := generateKubeAPICertAndKey(caCrt, caKey, kubeAPIAltNames)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] Kube API Certificate: %s", string(cert.EncodeCertPEM(kubeAPICrt)))
+	certs[KubeAPICertName] = CertificatePKI{
+		certificate: kubeAPICrt,
+		key:         kubeAPIKey,
+	}
+
+	// generate Kube controller-manager certificate and key
+	logrus.Infof("[certificates] Generating Kube Controller certificates")
+	kubeControllerCrt, kubeControllerKey, err := generateClientCertAndKey(caCrt, caKey, KubeControllerCommonName, []string{})
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] Kube Controller Certificate: %s", string(cert.EncodeCertPEM(kubeControllerCrt)))
+	certs[KubeControllerName] = CertificatePKI{
+		certificate: kubeControllerCrt,
+		key:         kubeControllerKey,
+		config:      getKubeConfigX509("https://"+cpHosts[0].IP+":6443", KubeControllerName, CACertPath, KubeControllerCertPath, KubeControllerKeyPath),
+	}
+
+	// generate Kube scheduler certificate and key
+	logrus.Infof("[certificates] Generating Kube Scheduler certificates")
+	kubeSchedulerCrt, kubeSchedulerKey, err := generateClientCertAndKey(caCrt, caKey, KubeSchedulerCommonName, []string{})
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] Kube Scheduler Certificate: %s", string(cert.EncodeCertPEM(kubeSchedulerCrt)))
+	certs[KubeSchedulerName] = CertificatePKI{
+		certificate: kubeSchedulerCrt,
+		key:         kubeSchedulerKey,
+		config:      getKubeConfigX509("https://"+cpHosts[0].IP+":6443", KubeSchedulerName, CACertPath, KubeSchedulerCertPath, KubeSchedulerKeyPath),
+	}
+
+	// generate Kube Proxy certificate and key
+	logrus.Infof("[certificates] Generating Kube Proxy certificates")
+	kubeProxyCrt, kubeProxyKey, err := generateClientCertAndKey(caCrt, caKey, KubeProxyCommonName, []string{})
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] Kube Proxy Certificate: %s", string(cert.EncodeCertPEM(kubeProxyCrt)))
+	certs[KubeProxyName] = CertificatePKI{
+		certificate: kubeProxyCrt,
+		key:         kubeProxyKey,
+		config:      getKubeConfigX509("https://"+cpHosts[0].IP+":6443", KubeProxyName, CACertPath, KubeProxyCertPath, KubeProxyKeyPath),
+	}
+
+	logrus.Infof("[certificates] Generating Node certificate")
+	nodeCrt, nodeKey, err := generateClientCertAndKey(caCrt, caKey, KubeNodeCommonName, []string{KubeNodeOrganizationName})
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("[certificates] Node Certificate: %s", string(cert.EncodeCertPEM(nodeCrt)))
+	certs[KubeNodeName] = CertificatePKI{
+		certificate: nodeCrt,
+		key:         nodeKey,
+		config:      getKubeConfigX509("https://"+cpHosts[0].IP+":6443", KubeNodeName, CACertPath, KubeNodeCertPath, KubeNodeKeyPath),
+	}
+	return certs, nil
+}
+
+func generateClientCertAndKey(caCrt *x509.Certificate, caKey *rsa.PrivateKey, commonName string, orgs []string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	rootKey, err := cert.NewPrivateKey()
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate private key for %s certificate: %v", commonName, err)
+	}
+	caConfig := cert.Config{
+		CommonName:   commonName,
+		Organization: orgs,
+		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+	}
+	clientCert, err := cert.NewSignedCert(caConfig, rootKey, caCrt, caKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate %s certificate: %v", commonName, err)
+	}
+
+	return clientCert, rootKey, nil
+}
+
+func generateKubeAPICertAndKey(caCrt *x509.Certificate, caKey *rsa.PrivateKey, altNames *cert.AltNames) (*x509.Certificate, *rsa.PrivateKey, error) {
+	rootKey, err := cert.NewPrivateKey()
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate private key for kube-apiserver certificate: %v", err)
+	}
+	caConfig := cert.Config{
+		CommonName: KubeAPICertName,
+		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+		AltNames:   *altNames,
+	}
+	kubeCACert, err := cert.NewSignedCert(caConfig, rootKey, caCrt, caKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate kube-apiserver certificate: %v", err)
+	}
+
+	return kubeCACert, rootKey, nil
+}
+
+func generateCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
+	rootKey, err := cert.NewPrivateKey()
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate private key for CA certificate: %v", err)
+	}
+	caConfig := cert.Config{
+		CommonName: CACertName,
+	}
+	kubeCACert, err := cert.NewSelfSignedCACert(caConfig, rootKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Failed to generate CA certificate: %v", err)
+	}
+
+	return kubeCACert, rootKey, nil
+}
+
+func getAltNames(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) *cert.AltNames {
+	ips := []net.IP{}
+	dnsNames := []string{}
+	for _, host := range cpHosts {
+		ips = append(ips, net.ParseIP(host.IP))
+		dnsNames = append(dnsNames, host.Hostname)
+	}
+	ips = append(ips, net.ParseIP("127.0.0.1"))
+	ips = append(ips, KubernetesServiceIP)
+	dnsNames = append(dnsNames, []string{
+		"localhost",
+		"kubernetes",
+		"kubernetes.default",
+		"kubernetes.default.svc",
+		"kubernetes.default.svc." + clusterDomain,
+	}...)
+	return &cert.AltNames{
+		IPs:      ips,
+		DNSNames: dnsNames,
+	}
+}
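The API server certificate's subject alternative names come from getAltNames: every control-plane host IP and hostname, loopback, the derived Kubernetes service IP, and the in-cluster service names. A sketch with assumed inputs:

	names := getAltNames(
		[]hosts.Host{{Hostname: "control-0", IP: "10.0.0.10"}}, // assumed host
		"cluster.local",
		net.ParseIP("10.233.0.1"), // from services.GetKubernetesServiceIp
	)
	// names.IPs:      10.0.0.10, 127.0.0.1, 10.233.0.1
	// names.DNSNames: control-0, localhost, kubernetes, kubernetes.default,
	//                 kubernetes.default.svc, kubernetes.default.svc.cluster.local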
@@ -5,24 +5,25 @@ import (
 	"github.com/rancher/rke/hosts"
 )
 
-func RunControlPlane(masterHosts []hosts.Host, etcdHosts []hosts.Host, masterServices Services) error {
-	logrus.Infof("[ControlPlane] Building up Controller Plane..")
-	for _, host := range masterHosts {
+func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlServices Services) error {
+	logrus.Infof("[%s] Building up Controller Plane..", ControlRole)
+	for _, host := range controlHosts {
 		// run kubeapi
-		err := runKubeAPI(host, etcdHosts, masterServices.KubeAPI)
+		err := runKubeAPI(host, etcdHosts, controlServices.KubeAPI)
 		if err != nil {
 			return err
 		}
 		// run kubecontroller
-		err = runKubeController(host, masterServices.KubeController)
+		err = runKubeController(host, controlServices.KubeController)
 		if err != nil {
 			return err
 		}
 		// run scheduler
-		err = runScheduler(host, masterServices.Scheduler)
+		err = runScheduler(host, controlServices.Scheduler)
 		if err != nil {
 			return err
 		}
 	}
+	logrus.Infof("[%s] Successfully started Controller Plane..", ControlRole)
 	return nil
 }
@@ -1,13 +1,10 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 )
 
@@ -17,40 +14,19 @@ type Etcd struct {
 }
 
 func RunEtcdPlane(etcdHosts []hosts.Host, etcdService Etcd) error {
-	logrus.Infof("[Etcd] Building up Etcd Plane..")
+	logrus.Infof("[%s] Building up Etcd Plane..", ETCDRole)
 	for _, host := range etcdHosts {
-		isRunning, err := IsContainerRunning(host, EtcdContainerName)
-		if err != nil {
-			return err
-		}
-		if isRunning {
-			logrus.Infof("[Etcd] Container is already running on host [%s]", host.Hostname)
-			return nil
-		}
-		err = runEtcdContainer(host, etcdService)
+		imageCfg, hostCfg := buildEtcdConfig(host, etcdService)
+		err := docker.DoRunContainer(imageCfg, hostCfg, EtcdContainerName, &host, ETCDRole)
 		if err != nil {
 			return err
 		}
 	}
+	logrus.Infof("[%s] Successfully started Etcd Plane..", ETCDRole)
 	return nil
 }
 
-func runEtcdContainer(host hosts.Host, etcdService Etcd) error {
-	logrus.Debugf("[Etcd] Pulling Image on host [%s]", host.Hostname)
-	err := PullImage(host, etcdService.Image+":"+etcdService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[Etcd] Successfully pulled Etcd image on host [%s]", host.Hostname)
-	err = doRunEtcd(host, etcdService)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[Etcd] Successfully ran Etcd container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunEtcd(host hosts.Host, etcdService Etcd) error {
+func buildEtcdConfig(host hosts.Host, etcdService Etcd) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: etcdService.Image + ":" + etcdService.Version,
 		Cmd: []string{"/usr/local/bin/etcd",
@@ -82,16 +58,7 @@ func doRunEtcd(host hosts.Host, etcdService Etcd) error {
 			},
 		},
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, EtcdContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create Etcd container on host [%s]: %v", host.Hostname, err)
-	}
-
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Etcd container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[Etcd] Successfully started Etcd container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
 
 func getEtcdConnString(hosts []hosts.Host) string {
@@ -1,55 +1,30 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 )
 
 type KubeAPI struct {
-	Version string `yaml:"version"`
-	Image string `yaml:"image"`
-	ServiceClusterIPRange string `yaml:"service_cluster_ip_range"`
+	Version               string `yaml:"version"`
+	Image                 string `yaml:"image"`
+	ServiceClusterIPRange string `yaml:"service_cluster_ip_range"`
 }
 
 func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService KubeAPI) error {
-	isRunning, err := IsContainerRunning(host, KubeAPIContainerName)
-	if err != nil {
-		return err
-	}
-	if isRunning {
-		logrus.Infof("[ControlPlane] KubeAPI is already running on host [%s]", host.Hostname)
-		return nil
-	}
 	etcdConnString := getEtcdConnString(etcdHosts)
-	err = runKubeAPIContainer(host, kubeAPIService, etcdConnString)
+	imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
+	err := docker.DoRunContainer(imageCfg, hostCfg, KubeAPIContainerName, &host, ControlRole)
 	if err != nil {
 		return err
 	}
 	return nil
 }
 
-func runKubeAPIContainer(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string) error {
-	logrus.Debugf("[ControlPlane] Pulling Kube API Image on host [%s]", host.Hostname)
-	err := PullImage(host, kubeAPIService.Image+":"+kubeAPIService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully pulled Kube API image on host [%s]", host.Hostname)
-	err = doRunKubeAPI(host, kubeAPIService, etcdConnString)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully ran Kube API container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunKubeAPI(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string) error {
+func buildKubeAPIConfig(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeAPIService.Image + ":" + kubeAPIService.Version,
 		Cmd: []string{"/hyperkube",
@@ -59,14 +34,21 @@ func doRunKubeAPI(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string
 			"--cloud-provider=",
 			"--allow_privileged=true",
 			"--service-cluster-ip-range=" + kubeAPIService.ServiceClusterIPRange,
-			"--admission-control=NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
+			"--admission-control=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
 			"--runtime-config=batch/v2alpha1",
+			"--runtime-config=authentication.k8s.io/v1beta1=true",
 			"--storage-backend=etcd3",
 			"--etcd-servers=" + etcdConnString,
-			"--advertise-address=" + host.IP},
+			"--advertise-address=" + host.IP,
+			"--client-ca-file=" + pki.CACertPath,
+			"--tls-cert-file=" + pki.KubeAPICertPath,
+			"--tls-private-key-file=" + pki.KubeAPIKeyPath,
+			"--service-account-key-file=" + pki.KubeAPIKeyPath},
 	}
 	hostCfg := &container.HostConfig{
+		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
+		},
 		NetworkMode:   "host",
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 		PortBindings: nat.PortMap{
@@ -78,14 +60,5 @@ func doRunKubeAPI(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string
 			},
 		},
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeAPIContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create Kube API container on host [%s]: %v", host.Hostname, err)
-	}
-
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Kube API container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[ControlPlane] Successfully started Kube API container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
@@ -1,81 +1,52 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 )
 
 type KubeController struct {
-	Version string `yaml:"version"`
-	Image string `yaml:"image"`
-	ClusterCIDR string `yaml:"cluster_cider"`
-	ServiceClusterIPRange string `yaml:"service_cluster_ip_range"`
+	Version               string `yaml:"version"`
+	Image                 string `yaml:"image"`
+	ClusterCIDR           string `yaml:"cluster_cider"`
+	ServiceClusterIPRange string `yaml:"service_cluster_ip_range"`
 }
 
 func runKubeController(host hosts.Host, kubeControllerService KubeController) error {
-	isRunning, err := IsContainerRunning(host, KubeControllerContainerName)
-	if err != nil {
-		return err
-	}
-	if isRunning {
-		logrus.Infof("[ControlPlane] Kube-Controller is already running on host [%s]", host.Hostname)
-		return nil
-	}
-	err = runKubeControllerContainer(host, kubeControllerService)
+	imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
+	err := docker.DoRunContainer(imageCfg, hostCfg, KubeControllerContainerName, &host, ControlRole)
 	if err != nil {
 		return err
 	}
 	return nil
 }
 
-func runKubeControllerContainer(host hosts.Host, kubeControllerService KubeController) error {
-	logrus.Debugf("[ControlPlane] Pulling Kube Controller Image on host [%s]", host.Hostname)
-	err := PullImage(host, kubeControllerService.Image+":"+kubeControllerService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully pulled Kube Controller image on host [%s]", host.Hostname)
-
-	err = doRunKubeController(host, kubeControllerService)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully ran Kube Controller container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunKubeController(host hosts.Host, kubeControllerService KubeController) error {
+func buildKubeControllerConfig(kubeControllerService KubeController) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeControllerService.Image + ":" + kubeControllerService.Version,
 		Cmd: []string{"/hyperkube",
 			"controller-manager",
 			"--address=0.0.0.0",
 			"--cloud-provider=",
-			"--master=http://" + host.IP + ":8080",
+			"--kubeconfig=" + pki.KubeControllerConfigPath,
 			"--enable-hostpath-provisioner=false",
 			"--node-monitor-grace-period=40s",
 			"--pod-eviction-timeout=5m0s",
 			"--v=2",
 			"--allocate-node-cidrs=true",
 			"--cluster-cidr=" + kubeControllerService.ClusterCIDR,
-			"--service-cluster-ip-range=" + kubeControllerService.ServiceClusterIPRange},
+			"--service-cluster-ip-range=" + kubeControllerService.ServiceClusterIPRange,
+			"--service-account-private-key-file=" + pki.KubeAPIKeyPath,
+			"--root-ca-file=" + pki.CACertPath,
+		},
 	}
 	hostCfg := &container.HostConfig{
+		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
+		},
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeControllerContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create Kube Controller container on host [%s]: %v", host.Hostname, err)
-	}
-
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Kube Controller container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[ControlPlane] Successfully started Kube Controller container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
@@ -1,56 +1,30 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 )
 
 type Kubelet struct {
-	Version string `yaml:"version"`
-	Image string `yaml:"image"`
-	ClusterDomain string `yaml:"cluster_domain"`
+	Version             string `yaml:"version"`
+	Image               string `yaml:"image"`
+	ClusterDomain       string `yaml:"cluster_domain"`
+	InfraContainerImage string `yaml:"infra_container_image"`
 }
 
-func runKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
-	isRunning, err := IsContainerRunning(host, KubeletContainerName)
-	if err != nil {
-		return err
-	}
-	if isRunning {
-		logrus.Infof("[WorkerPlane] Kubelet is already running on host [%s]", host.Hostname)
-		return nil
-	}
-	err = runKubeletContainer(host, masterHost, kubeletService, isMaster)
+func runKubelet(host hosts.Host, kubeletService Kubelet, isMaster bool) error {
+	imageCfg, hostCfg := buildKubeletConfig(host, kubeletService, isMaster)
+	err := docker.DoRunContainer(imageCfg, hostCfg, KubeletContainerName, &host, WorkerRole)
 	if err != nil {
 		return err
 	}
 	return nil
 }
 
-func runKubeletContainer(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
-	logrus.Debugf("[WorkerPlane] Pulling Kubelet Image on host [%s]", host.Hostname)
-	err := PullImage(host, kubeletService.Image+":"+kubeletService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[WorkerPlane] Successfully pulled Kubelet image on host [%s]", host.Hostname)
-
-	err = doRunKubelet(host, masterHost, kubeletService, isMaster)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[WorkerPlane] Successfully ran Kubelet container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
+func buildKubeletConfig(host hosts.Host, kubeletService Kubelet, isMaster bool) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeletService.Image + ":" + kubeletService.Version,
 		Cmd: []string{"/hyperkube",
@@ -70,7 +44,8 @@ func doRunKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet
 			"--resolv-conf=/etc/resolv.conf",
 			"--allow-privileged=true",
 			"--cloud-provider=",
-			"--api-servers=http://" + masterHost.IP + ":8080/",
+			"--kubeconfig=" + pki.KubeNodeConfigPath,
+			"--require-kubeconfig=True",
 		},
 	}
 	if isMaster {
@@ -79,6 +54,7 @@ func doRunKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet
 	}
 	hostCfg := &container.HostConfig{
 		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
 			"/etc/cni:/etc/cni:ro",
 			"/opt/cni:/opt/cni:ro",
 			"/etc/resolv.conf:/etc/resolv.conf",
@@ -101,14 +77,5 @@ func doRunKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet
 			},
 		},
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeletContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create Kubelet container on host [%s]: %v", host.Hostname, err)
-	}
-
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Kubelet container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[WorkerPlane] Successfully started Kubelet container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
@@ -1,13 +1,10 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 )
 
 type Kubeproxy struct {
@@ -15,59 +12,33 @@ type Kubeproxy struct {
 	Image string `yaml:"image"`
 }
 
-func runKubeproxy(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
-	isRunning, err := IsContainerRunning(host, KubeproxyContainerName)
-	if err != nil {
-		return err
-	}
-	if isRunning {
-		logrus.Infof("[WorkerPlane] Kubeproxy is already running on host [%s]", host.Hostname)
-		return nil
-	}
-	err = runKubeproxyContainer(host, masterHost, kubeproxyService)
+func runKubeproxy(host hosts.Host, kubeproxyService Kubeproxy) error {
+	imageCfg, hostCfg := buildKubeproxyConfig(host, kubeproxyService)
+	err := docker.DoRunContainer(imageCfg, hostCfg, KubeproxyContainerName, &host, WorkerRole)
 	if err != nil {
 		return err
 	}
 	return nil
 }
 
-func runKubeproxyContainer(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
-	logrus.Debugf("[WorkerPlane] Pulling KubeProxy Image on host [%s]", host.Hostname)
-	err := PullImage(host, kubeproxyService.Image+":"+kubeproxyService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[WorkerPlane] Successfully pulled KubeProxy image on host [%s]", host.Hostname)
-
-	err = doRunKubeProxy(host, masterHost, kubeproxyService)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[WorkerPlane] Successfully ran KubeProxy container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunKubeProxy(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
+func buildKubeproxyConfig(host hosts.Host, kubeproxyService Kubeproxy) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeproxyService.Image + ":" + kubeproxyService.Version,
 		Cmd: []string{"/hyperkube",
 			"proxy",
 			"--v=2",
 			"--healthz-bind-address=0.0.0.0",
-			"--master=http://" + masterHost.IP + ":8080/"},
+			"--kubeconfig=" + pki.KubeProxyConfigPath,
+		},
 	}
 	hostCfg := &container.HostConfig{
+		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
+		},
 		NetworkMode:   "host",
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 		Privileged:    true,
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeproxyContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create KubeProxy container on host [%s]: %v", host.Hostname, err)
-	}
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start KubeProxy container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[WorkerPlane] Successfully started KubeProxy container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
@@ -1,13 +1,10 @@
 package services
 
 import (
-	"context"
-	"fmt"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
+	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/pki"
 )
 
 type Scheduler struct {
@@ -16,57 +13,29 @@ type Scheduler struct {
 }
 
 func runScheduler(host hosts.Host, schedulerService Scheduler) error {
-	isRunning, err := IsContainerRunning(host, SchedulerContainerName)
-	if err != nil {
-		return err
-	}
-	if isRunning {
-		logrus.Infof("[ControlPlane] Scheduler is already running on host [%s]", host.Hostname)
-		return nil
-	}
-	err = runSchedulerContainer(host, schedulerService)
+	imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService)
+	err := docker.DoRunContainer(imageCfg, hostCfg, SchedulerContainerName, &host, ControlRole)
 	if err != nil {
 		return err
 	}
 	return nil
 }
 
-func runSchedulerContainer(host hosts.Host, schedulerService Scheduler) error {
-	logrus.Debugf("[ControlPlane] Pulling Scheduler Image on host [%s]", host.Hostname)
-	err := PullImage(host, schedulerService.Image+":"+schedulerService.Version)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully pulled Scheduler image on host [%s]", host.Hostname)
-
-	err = doRunScheduler(host, schedulerService)
-	if err != nil {
-		return err
-	}
-	logrus.Infof("[ControlPlane] Successfully ran Scheduler container on host [%s]", host.Hostname)
-	return nil
-}
-
-func doRunScheduler(host hosts.Host, schedulerService Scheduler) error {
+func buildSchedulerConfig(host hosts.Host, schedulerService Scheduler) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: schedulerService.Image + ":" + schedulerService.Version,
 		Cmd: []string{"/hyperkube",
 			"scheduler",
 			"--v=2",
 			"--address=0.0.0.0",
-			"--master=http://" + host.IP + ":8080/"},
+			"--kubeconfig=" + pki.KubeSchedulerConfigPath,
+		},
 	}
 	hostCfg := &container.HostConfig{
+		Binds: []string{
+			"/etc/kubernetes:/etc/kubernetes",
+		},
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
-	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, SchedulerContainerName)
-	if err != nil {
-		return fmt.Errorf("Failed to create Scheduler container on host [%s]: %v", host.Hostname, err)
-	}
-
-	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Scheduler container on host [%s]: %v", host.Hostname, err)
-	}
-	logrus.Debugf("[ControlPlane] Successfully started Scheduler container: %s", resp.ID)
-	return nil
+	return imageCfg, hostCfg
 }
@@ -2,14 +2,7 @@ package services
 
 import (
 	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/rancher/rke/hosts"
-	"golang.org/x/net/context"
 	"net"
 )
 
 type Container struct {
@@ -26,9 +19,10 @@ type Services struct {
 }
 
 const (
-	ETCDRole = "etcd"
-	MasterRole = "controlplane"
-	WorkerRole = "worker"
+	ETCDRole    = "etcd"
+	ControlRole = "controlplane"
+	WorkerRole  = "worker"
+
 	KubeAPIContainerName = "kube-api"
 	KubeletContainerName = "kubelet"
 	KubeproxyContainerName = "kube-proxy"
@@ -37,32 +31,17 @@ const (
 	EtcdContainerName = "etcd"
 )
 
-func IsContainerRunning(host hosts.Host, containerName string) (bool, error) {
-	logrus.Debugf("Checking if container %s is running on host [%s]", containerName, host.Hostname)
-	containers, err := host.DClient.ContainerList(context.Background(), types.ContainerListOptions{})
+func GetKubernetesServiceIp(serviceClusterRange string) (net.IP, error) {
+	ip, ipnet, err := net.ParseCIDR(serviceClusterRange)
 	if err != nil {
-		return false, fmt.Errorf("Can't get Docker containers for host [%s]: %v", host.Hostname, err)
-
+		return nil, fmt.Errorf("Failed to get kubernetes service IP: %v", err)
 	}
-	for _, container := range containers {
-		if container.Names[0] == "/"+containerName {
-			return true, nil
+	ip = ip.Mask(ipnet.Mask)
+	for j := len(ip) - 1; j >= 0; j-- {
+		ip[j]++
+		if ip[j] > 0 {
+			break
 		}
 	}
-	return false, nil
-}
-
-func PullImage(host hosts.Host, containerImage string) error {
-	out, err := host.DClient.ImagePull(context.Background(), containerImage, types.ImagePullOptions{})
-	if err != nil {
-		return fmt.Errorf("Can't pull Docker image %s for host [%s]: %v", containerImage, host.Hostname, err)
-	}
-	defer out.Close()
-	if logrus.GetLevel() == logrus.DebugLevel {
-		io.Copy(os.Stdout, out)
-	} else {
-		io.Copy(ioutil.Discard, out)
-	}
-
-	return nil
+	return ip, nil
 }
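GetKubernetesServiceIp masks the parsed CIDR down to its network address and then increments it byte-by-byte with carry, yielding the conventional first address of the service range; that is the address the generated API server certificate must cover. For example:

	ip, err := GetKubernetesServiceIp("10.233.0.0/18")
	if err != nil {
		return err
	}
	// ip.String() == "10.233.0.1"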
@@ -1,34 +0,0 @@
-package services
-
-import (
-	"github.com/Sirupsen/logrus"
-	"github.com/rancher/rke/hosts"
-)
-
-func RunWorkerPlane(masterHosts []hosts.Host, workerHosts []hosts.Host, workerServices Services) error {
-	logrus.Infof("[WorkerPlane] Building up Worker Plane..")
-	for _, host := range masterHosts {
-		// only one master for now
-		err := runKubelet(host, masterHosts[0], workerServices.Kubelet, true)
-		if err != nil {
-			return err
-		}
-		err = runKubeproxy(host, masterHosts[0], workerServices.Kubeproxy)
-		if err != nil {
-			return err
-		}
-	}
-	for _, host := range workerHosts {
-		// run kubelet
-		err := runKubelet(host, masterHosts[0], workerServices.Kubelet, false)
-		if err != nil {
-			return err
-		}
-		// run kubeproxy
-		err = runKubeproxy(host, masterHosts[0], workerServices.Kubeproxy)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
services/workerplane.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package services
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/rancher/rke/hosts"
+)
+
+func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices Services) error {
+	logrus.Infof("[%s] Building up Worker Plane..", WorkerRole)
+	for _, host := range controlHosts {
+		// only one master for now
+		err := runKubelet(host, workerServices.Kubelet, true)
+		if err != nil {
+			return err
+		}
+		err = runKubeproxy(host, workerServices.Kubeproxy)
+		if err != nil {
+			return err
+		}
+	}
+	for _, host := range workerHosts {
+		// run kubelet
+		err := runKubelet(host, workerServices.Kubelet, false)
+		if err != nil {
+			return err
+		}
+		// run kubeproxy
+		err = runKubeproxy(host, workerServices.Kubeproxy)
+		if err != nil {
+			return err
+		}
+	}
+	logrus.Infof("[%s] Successfully started Worker Plane..", WorkerRole)
+	return nil
+}
@@ -14,3 +14,4 @@ github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
 github.com/gogo/protobuf 117892bf1866fbaa2318c03e50e40564c8845457
 github.com/opencontainers/image-spec 7c889fafd04a893f5c5f50b7ab9963d5d64e5242
 github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
+k8s.io/client-go v4.0.0 transitive=true