mirror of https://github.com/rancher/rke.git
Set up kubernetes components

parent 84d02e8b42
commit f7649289d4
cluster.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
hosts:
  - hostname: server1
    ip: 1.1.1.1
    user: ubuntu
    role: [controlplane, etcd]
    docker_socket: /var/run/docker.sock
  - hostname: server2
    ip: 2.2.2.2
    user: ubuntu
    role: [worker]

services:
  etcd:
    version: latest
    image: quay.io/coreos/etcd
  kube-api:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube
  kube-controller:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube
  scheduler:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube
  kubelet:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube
  kubeproxy:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube

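Usage note: with this file in the working directory, the cluster is brought up with the new cluster up subcommand defined in cmd/cluster.go below; the --cluster-file flag or the CLUSTER_FILE environment variable points it at a different file.
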
cmd/cluster.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package cmd

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    yaml "gopkg.in/yaml.v2"

    "github.com/Sirupsen/logrus"
    "github.com/rancher/rke/hosts"
    "github.com/rancher/rke/services"
    "github.com/urfave/cli"
)

func ClusterCommand() cli.Command {
    clusterUpFlags := []cli.Flag{
        cli.StringFlag{
            Name:   "cluster-file",
            Usage:  "Specify an alternate cluster YAML file (default: cluster.yml)",
            EnvVar: "CLUSTER_FILE",
        },
    }
    return cli.Command{
        Name:      "cluster",
        ShortName: "cluster",
        Usage:     "Operations on the cluster",
        Flags:     clusterUpFlags,
        Subcommands: []cli.Command{
            cli.Command{
                Name:   "up",
                Usage:  "Bring the cluster up",
                Action: clusterUp,
                Flags:  clusterUpFlags,
            },
        },
    }
}

func clusterUp(ctx *cli.Context) error {
    logrus.Infof("Building up Kubernetes cluster")
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        logrus.Errorf("Failed to bring cluster up: %v", err)
        return err
    }
    logrus.Debugf("Parsing cluster file [%v]", clusterFile)
    servicesLookup, k8shosts, err := parseClusterFile(clusterFile)
    if err != nil {
        logrus.Errorf("Failed to parse the cluster file: %v", err)
        return err
    }
    for i := range k8shosts {
        // Set up socket tunneling
        err = k8shosts[i].TunnelUp(ctx)
        if err != nil {
            return err
        }
        defer k8shosts[i].DClient.Close()
    }
    etcdHosts, cpHosts, workerHosts := hosts.DivideHosts(k8shosts)
    err = services.RunEtcdPlane(etcdHosts, servicesLookup.Services.Etcd)
    if err != nil {
        logrus.Errorf("[Etcd] Failed to bring up Etcd Plane: %v", err)
        return err
    }
    err = services.RunControlPlane(cpHosts, etcdHosts, servicesLookup.Services)
    if err != nil {
        logrus.Errorf("[ControlPlane] Failed to bring up Control Plane: %v", err)
        return err
    }
    err = services.RunWorkerPlane(cpHosts, workerHosts, servicesLookup.Services)
    if err != nil {
        logrus.Errorf("[WorkerPlane] Failed to bring up Worker Plane: %v", err)
        return err
    }
    return nil
}

func resolveClusterFile(ctx *cli.Context) (string, error) {
    clusterFile := ctx.String("cluster-file")
    if len(clusterFile) == 0 {
        clusterFile = "cluster.yml"
    }
    fp, err := filepath.Abs(clusterFile)
    if err != nil {
        return "", fmt.Errorf("failed to lookup current directory name: %v", err)
    }
    file, err := os.Open(fp)
    if err != nil {
        return "", fmt.Errorf("Can not find cluster.yml: %v", err)
    }
    defer file.Close()
    buf, err := ioutil.ReadAll(file)
    if err != nil {
        return "", fmt.Errorf("failed to read file: %v", err)
    }
    clusterFile = string(buf)

    return clusterFile, nil
}

func parseClusterFile(clusterFile string) (services.Container, []hosts.Host, error) {
    logrus.Debugf("cluster file: \n%s", clusterFile)
    // parse hosts
    k8shosts := hosts.Hosts{}
    err := yaml.Unmarshal([]byte(clusterFile), &k8shosts)
    if err != nil {
        return services.Container{}, nil, err
    }
    for i, host := range k8shosts.Hosts {
        if len(host.Hostname) == 0 {
            return services.Container{}, nil, fmt.Errorf("Hostname for host (%d) is not provided", i+1)
        } else if len(host.User) == 0 {
            return services.Container{}, nil, fmt.Errorf("User for host (%d) is not provided", i+1)
        } else if len(host.Role) == 0 {
            return services.Container{}, nil, fmt.Errorf("Role for host (%d) is not provided", i+1)
        }
        for _, role := range host.Role {
            if role != services.ETCDRole && role != services.MasterRole && role != services.WorkerRole {
                return services.Container{}, nil, fmt.Errorf("Role [%s] for host (%d) is not recognized", role, i+1)
            }
        }
    }
    // parse services
    k8sPlanes := services.Container{}
    err = yaml.Unmarshal([]byte(clusterFile), &k8sPlanes)
    if err != nil {
        return services.Container{}, nil, err
    }
    return k8sPlanes, k8shosts.Hosts, nil
}

hosts/dialer.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package hosts

import (
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "os"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/client"
    "github.com/urfave/cli"
    "golang.org/x/crypto/ssh"
)

type dialer struct {
    host *Host
}

const (
    DockerAPIVersion = "1.24"
)

func (d *dialer) Dial(network, addr string) (net.Conn, error) {
    sshAddr := d.host.IP + ":22"
    // Build SSH client configuration
    cfg, err := makeSSHConfig(d.host.User)
    if err != nil {
        logrus.Fatalf("Error configuring SSH: %v", err)
    }
    // Establish connection with SSH server
    conn, err := ssh.Dial("tcp", sshAddr, cfg)
    if err != nil {
        logrus.Fatalf("Error establishing SSH connection: %v", err)
    }
    if len(d.host.DockerSocket) == 0 {
        d.host.DockerSocket = "/var/run/docker.sock"
    }
    remote, err := conn.Dial("unix", d.host.DockerSocket)
    if err != nil {
        logrus.Fatalf("Error connecting to Docker socket on host [%s]: %v", d.host.Hostname, err)
    }
    return remote, err
}

func (h *Host) TunnelUp(ctx *cli.Context) error {
    logrus.Infof("[SSH] Start tunnel for host [%s]", h.Hostname)

    dialer := &dialer{
        host: h,
    }
    httpClient := &http.Client{
        Transport: &http.Transport{
            Dial: dialer.Dial,
        },
    }

    // set Docker client
    var err error
    logrus.Debugf("Connecting to Docker API for host [%s]", h.Hostname)
    h.DClient, err = client.NewClient("unix:///var/run/docker.sock", DockerAPIVersion, httpClient, nil)
    if err != nil {
        return fmt.Errorf("Can't connect to Docker for host [%s]: %v", h.Hostname, err)
    }
    return nil
}

func privateKeyPath() string {
    return os.Getenv("HOME") + "/.ssh/id_rsa"
}

// Get private key for ssh authentication
func parsePrivateKey(keyPath string) (ssh.Signer, error) {
    buff, _ := ioutil.ReadFile(keyPath)
    return ssh.ParsePrivateKey(buff)
}

func makeSSHConfig(user string) (*ssh.ClientConfig, error) {
    key, err := parsePrivateKey(privateKeyPath())
    if err != nil {
        return nil, err
    }

    config := ssh.ClientConfig{
        User: user,
        Auth: []ssh.AuthMethod{
            ssh.PublicKeys(key),
        },
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
    }

    return &config, nil
}

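A minimal sketch, not part of the commit, of how the tunnel is meant to be used: once TunnelUp succeeds, the host's DClient talks to the remote Docker daemon through the SSH connection, so ordinary Docker API calls run against the remote host. The standalone program and its literal host values are hypothetical; the types and methods are the ones defined in this diff.

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/rancher/rke/hosts"
)

func main() {
    // Hypothetical host entry matching the sample cluster.yml.
    h := hosts.Host{Hostname: "server1", IP: "1.1.1.1", User: "ubuntu"}
    // TunnelUp ignores its cli.Context argument in this commit, so nil is fine here.
    if err := h.TunnelUp(nil); err != nil {
        panic(err)
    }
    defer h.DClient.Close()

    // Every Docker API call now goes through the SSH-tunneled transport.
    containers, err := h.DClient.ContainerList(context.Background(), types.ContainerListOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("containers on", h.Hostname, ":", len(containers))
}
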
hosts/hosts.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package hosts

import "github.com/docker/docker/client"

type Hosts struct {
    Hosts []Host `yaml:"hosts"`
}

type Host struct {
    IP           string   `yaml:"ip"`
    Role         []string `yaml:"role"`
    Hostname     string   `yaml:"hostname"`
    User         string   `yaml:"user"`
    Sudo         bool     `yaml:"sudo"`
    DockerSocket string   `yaml:"docker_socket"`
    DClient      *client.Client
}

func DivideHosts(hosts []Host) ([]Host, []Host, []Host) {
    etcdHosts := []Host{}
    cpHosts := []Host{}
    workerHosts := []Host{}
    for _, host := range hosts {
        for _, role := range host.Role {
            if role == "etcd" {
                etcdHosts = append(etcdHosts, host)
            }
            if role == "controlplane" {
                cpHosts = append(cpHosts, host)
            }
            if role == "worker" {
                workerHosts = append(workerHosts, host)
            }
        }
    }
    return etcdHosts, cpHosts, workerHosts
}

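For illustration, a small program, not part of the commit, that feeds DivideHosts the two hosts declared in the sample cluster.yml: server1 lands in both the etcd and controlplane groups, server2 in the worker group.

package main

import (
    "fmt"

    "github.com/rancher/rke/hosts"
)

func main() {
    // The two hosts declared in the sample cluster.yml above.
    k8shosts := []hosts.Host{
        {Hostname: "server1", IP: "1.1.1.1", Role: []string{"controlplane", "etcd"}},
        {Hostname: "server2", IP: "2.2.2.2", Role: []string{"worker"}},
    }
    etcdHosts, cpHosts, workerHosts := hosts.DivideHosts(k8shosts)
    // Prints "1 1 1": server1 backs etcd and the control plane, server2 is the worker.
    fmt.Println(len(etcdHosts), len(cpHosts), len(workerHosts))
}
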
main.go (23 lines changed)
@@ -4,20 +4,33 @@ import (
    "os"

    "github.com/Sirupsen/logrus"
+   "github.com/rancher/rke/cmd"
    "github.com/urfave/cli"
)

-var VERSION = "v0.0.0-dev"
+var VERSION = "v0.1.0-dev"

func main() {
    app := cli.NewApp()
    app.Name = "rke"
    app.Version = VERSION
-   app.Usage = "You need help!"
-   app.Action = func(c *cli.Context) error {
-       logrus.Info("I'm a turkey")
+   app.Usage = "Rancher Kubernetes Engine, Running kubernetes cluster in the cloud"
+   app.Before = func(ctx *cli.Context) error {
+       if ctx.GlobalBool("debug") {
+           logrus.SetLevel(logrus.DebugLevel)
+       }
        return nil
    }
+
+   app.Author = "Rancher Labs, Inc."
+   app.Email = ""
+   app.Commands = []cli.Command{
+       cmd.ClusterCommand(),
+   }
+   app.Flags = []cli.Flag{
+       cli.BoolFlag{
+           Name:  "debug,d",
+           Usage: "Debug logging",
+       },
+   }
    app.Run(os.Args)
}

services/controlplane.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package services

import (
    "github.com/Sirupsen/logrus"
    "github.com/rancher/rke/hosts"
)

func RunControlPlane(masterHosts []hosts.Host, etcdHosts []hosts.Host, masterServices Services) error {
    logrus.Infof("[ControlPlane] Building up Controller Plane..")
    for _, host := range masterHosts {
        // run kubeapi
        err := runKubeAPI(host, etcdHosts, masterServices.KubeAPI)
        if err != nil {
            return err
        }
        // run kubecontroller
        err = runKubeController(host, masterServices.KubeController)
        if err != nil {
            return err
        }
        // run scheduler
        err = runScheduler(host, masterServices.Scheduler)
        if err != nil {
            return err
        }
    }
    return nil
}

services/etcd.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/go-connections/nat"
    "github.com/rancher/rke/hosts"
)

type Etcd struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func RunEtcdPlane(etcdHosts []hosts.Host, etcdService Etcd) error {
    logrus.Infof("[Etcd] Building up Etcd Plane..")
    for _, host := range etcdHosts {
        isRunning, err := IsContainerRunning(host, EtcdContainerName)
        if err != nil {
            return err
        }
        if isRunning {
            logrus.Infof("[Etcd] Container is already running on host [%s]", host.Hostname)
            return nil
        }
        err = runEtcdContainer(host, etcdService)
        if err != nil {
            return err
        }
    }
    return nil
}

func runEtcdContainer(host hosts.Host, etcdService Etcd) error {
    logrus.Debugf("[Etcd] Pulling Image on host [%s]", host.Hostname)
    err := PullImage(host, etcdService.Image+":"+etcdService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[Etcd] Successfully pulled Etcd image on host [%s]", host.Hostname)
    err = doRunEtcd(host, etcdService)
    if err != nil {
        return err
    }
    logrus.Infof("[Etcd] Successfully ran Etcd container on host [%s]", host.Hostname)
    return nil
}

func doRunEtcd(host hosts.Host, etcdService Etcd) error {
    imageCfg := &container.Config{
        Image: etcdService.Image + ":" + etcdService.Version,
        Cmd: []string{"/usr/local/bin/etcd",
            "--name=etcd-" + host.Hostname,
            "--data-dir=/etcd-data",
            "--advertise-client-urls=http://" + host.IP + ":2379,http://" + host.IP + ":4001",
            "--listen-client-urls=http://0.0.0.0:2379",
            "--initial-advertise-peer-urls=http://" + host.IP + ":2380",
            "--listen-peer-urls=http://0.0.0.0:2380",
            "--initial-cluster-token=etcd-cluster-1",
            "--initial-cluster=etcd-" + host.Hostname + "=http://" + host.IP + ":2380"},
    }
    hostCfg := &container.HostConfig{
        RestartPolicy: container.RestartPolicy{Name: "always"},
        Binds: []string{
            "/var/lib/etcd:/etcd-data"},
        PortBindings: nat.PortMap{
            "2379/tcp": []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "2379",
                },
            },
            "2380/tcp": []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "2380",
                },
            },
        },
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, EtcdContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create Etcd container on host [%s]: %v", host.Hostname, err)
    }

    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start Etcd container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[Etcd] Successfully started Etcd container: %s", resp.ID)
    return nil
}

func getEtcdConnString(hosts []hosts.Host) string {
    connString := ""
    for i, host := range hosts {
        connString += "http://" + host.IP + ":2379"
        if i < (len(hosts) - 1) {
            connString += ","
        }
    }
    return connString
}

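A hypothetical test, not part of the commit, that documents what getEtcdConnString produces; the second host (server3 / 3.3.3.3) is invented for the example. The resulting comma-separated list is what kubeapi.go hands to kube-apiserver via --etcd-servers.

package services

import (
    "testing"

    "github.com/rancher/rke/hosts"
)

func TestGetEtcdConnString(t *testing.T) {
    etcdHosts := []hosts.Host{
        {Hostname: "server1", IP: "1.1.1.1"},
        {Hostname: "server3", IP: "3.3.3.3"}, // hypothetical second etcd host
    }
    want := "http://1.1.1.1:2379,http://3.3.3.3:2379"
    if got := getEtcdConnString(etcdHosts); got != want {
        t.Fatalf("got %q, want %q", got, want)
    }
}
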
services/kubeapi.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/go-connections/nat"
    "github.com/rancher/rke/hosts"
)

type KubeAPI struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService KubeAPI) error {
    isRunning, err := IsContainerRunning(host, KubeAPIContainerName)
    if err != nil {
        return err
    }
    if isRunning {
        logrus.Infof("[ControlPlane] KubeAPI is already running on host [%s]", host.Hostname)
        return nil
    }
    etcdConnString := getEtcdConnString(etcdHosts)
    err = runKubeAPIContainer(host, kubeAPIService, etcdConnString)
    if err != nil {
        return err
    }
    return nil
}

func runKubeAPIContainer(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string) error {
    logrus.Debugf("[ControlPlane] Pulling Kube API Image on host [%s]", host.Hostname)
    err := PullImage(host, kubeAPIService.Image+":"+kubeAPIService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully pulled Kube API image on host [%s]", host.Hostname)
    err = doRunKubeAPI(host, kubeAPIService, etcdConnString)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully ran Kube API container on host [%s]", host.Hostname)
    return nil
}

func doRunKubeAPI(host hosts.Host, kubeAPIService KubeAPI, etcdConnString string) error {
    imageCfg := &container.Config{
        Image: kubeAPIService.Image + ":" + kubeAPIService.Version,
        Cmd: []string{"/hyperkube",
            "apiserver",
            "--insecure-bind-address=0.0.0.0",
            "--insecure-port=8080",
            "--cloud-provider=",
            "--allow_privileged=true",
            "--service-cluster-ip-range=10.233.0.0/18",
            "--admission-control=NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
            "--runtime-config=batch/v2alpha1",
            "--runtime-config=authentication.k8s.io/v1beta1=true",
            "--storage-backend=etcd3",
            "--etcd-servers=" + etcdConnString,
            "--advertise-address=" + host.IP},
    }
    hostCfg := &container.HostConfig{
        NetworkMode:   "host",
        RestartPolicy: container.RestartPolicy{Name: "always"},
        PortBindings: nat.PortMap{
            "8080/tcp": []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "8080",
                },
            },
        },
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeAPIContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create Kube API container on host [%s]: %v", host.Hostname, err)
    }

    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start Kube API container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[ControlPlane] Successfully started Kube API container: %s", resp.ID)
    return nil
}

services/kubecontroller.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/rancher/rke/hosts"
)

type KubeController struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func runKubeController(host hosts.Host, kubeControllerService KubeController) error {
    isRunning, err := IsContainerRunning(host, KubeControllerContainerName)
    if err != nil {
        return err
    }
    if isRunning {
        logrus.Infof("[ControlPlane] Kube-Controller is already running on host [%s]", host.Hostname)
        return nil
    }
    err = runKubeControllerContainer(host, kubeControllerService)
    if err != nil {
        return err
    }
    return nil
}

func runKubeControllerContainer(host hosts.Host, kubeControllerService KubeController) error {
    logrus.Debugf("[ControlPlane] Pulling Kube Controller Image on host [%s]", host.Hostname)
    err := PullImage(host, kubeControllerService.Image+":"+kubeControllerService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully pulled Kube Controller image on host [%s]", host.Hostname)

    err = doRunKubeController(host, kubeControllerService)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully ran Kube Controller container on host [%s]", host.Hostname)
    return nil
}

func doRunKubeController(host hosts.Host, kubeControllerService KubeController) error {
    imageCfg := &container.Config{
        Image: kubeControllerService.Image + ":" + kubeControllerService.Version,
        Cmd: []string{"/hyperkube",
            "controller-manager",
            "--address=0.0.0.0",
            "--cloud-provider=",
            "--master=http://" + host.IP + ":8080",
            "--enable-hostpath-provisioner=false",
            "--node-monitor-grace-period=40s",
            "--pod-eviction-timeout=5m0s",
            "--v=2",
            "--allocate-node-cidrs=true",
            "--cluster-cidr=10.233.64.0/18",
            "--service-cluster-ip-range=10.233.0.0/18"},
    }
    hostCfg := &container.HostConfig{
        RestartPolicy: container.RestartPolicy{Name: "always"},
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeControllerContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create Kube Controller container on host [%s]: %v", host.Hostname, err)
    }

    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start Kube Controller container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[ControlPlane] Successfully started Kube Controller container: %s", resp.ID)
    return nil
}

services/kubelet.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/go-connections/nat"
    "github.com/rancher/rke/hosts"
)

type Kubelet struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func runKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
    isRunning, err := IsContainerRunning(host, KubeletContainerName)
    if err != nil {
        return err
    }
    if isRunning {
        logrus.Infof("[WorkerPlane] Kubelet is already running on host [%s]", host.Hostname)
        return nil
    }
    err = runKubeletContainer(host, masterHost, kubeletService, isMaster)
    if err != nil {
        return err
    }
    return nil
}

func runKubeletContainer(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
    logrus.Debugf("[WorkerPlane] Pulling Kubelet Image on host [%s]", host.Hostname)
    err := PullImage(host, kubeletService.Image+":"+kubeletService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[WorkerPlane] Successfully pulled Kubelet image on host [%s]", host.Hostname)

    err = doRunKubelet(host, masterHost, kubeletService, isMaster)
    if err != nil {
        return err
    }
    logrus.Infof("[WorkerPlane] Successfully ran Kubelet container on host [%s]", host.Hostname)
    return nil
}

func doRunKubelet(host hosts.Host, masterHost hosts.Host, kubeletService Kubelet, isMaster bool) error {
    imageCfg := &container.Config{
        Image: kubeletService.Image + ":" + kubeletService.Version,
        Cmd: []string{"/hyperkube",
            "kubelet",
            "--v=2",
            "--address=0.0.0.0",
            "--cluster-domain=cluster.local",
            "--hostname-override=" + host.Hostname,
            "--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0",
            "--cgroup-driver=cgroupfs",
            "--cgroups-per-qos=True",
            "--enforce-node-allocatable=",
            "--cluster-dns=10.233.0.3",
            "--network-plugin=cni",
            "--cni-conf-dir=/etc/cni/net.d",
            "--cni-bin-dir=/opt/cni/bin",
            "--resolv-conf=/etc/resolv.conf",
            "--allow-privileged=true",
            "--cloud-provider=",
            "--api-servers=http://" + masterHost.IP + ":8080/",
        },
    }
    if isMaster {
        imageCfg.Cmd = append(imageCfg.Cmd, "--register-with-taints=node-role.kubernetes.io/master=:NoSchedule")
        imageCfg.Cmd = append(imageCfg.Cmd, "--node-labels=node-role.kubernetes.io/master=true")
    }
    hostCfg := &container.HostConfig{
        Binds: []string{
            "/etc/cni:/etc/cni:ro",
            "/opt/cni:/opt/cni:ro",
            "/etc/resolv.conf:/etc/resolv.conf",
            "/sys:/sys:ro",
            "/var/lib/docker:/var/lib/docker:rw",
            "/var/lib/kubelet:/var/lib/kubelet:shared",
            "/var/run:/var/run:rw",
            "/run:/run",
            "/dev:/host/dev"},
        NetworkMode:   "host",
        PidMode:       "host",
        Privileged:    true,
        RestartPolicy: container.RestartPolicy{Name: "always"},
        PortBindings: nat.PortMap{
            "8080/tcp": []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "8080",
                },
            },
        },
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeletContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create Kubelet container on host [%s]: %v", host.Hostname, err)
    }

    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start Kubelet container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[WorkerPlane] Successfully started Kubelet container: %s", resp.ID)
    return nil
}

services/kubeproxy.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/rancher/rke/hosts"
)

type Kubeproxy struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func runKubeproxy(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
    isRunning, err := IsContainerRunning(host, KubeproxyContainerName)
    if err != nil {
        return err
    }
    if isRunning {
        logrus.Infof("[WorkerPlane] Kubeproxy is already running on host [%s]", host.Hostname)
        return nil
    }
    err = runKubeproxyContainer(host, masterHost, kubeproxyService)
    if err != nil {
        return err
    }
    return nil
}

func runKubeproxyContainer(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
    logrus.Debugf("[WorkerPlane] Pulling KubeProxy Image on host [%s]", host.Hostname)
    err := PullImage(host, kubeproxyService.Image+":"+kubeproxyService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[WorkerPlane] Successfully pulled KubeProxy image on host [%s]", host.Hostname)

    err = doRunKubeProxy(host, masterHost, kubeproxyService)
    if err != nil {
        return err
    }
    logrus.Infof("[WorkerPlane] Successfully ran KubeProxy container on host [%s]", host.Hostname)
    return nil
}

func doRunKubeProxy(host hosts.Host, masterHost hosts.Host, kubeproxyService Kubeproxy) error {
    imageCfg := &container.Config{
        Image: kubeproxyService.Image + ":" + kubeproxyService.Version,
        Cmd: []string{"/hyperkube",
            "proxy",
            "--v=2",
            "--healthz-bind-address=0.0.0.0",
            "--master=http://" + masterHost.IP + ":8080/"},
    }
    hostCfg := &container.HostConfig{
        NetworkMode:   "host",
        RestartPolicy: container.RestartPolicy{Name: "always"},
        Privileged:    true,
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, KubeproxyContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create KubeProxy container on host [%s]: %v", host.Hostname, err)
    }
    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start KubeProxy container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[WorkerPlane] Successfully started KubeProxy container: %s", resp.ID)
    return nil
}

services/scheduler.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package services

import (
    "context"
    "fmt"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/rancher/rke/hosts"
)

type Scheduler struct {
    Version string `yaml:"version"`
    Image   string `yaml:"image"`
}

func runScheduler(host hosts.Host, schedulerService Scheduler) error {
    isRunning, err := IsContainerRunning(host, SchedulerContainerName)
    if err != nil {
        return err
    }
    if isRunning {
        logrus.Infof("[ControlPlane] Scheduler is already running on host [%s]", host.Hostname)
        return nil
    }
    err = runSchedulerContainer(host, schedulerService)
    if err != nil {
        return err
    }
    return nil
}

func runSchedulerContainer(host hosts.Host, schedulerService Scheduler) error {
    logrus.Debugf("[ControlPlane] Pulling Scheduler Image on host [%s]", host.Hostname)
    err := PullImage(host, schedulerService.Image+":"+schedulerService.Version)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully pulled Scheduler image on host [%s]", host.Hostname)

    err = doRunScheduler(host, schedulerService)
    if err != nil {
        return err
    }
    logrus.Infof("[ControlPlane] Successfully ran Scheduler container on host [%s]", host.Hostname)
    return nil
}

func doRunScheduler(host hosts.Host, schedulerService Scheduler) error {
    imageCfg := &container.Config{
        Image: schedulerService.Image + ":" + schedulerService.Version,
        Cmd: []string{"/hyperkube",
            "scheduler",
            "--v=2",
            "--address=0.0.0.0",
            "--master=http://" + host.IP + ":8080/"},
    }
    hostCfg := &container.HostConfig{
        RestartPolicy: container.RestartPolicy{Name: "always"},
    }
    resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, SchedulerContainerName)
    if err != nil {
        return fmt.Errorf("Failed to create Scheduler container on host [%s]: %v", host.Hostname, err)
    }

    if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
        return fmt.Errorf("Failed to start Scheduler container on host [%s]: %v", host.Hostname, err)
    }
    logrus.Debugf("[ControlPlane] Successfully started Scheduler container: %s", resp.ID)
    return nil
}

services/services.go (new file, 68 lines)
@@ -0,0 +1,68 @@
package services

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api/types"
    "github.com/rancher/rke/hosts"
    "golang.org/x/net/context"
)

type Container struct {
    Services Services `yaml:"services"`
}

type Services struct {
    Etcd           Etcd           `yaml:"etcd"`
    KubeAPI        KubeAPI        `yaml:"kube-api"`
    KubeController KubeController `yaml:"kube-controller"`
    Scheduler      Scheduler      `yaml:"scheduler"`
    Kubelet        Kubelet        `yaml:"kubelet"`
    Kubeproxy      Kubeproxy      `yaml:"kubeproxy"`
}

const (
    ETCDRole                    = "etcd"
    MasterRole                  = "controlplane"
    WorkerRole                  = "worker"
    KubeAPIContainerName        = "kube-api"
    KubeletContainerName        = "kubelet"
    KubeproxyContainerName      = "kube-proxy"
    KubeControllerContainerName = "kube-controller"
    SchedulerContainerName      = "scheduler"
    EtcdContainerName           = "etcd"
)

func IsContainerRunning(host hosts.Host, containerName string) (bool, error) {
    logrus.Debugf("Checking if container %s is running on host [%s]", containerName, host.Hostname)
    containers, err := host.DClient.ContainerList(context.Background(), types.ContainerListOptions{})
    if err != nil {
        return false, fmt.Errorf("Can't get Docker containers for host [%s]: %v", host.Hostname, err)
    }
    for _, container := range containers {
        if container.Names[0] == "/"+containerName {
            return true, nil
        }
    }
    return false, nil
}

func PullImage(host hosts.Host, containerImage string) error {
    out, err := host.DClient.ImagePull(context.Background(), containerImage, types.ImagePullOptions{})
    if err != nil {
        return fmt.Errorf("Can't pull Docker image %s for host [%s]: %v", containerImage, host.Hostname, err)
    }
    defer out.Close()
    if logrus.GetLevel() == logrus.DebugLevel {
        io.Copy(os.Stdout, out)
    } else {
        io.Copy(ioutil.Discard, out)
    }

    return nil
}

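A short sketch, not part of the commit, showing how the services block of the sample cluster.yml unmarshals into these structs; parseClusterFile in cmd/cluster.go performs the same yaml.Unmarshal against the full file.

package main

import (
    "fmt"

    "github.com/rancher/rke/services"
    yaml "gopkg.in/yaml.v2"
)

func main() {
    // Trimmed copy of the services section from the sample cluster.yml.
    data := `
services:
  etcd:
    version: latest
    image: quay.io/coreos/etcd
  kube-api:
    version: v1.7.5_coreos.0
    image: quay.io/coreos/hyperkube
`
    lookup := services.Container{}
    if err := yaml.Unmarshal([]byte(data), &lookup); err != nil {
        panic(err)
    }
    fmt.Println(lookup.Services.Etcd.Image)    // quay.io/coreos/etcd
    fmt.Println(lookup.Services.KubeAPI.Image) // quay.io/coreos/hyperkube
}
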
services/worker.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package services

import (
    "github.com/Sirupsen/logrus"
    "github.com/rancher/rke/hosts"
)

func RunWorkerPlane(masterHosts []hosts.Host, workerHosts []hosts.Host, workerServices Services) error {
    logrus.Infof("[WorkerPlane] Building up Worker Plane..")
    for _, host := range masterHosts {
        // only one master for now
        err := runKubelet(host, masterHosts[0], workerServices.Kubelet, true)
        if err != nil {
            return err
        }
        err = runKubeproxy(host, masterHosts[0], workerServices.Kubeproxy)
        if err != nil {
            return err
        }
    }
    for _, host := range workerHosts {
        // run kubelet
        err := runKubelet(host, masterHosts[0], workerServices.Kubelet, false)
        if err != nil {
            return err
        }
        // run kubeproxy
        err = runKubeproxy(host, masterHosts[0], workerServices.Kubeproxy)
        if err != nil {
            return err
        }
    }
    return nil
}

vendor.conf (15 lines changed)
@@ -1,5 +1,16 @@
# package
github.com/rancher/rke

github.com/Sirupsen/logrus v0.10.0
github.com/urfave/cli v1.18.0
+golang.org/x/crypto 2509b142fb2b797aa7587dad548f113b2c0f20ce
+gopkg.in/yaml.v2 eb3733d160e74a9c7e442f435eb3bea458e1d19f
+github.com/docker/docker ecf4125b85e0faa57d2739348e0d453c1d24d10c
+github.com/docker/distribution 3800056b8832cf6075e78b282ac010131d8687bc
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+golang.org/x/net c73622c77280266305273cb545f54516ced95b93
+github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
+github.com/gogo/protobuf 117892bf1866fbaa2318c03e50e40564c8845457
+github.com/opencontainers/image-spec 7c889fafd04a893f5c5f50b7ab9963d5d64e5242
+github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265