Mirror of https://github.com/rancher/rke.git (synced 2025-05-09 08:47:43 +00:00)
Update rancher types
This commit is contained in:
parent 75448eb572
commit 37b6c8a65e
@@ -1,6 +1,15 @@
 ---
-auth_type: x509
-network_plugin: flannel
+auth:
+  strategy: x509
+  options:
+    foo: bar
+
+network:
+  plugin: flannel
+  options:
+    foo: bar
 
 hosts:
   - hostname: server1
     ip: 1.1.1.1
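Note: below is a minimal, stand-alone sketch of how the new nested cluster.yml shape is consumed, assuming simplified stand-in types (ClusterConfig, AuthConfig, and NetworkConfig are hypothetical; the real definitions live in github.com/rancher/types/apis/cluster.cattle.io/v1 and are embedded inline into the Cluster struct later in this diff). Parsing uses gopkg.in/yaml.v2, which cluster.go already imports.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Hypothetical stand-ins for the rancher/types structs; only the field shapes
// mirror what this commit reads (Authentication.Strategy, Network.Plugin).
type AuthConfig struct {
	Strategy string            `yaml:"strategy"`
	Options  map[string]string `yaml:"options"`
}

type NetworkConfig struct {
	Plugin  string            `yaml:"plugin"`
	Options map[string]string `yaml:"options"`
}

type ClusterConfig struct {
	Authentication AuthConfig    `yaml:"auth"`
	Network        NetworkConfig `yaml:"network"`
}

func main() {
	data := []byte(`---
auth:
  strategy: x509
  options:
    foo: bar
network:
  plugin: flannel
  options:
    foo: bar
`)
	var c ClusterConfig
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	// The updated code reads these as c.Authentication.Strategy and c.Network.Plugin.
	fmt.Println(c.Authentication.Strategy, c.Network.Plugin) // x509 flannel
}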
@@ -13,7 +13,7 @@ import (
 )
 
 func SetUpAuthentication(kubeCluster, currentCluster *Cluster) error {
-	if kubeCluster.AuthType == X509AuthenticationProvider {
+	if kubeCluster.Authentication.Strategy == X509AuthenticationProvider {
 		var err error
 		if currentCluster != nil {
 			kubeCluster.Certificates, err = getClusterCerts(kubeCluster.KubeClient)
@@ -55,9 +55,12 @@ func getClusterCerts(kubeClient *kubernetes.Clientset) (map[string]pki.Certifica
 		secretKey, _ := cert.ParsePrivateKeyPEM(secret.Data["Key"])
 		secretConfig := string(secret.Data["Config"])
 		certMap[certName] = pki.CertificatePKI{
-			Certificate: secretCert[0],
-			Key:         secretKey.(*rsa.PrivateKey),
-			Config:      secretConfig,
+			Certificate:   secretCert[0],
+			Key:           secretKey.(*rsa.PrivateKey),
+			Config:        secretConfig,
+			EnvName:       string(secret.Data["EnvName"]),
+			ConfigEnvName: string(secret.Data["ConfigEnvName"]),
+			KeyEnvName:    string(secret.Data["KeyEnvName"]),
 		}
 	}
 	logrus.Infof("[certificates] Successfully fetched Cluster certificates from Kubernetes")
@@ -91,7 +94,22 @@ func saveCertToKubernetes(kubeClient *kubernetes.Clientset, crtName string, crt
 			time.Sleep(time.Second * 5)
 			continue
 		}
+		err = k8s.UpdateSecret(kubeClient, "EnvName", []byte(crt.EnvName), crtName)
+		if err != nil {
+			time.Sleep(time.Second * 5)
+			continue
+		}
+		err = k8s.UpdateSecret(kubeClient, "KeyEnvName", []byte(crt.KeyEnvName), crtName)
+		if err != nil {
+			time.Sleep(time.Second * 5)
+			continue
+		}
+		if len(crt.Config) > 0 {
+			err = k8s.UpdateSecret(kubeClient, "ConfigEnvName", []byte(crt.ConfigEnvName), crtName)
+			if err != nil {
+				time.Sleep(time.Second * 5)
+				continue
+			}
+			err = k8s.UpdateSecret(kubeClient, "Config", []byte(crt.Config), crtName)
+			if err != nil {
+				time.Sleep(time.Second * 5)
@@ -7,23 +7,23 @@ import (
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 	yaml "gopkg.in/yaml.v2"
 	"k8s.io/client-go/kubernetes"
 )
 
 type Cluster struct {
-	v1.RKEConfig        `yaml:",inline"`
-	EtcdHosts           []hosts.Host
-	WorkerHosts         []hosts.Host
-	ControlPlaneHosts   []hosts.Host
-	KubeClient          *kubernetes.Clientset
-	KubernetesServiceIP net.IP
-	Certificates        map[string]pki.CertificatePKI
-	ClusterDomain       string
-	ClusterCIDR         string
-	ClusterDNSServer    string
+	v1.RancherKubernetesEngineConfig `yaml:",inline"`
+	EtcdHosts                        []hosts.Host
+	WorkerHosts                      []hosts.Host
+	ControlPlaneHosts                []hosts.Host
+	KubeClient                       *kubernetes.Clientset
+	KubernetesServiceIP              net.IP
+	Certificates                     map[string]pki.CertificatePKI
+	ClusterDomain                    string
+	ClusterCIDR                      string
+	ClusterDNSServer                 string
 }
 
 const (
@@ -80,7 +80,7 @@ func parseClusterFile(clusterFile string) (*Cluster, error) {
 		return nil, err
 	}
 	for i, host := range kubeCluster.Hosts {
-		if len(host.Hostname) == 0 {
+		if len(host.AdvertisedHostname) == 0 {
 			return nil, fmt.Errorf("Hostname for host (%d) is not provided", i+1)
 		} else if len(host.User) == 0 {
 			return nil, fmt.Errorf("User for host (%d) is not provided", i+1)
@@ -37,7 +37,7 @@ func (c *Cluster) InvertIndexHosts() error {
 	c.ControlPlaneHosts = make([]hosts.Host, 0)
 	for _, host := range c.Hosts {
 		for _, role := range host.Role {
-			logrus.Debugf("Host: " + host.Hostname + " has role: " + role)
+			logrus.Debugf("Host: " + host.AdvertisedHostname + " has role: " + role)
 			newHost := hosts.Host{
 				RKEConfigHost: host,
 			}
@@ -49,7 +49,7 @@ func (c *Cluster) InvertIndexHosts() error {
 			case services.WorkerRole:
 				c.WorkerHosts = append(c.WorkerHosts, newHost)
 			default:
-				return fmt.Errorf("Failed to recognize host [%s] role %s", host.Hostname, role)
+				return fmt.Errorf("Failed to recognize host [%s] role %s", host.AdvertisedHostname, role)
 			}
 		}
 	}
@@ -57,7 +57,7 @@ func (c *Cluster) InvertIndexHosts() error {
 }
 
 func (c *Cluster) SetUpHosts() error {
-	if c.AuthType == X509AuthenticationProvider {
+	if c.Authentication.Strategy == X509AuthenticationProvider {
 		logrus.Infof("[certificates] Deploying kubernetes certificates to Cluster nodes")
 		err := pki.DeployCertificatesOnMasters(c.ControlPlaneHosts, c.Certificates)
 		if err != nil {
@@ -42,10 +42,10 @@ func (c *Cluster) buildClusterConfigEnv() []string {
 func (c *Cluster) RunKubectlCmd(kubectlCmd *KubectlCommand) error {
 	h := c.ControlPlaneHosts[0]
 
-	logrus.Debugf("[kubectl] Using host [%s] for deployment", h.Hostname)
+	logrus.Debugf("[kubectl] Using host [%s] for deployment", h.AdvertisedHostname)
 	logrus.Debugf("[kubectl] Pulling kubectl image..")
 
-	if err := docker.PullImage(h.DClient, h.Hostname, KubectlImage); err != nil {
+	if err := docker.PullImage(h.DClient, h.AdvertisedHostname, KubectlImage); err != nil {
 		return err
 	}
 
@@ -62,26 +62,26 @@ func (c *Cluster) RunKubectlCmd(kubectlCmd *KubectlCommand) error {
 	logrus.Debugf("[kubectl] Creating kubectl container..")
 	resp, err := h.DClient.ContainerCreate(context.Background(), imageCfg, nil, nil, KubctlContainer)
 	if err != nil {
-		return fmt.Errorf("Failed to create kubectl container on host [%s]: %v", h.Hostname, err)
+		return fmt.Errorf("Failed to create kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
 	}
 	logrus.Debugf("[kubectl] Container %s created..", resp.ID)
 	if err := h.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start kubectl container on host [%s]: %v", h.Hostname, err)
+		return fmt.Errorf("Failed to start kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
 	}
 	logrus.Debugf("[kubectl] running command: %s", kubectlCmd.Cmd)
 	statusCh, errCh := h.DClient.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
 	select {
 	case err := <-errCh:
 		if err != nil {
-			return fmt.Errorf("Failed to execute kubectl container on host [%s]: %v", h.Hostname, err)
+			return fmt.Errorf("Failed to execute kubectl container on host [%s]: %v", h.AdvertisedHostname, err)
 		}
 	case status := <-statusCh:
 		if status.StatusCode != 0 {
-			return fmt.Errorf("kubectl command failed on host [%s]: exit status %v", h.Hostname, status.StatusCode)
+			return fmt.Errorf("kubectl command failed on host [%s]: exit status %v", h.AdvertisedHostname, status.StatusCode)
 		}
 	}
 	if err := h.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
-		return fmt.Errorf("Failed to remove kubectl container on host[%s]: %v", h.Hostname, err)
+		return fmt.Errorf("Failed to remove kubectl container on host[%s]: %v", h.AdvertisedHostname, err)
 	}
 	return nil
 }
@@ -11,10 +11,10 @@ const (
 )
 
 func (c *Cluster) DeployNetworkPlugin() error {
-	logrus.Infof("[network] Setting up network plugin: %s", c.NetworkPlugin)
+	logrus.Infof("[network] Setting up network plugin: %s", c.Network.Plugin)
 
 	kubectlCmd := &KubectlCommand{
-		Cmd: []string{"apply -f /network/" + c.NetworkPlugin + ".yaml"},
+		Cmd: []string{"apply -f /network/" + c.Network.Plugin + ".yaml"},
 	}
 	logrus.Infof("[network] Executing the deploy command..")
 	err := c.RunKubectlCmd(kubectlCmd)
@@ -37,13 +37,13 @@ func (d *dialer) Dial(network, addr string) (net.Conn, error) {
 	}
 	remote, err := conn.Dial("unix", d.host.DockerSocket)
 	if err != nil {
-		logrus.Fatalf("Error connecting to Docker socket on host [%s]: %v", d.host.Hostname, err)
+		logrus.Fatalf("Error connecting to Docker socket on host [%s]: %v", d.host.AdvertisedHostname, err)
 	}
 	return remote, err
 }
 
 func (h *Host) TunnelUp() error {
-	logrus.Infof("[ssh] Start tunnel for host [%s]", h.Hostname)
+	logrus.Infof("[ssh] Start tunnel for host [%s]", h.AdvertisedHostname)
 
 	dialer := &dialer{
 		host: h,
@@ -56,10 +56,10 @@ func (h *Host) TunnelUp() error {
 
 	// set Docker client
 	var err error
-	logrus.Debugf("Connecting to Docker API for host [%s]", h.Hostname)
+	logrus.Debugf("Connecting to Docker API for host [%s]", h.AdvertisedHostname)
 	h.DClient, err = client.NewClient("unix:///var/run/docker.sock", DockerAPIVersion, httpClient, nil)
 	if err != nil {
-		return fmt.Errorf("Can't connect to Docker for host [%s]: %v", h.Hostname, err)
+		return fmt.Errorf("Can't connect to Docker for host [%s]: %v", h.AdvertisedHostname, err)
 	}
 	return nil
 }
@@ -3,7 +3,7 @@ package hosts
 import (
 	"github.com/docker/docker/client"
 	"github.com/rancher/rke/k8s"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 	"k8s.io/client-go/kubernetes"
 )
@@ -17,7 +17,7 @@ func ReconcileWorkers(currentWorkers []Host, newWorkers []Host, kubeClient *kube
 	for _, currentWorker := range currentWorkers {
 		found := false
 		for _, newWorker := range newWorkers {
-			if currentWorker.Hostname == newWorker.Hostname {
+			if currentWorker.AdvertisedHostname == newWorker.AdvertisedHostname {
 				found = true
 			}
 		}
@@ -31,11 +31,11 @@ func ReconcileWorkers(currentWorkers []Host, newWorkers []Host, kubeClient *kube
 }
 
 func deleteWorkerNode(workerNode *Host, kubeClient *kubernetes.Clientset) error {
-	logrus.Infof("[hosts] Deleting host [%s] from the cluster", workerNode.Hostname)
-	err := k8s.DeleteNode(kubeClient, workerNode.Hostname)
+	logrus.Infof("[hosts] Deleting host [%s] from the cluster", workerNode.AdvertisedHostname)
+	err := k8s.DeleteNode(kubeClient, workerNode.AdvertisedHostname)
 	if err != nil {
 		return err
 	}
-	logrus.Infof("[hosts] Successfully deleted host [%s] from the cluster", workerNode.Hostname)
+	logrus.Infof("[hosts] Successfully deleted host [%s] from the cluster", workerNode.AdvertisedHostname)
 	return nil
 }
@@ -61,8 +61,8 @@ func DeployCertificatesOnWorkers(workerHosts []hosts.Host, crtMap map[string]Cer
 }
 
 func doRunDeployer(host *hosts.Host, containerEnv []string) error {
-	logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.Hostname)
-	err := docker.PullImage(host.DClient, host.Hostname, CrtDownloaderImage)
+	logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.AdvertisedHostname)
+	err := docker.PullImage(host.DClient, host.AdvertisedHostname, CrtDownloaderImage)
 	if err != nil {
 		return err
 	}
@@ -79,15 +79,15 @@ func doRunDeployer(host *hosts.Host, containerEnv []string) error {
 	}
 	resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, CrtDownloaderContainer)
 	if err != nil {
-		return fmt.Errorf("Failed to create Certificates deployer container on host [%s]: %v", host.Hostname, err)
+		return fmt.Errorf("Failed to create Certificates deployer container on host [%s]: %v", host.AdvertisedHostname, err)
 	}
 
 	if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start Certificates deployer container on host [%s]: %v", host.Hostname, err)
+		return fmt.Errorf("Failed to start Certificates deployer container on host [%s]: %v", host.AdvertisedHostname, err)
 	}
 	logrus.Debugf("[certificates] Successfully started Certificate deployer container: %s", resp.ID)
 	for {
-		isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.Hostname, CrtDownloaderContainer)
+		isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.AdvertisedHostname, CrtDownloaderContainer)
 		if err != nil {
 			return err
 		}
@@ -96,7 +96,7 @@ func doRunDeployer(host *hosts.Host, containerEnv []string) error {
 			continue
 		}
 		if err := host.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
-			return fmt.Errorf("Failed to delete Certificates deployer container on host[%s]: %v", host.Hostname, err)
+			return fmt.Errorf("Failed to delete Certificates deployer container on host[%s]: %v", host.AdvertisedHostname, err)
 		}
 		return nil
 	}
@@ -240,7 +240,7 @@ func getAltNames(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP
 		if host.IP != host.AdvertiseAddress {
 			ips = append(ips, net.ParseIP(host.AdvertiseAddress))
 		}
-		dnsNames = append(dnsNames, host.Hostname)
+		dnsNames = append(dnsNames, host.AdvertisedHostname)
 	}
 	ips = append(ips, net.ParseIP("127.0.0.1"))
 	ips = append(ips, KubernetesServiceIP)
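Note: the DNS names and IPs collected by getAltNames above end up as subject alternative names on the generated certificates. A rough, self-contained illustration of that idea using only the Go standard library; the hostnames, addresses, and service IP below are sample values, and the certificate is self-signed only to keep the sketch short (rke's own pki helpers, not shown here, sign against its CA).

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// Gathered the way getAltNames does: one entry per control plane host
	// (advertised hostname / advertise address), plus loopback and the
	// kubernetes service IP. Values here are illustrative.
	dnsNames := []string{"server1", "kubernetes.default.svc.cluster.local"}
	ips := []net.IP{net.ParseIP("192.168.1.5"), net.ParseIP("127.0.0.1"), net.ParseIP("10.233.0.1")}

	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "kube-apiserver"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		DNSNames:     dnsNames, // subject alternative names (DNS)
		IPAddresses:  ips,      // subject alternative names (IP)
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
	}
	// Self-signed for brevity: template doubles as its own parent.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("certificate DER bytes:", len(der))
}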
@@ -7,7 +7,7 @@ import (
 	"testing"
 
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/types/client/cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 const (
@@ -19,10 +19,10 @@ func TestPKI(t *testing.T) {
 	cpHosts := []hosts.Host{
 		hosts.Host{
 			RKEConfigHost: v1.RKEConfigHost{
-				IP:               "1.1.1.1",
-				AdvertiseAddress: "192.168.1.5",
-				Role:             []string{"controlplane"},
-				Hostname:         "server1",
+				IP:                 "1.1.1.1",
+				AdvertiseAddress:   "192.168.1.5",
+				Role:               []string{"controlplane"},
+				AdvertisedHostname: "server1",
 			},
 			DClient: nil,
 		},
@@ -2,7 +2,7 @@ package services
 
 import (
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -1,11 +1,13 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 )
@@ -13,7 +15,7 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
 	logrus.Infof("[%s] Building up Etcd Plane..", ETCDRole)
 	for _, host := range etcdHosts {
 		imageCfg, hostCfg := buildEtcdConfig(host, etcdService)
-		err := docker.DoRunContainer(host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Hostname, ETCDRole)
+		err := docker.DoRunContainer(host.DClient, imageCfg, hostCfg, EtcdContainerName, host.AdvertisedHostname, ETCDRole)
 		if err != nil {
 			return err
 		}
@@ -26,14 +28,14 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService) (*container.Co
 	imageCfg := &container.Config{
 		Image: etcdService.Image,
 		Cmd: []string{"/usr/local/bin/etcd",
-			"--name=etcd-" + host.Hostname,
+			"--name=etcd-" + host.AdvertisedHostname,
 			"--data-dir=/etcd-data",
 			"--advertise-client-urls=http://" + host.AdvertiseAddress + ":2379,http://" + host.AdvertiseAddress + ":4001",
 			"--listen-client-urls=http://0.0.0.0:2379",
 			"--initial-advertise-peer-urls=http://" + host.AdvertiseAddress + ":2380",
 			"--listen-peer-urls=http://0.0.0.0:2380",
 			"--initial-cluster-token=etcd-cluster-1",
-			"--initial-cluster=etcd-" + host.Hostname + "=http://" + host.AdvertiseAddress + ":2380"},
+			"--initial-cluster=etcd-" + host.AdvertisedHostname + "=http://" + host.AdvertiseAddress + ":2380"},
 	}
 	hostCfg := &container.HostConfig{
 		RestartPolicy: container.RestartPolicy{Name: "always"},
@@ -54,7 +56,11 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService) (*container.Co
 			},
 		},
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, etcdService.ExtraArgs...)
+	for arg, value := range etcdService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 
 	return imageCfg, hostCfg
 }
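Note: the ExtraArgs handling above (and repeated for kube-apiserver, kube-controller, kubelet, kube-proxy, and the scheduler below) switches from appending a pre-formed string slice to rendering a map of flag names to values as "--key=value". A small stand-alone sketch of that pattern; the map contents are illustrative, and keys are sorted here only to make the example's output deterministic (the diff itself ranges over the map directly).

package main

import (
	"fmt"
	"sort"
)

// buildExtraArgs mirrors the new pattern: each map entry becomes a "--key=value" flag.
func buildExtraArgs(extraArgs map[string]string) []string {
	keys := make([]string, 0, len(extraArgs))
	for k := range extraArgs {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order for the example only
	args := make([]string, 0, len(extraArgs))
	for _, k := range keys {
		args = append(args, fmt.Sprintf("--%s=%s", k, extraArgs[k]))
	}
	return args
}

func main() {
	// Illustrative values; real keys come from the service's extra args config.
	cmd := []string{"/usr/local/bin/etcd", "--data-dir=/etcd-data"}
	cmd = append(cmd, buildExtraArgs(map[string]string{
		"heartbeat-interval": "500",
		"election-timeout":   "5000",
	})...)
	fmt.Println(cmd)
}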
@@ -1,18 +1,20 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
 	etcdConnString := getEtcdConnString(etcdHosts)
 	imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
-	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Hostname, ControlRole)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole)
 }
 
 func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
@@ -51,6 +53,10 @@ func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdC
 			},
 		},
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, kubeAPIService.ExtraArgs...)
+	for arg, value := range kubeAPIService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 	return imageCfg, hostCfg
 }
@@ -1,16 +1,18 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
 	imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
-	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Hostname, ControlRole)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.AdvertisedHostname, ControlRole)
 }
 
 func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (*container.Config, *container.HostConfig) {
@@ -38,6 +40,9 @@ func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (
 		},
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, kubeControllerService.ExtraArgs...)
+	for arg, value := range kubeControllerService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 	return imageCfg, hostCfg
 }
@@ -1,17 +1,19 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/go-connections/nat"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
 	imageCfg, hostCfg := buildKubeletConfig(host, kubeletService, isMaster)
-	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Hostname, WorkerRole)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.AdvertisedHostname, WorkerRole)
 }
 
 func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) (*container.Config, *container.HostConfig) {
@@ -22,7 +24,7 @@ func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMas
 		"--v=2",
 		"--address=0.0.0.0",
 		"--cluster-domain=" + kubeletService.ClusterDomain,
-		"--hostname-override=" + host.Hostname,
+		"--hostname-override=" + host.AdvertisedHostname,
 		"--pod-infra-container-image=" + kubeletService.InfraContainerImage,
 		"--cgroup-driver=cgroupfs",
 		"--cgroups-per-qos=True",
@@ -67,6 +69,9 @@ func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMas
 			},
 		},
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, kubeletService.ExtraArgs...)
+	for arg, value := range kubeletService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 	return imageCfg, hostCfg
 }
@@ -1,16 +1,18 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
 	imageCfg, hostCfg := buildKubeproxyConfig(host, kubeproxyService)
-	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Hostname, WorkerRole)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.AdvertisedHostname, WorkerRole)
 }
 
 func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
@@ -31,6 +33,9 @@ func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService)
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 		Privileged:    true,
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, kubeproxyService.ExtraArgs...)
+	for arg, value := range kubeproxyService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 	return imageCfg, hostCfg
 }
@@ -1,16 +1,18 @@
 package services
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/types/container"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/pki"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
 func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
 	imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService)
-	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.Hostname, ControlRole)
+	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole)
 }
 
 func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) (*container.Config, *container.HostConfig) {
@@ -29,6 +31,9 @@ func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService)
 		},
 		RestartPolicy: container.RestartPolicy{Name: "always"},
 	}
-	imageCfg.Cmd = append(imageCfg.Cmd, schedulerService.ExtraArgs...)
+	for arg, value := range schedulerService.ExtraArgs {
+		cmd := fmt.Sprintf("--%s=%s", arg, value)
+		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+	}
 	return imageCfg, hostCfg
 }
@@ -2,7 +2,7 @@ package services
 
 import (
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/types/io.cattle.cluster/v1"
+	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -10,7 +10,7 @@ github.com/docker/distribution 3800056b8832cf6075e78b282ac010131d8687b
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 golang.org/x/net 186fd3fc8194a5e9980a82230d69c1ff7134229f
-github.com/rancher/types 582dfda7d374d8a36dd0c7a2385d18c4c2ce5f3c
+github.com/rancher/types a71860ee9f4809a57d2cc7dadf2d74b9b8f2d736
 github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
 github.com/gogo/protobuf 117892bf1866fbaa2318c03e50e40564c8845457
 github.com/opencontainers/image-spec 7c889fafd04a893f5c5f50b7ab9963d5d64e5242