mirror of https://github.com/rancher/rke.git (synced 2025-08-02 07:43:04 +00:00)

commit 41c48877ba (parent c77d3b51be): Structure and config changes
cluster.yml (16 changed lines)
@@ -14,18 +14,18 @@ network:
   options:
     foo: bar
 
-hosts:
-  - advertised_hostname: server1
-    ip: 1.1.1.1
+nodes:
+  - address: 1.1.1.1
     user: ubuntu
     role: [controlplane, etcd]
     docker_socket: /var/run/docker.sock
-    advertise_address: 10.1.1.1
-  - advertised_hostname: server2
-    ip: 2.2.2.2
+  - address: 2.2.2.2
     user: ubuntu
     role: [worker]
-    advertise_address: 10.2.2.2
+  - address: example.com
+    user: ubuntu
+    role: [worker]
+    hostname_override: node3
+    internal_address: 192.168.1.6
 
 services:
   etcd:
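The renamed keys map onto a node struct along these lines; the field names are taken from the diff, while the yaml tags and comments are assumptions (the real type lives in github.com/rancher/types):

// Sketch of the node config implied by the new cluster.yml keys.
// Field names follow the diff; yaml tags are assumed, not copied from source.
type RKEConfigNode struct {
    Address          string   `yaml:"address"`           // SSH address (IP or resolvable name)
    User             string   `yaml:"user"`              // SSH user
    Role             []string `yaml:"role"`              // controlplane, etcd, worker
    DockerSocket     string   `yaml:"docker_socket"`     // e.g. /var/run/docker.sock
    HostnameOverride string   `yaml:"hostname_override"` // Kubernetes node name override
    InternalAddress  string   `yaml:"internal_address"`  // private address, defaults to Address
}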
@@ -56,7 +56,7 @@ func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {
 
     logrus.Infof("[addons] Executing deploy job..")
 
-    addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].AdvertisedHostname, c.Services.KubeAPI.Image)
+    addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].HostnameOverride, c.Services.KubeAPI.Image)
     err = c.ApplySystemAddonExcuteJob(addonJob)
     if err != nil {
         return fmt.Errorf("Failed to deploy addon execute job: %v", err)
@@ -33,12 +33,19 @@ type Cluster struct {
 }
 
 const (
-    X509AuthenticationProvider = "x509"
-    DefaultClusterConfig       = "cluster.yml"
-    StateConfigMapName         = "cluster-state"
-    UpdateStateTimeout         = 30
-    GetStateTimeout            = 30
-    KubernetesClientTimeOut    = 30
+    X509AuthenticationProvider   = "x509"
+    DefaultClusterConfig         = "cluster.yml"
+    DefaultServiceClusterIPRange = "10.233.0.0/18"
+    DefaultClusterCIDR           = "10.233.64.0/18"
+    DefaultClusterDNSService     = "10.233.0.3"
+    DefaultClusterDomain         = "cluster.local"
+    DefaultInfraContainerImage   = "gcr.io/google_containers/pause-amd64:3.0"
+    DefaultAuthStrategy          = "x509"
+    DefaultNetworkPlugin         = "flannel"
+    StateConfigMapName           = "cluster-state"
+    UpdateStateTimeout           = 30
+    GetStateTimeout              = 30
+    KubernetesClientTimeOut      = 30
 )
 
 func (c *Cluster) DeployClusterPlanes() error {
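As a quick sanity check on the new defaults: the cluster DNS service IP has to fall inside the service cluster IP range, and the values above do line up. A minimal standalone sketch verifying that with the standard library (values copied from the constants above):

package main

import (
    "fmt"
    "net"
)

func main() {
    // Values from the new defaults above.
    _, svcRange, _ := net.ParseCIDR("10.233.0.0/18")
    dns := net.ParseIP("10.233.0.3")
    // kube-dns must get a ClusterIP out of the service range.
    fmt.Println(svcRange.Contains(dns)) // true
}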
@@ -96,26 +103,46 @@ func parseClusterFile(clusterFile string) (*Cluster, error) {
     if err != nil {
         return nil, err
     }
-    for i, host := range kubeCluster.Hosts {
-        if len(host.AdvertisedHostname) == 0 {
-            return nil, fmt.Errorf("Hostname for host (%d) is not provided", i+1)
-        } else if len(host.User) == 0 {
-            return nil, fmt.Errorf("User for host (%d) is not provided", i+1)
-        } else if len(host.Role) == 0 {
-            return nil, fmt.Errorf("Role for host (%d) is not provided", i+1)
-        } else if host.AdvertiseAddress == "" {
-            // if control_plane_ip is not set,
-            // default to the main IP
-            kubeCluster.Hosts[i].AdvertiseAddress = host.IP
-        }
-        for _, role := range host.Role {
-            if role != services.ETCDRole && role != services.ControlRole && role != services.WorkerRole {
-                return nil, fmt.Errorf("Role [%s] for host (%d) is not recognized", role, i+1)
-            }
-        }
-    }
+    // Setting cluster Defaults
+    kubeCluster.setClusterDefaults()
+
     return &kubeCluster, nil
 }
 
+func (c *Cluster) setClusterDefaults() {
+    for i, host := range c.Nodes {
+        if len(host.InternalAddress) == 0 {
+            c.Nodes[i].InternalAddress = c.Nodes[i].Address
+        }
+        if len(host.HostnameOverride) == 0 {
+            // This is a temporary modification
+            c.Nodes[i].HostnameOverride = c.Nodes[i].Address
+        }
+    }
+    if len(c.Services.KubeAPI.ServiceClusterIPRange) == 0 {
+        c.Services.KubeAPI.ServiceClusterIPRange = DefaultServiceClusterIPRange
+    }
+    if len(c.Services.KubeController.ServiceClusterIPRange) == 0 {
+        c.Services.KubeController.ServiceClusterIPRange = DefaultServiceClusterIPRange
+    }
+    if len(c.Services.KubeController.ClusterCIDR) == 0 {
+        c.Services.KubeController.ClusterCIDR = DefaultClusterCIDR
+    }
+    if len(c.Services.Kubelet.ClusterDNSServer) == 0 {
+        c.Services.Kubelet.ClusterDNSServer = DefaultClusterDNSService
+    }
+    if len(c.Services.Kubelet.ClusterDomain) == 0 {
+        c.Services.Kubelet.ClusterDomain = DefaultClusterDomain
+    }
+    if len(c.Services.Kubelet.InfraContainerImage) == 0 {
+        c.Services.Kubelet.InfraContainerImage = DefaultInfraContainerImage
+    }
+    if len(c.Authentication.Strategy) == 0 {
+        c.Authentication.Strategy = DefaultAuthStrategy
+    }
+    if len(c.Network.Plugin) == 0 {
+        c.Network.Plugin = DefaultNetworkPlugin
+    }
+}
+
 func GetLocalKubeConfig(configPath string) string {
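The repeated if len(x) == 0 blocks in setClusterDefaults could be collapsed with a tiny helper; this is a hypothetical refactor for illustration, not what the commit does:

// setDefault writes def into field only when field is still empty.
// Hypothetical helper; the commit spells each check out explicitly.
func setDefault(field *string, def string) {
    if len(*field) == 0 {
        *field = def
    }
}

// Usage sketch:
//   setDefault(&c.Services.KubeAPI.ServiceClusterIPRange, DefaultServiceClusterIPRange)
//   setDefault(&c.Network.Plugin, DefaultNetworkPlugin)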
@@ -144,11 +171,11 @@ func ReconcileCluster(kubeCluster, currentCluster *Cluster) error {
     cpToDelete := hosts.GetToDeleteHosts(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
     for _, toDeleteHost := range cpToDelete {
         if err := hosts.DeleteNode(&toDeleteHost, kubeClient); err != nil {
-            return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.AdvertisedHostname)
+            return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
         }
         // attempting to clean up the host
         if err := reconcileHostCleaner(toDeleteHost, key, false); err != nil {
-            logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.AdvertisedHostname, err)
+            logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
             continue
         }
     }
@@ -157,11 +184,11 @@ func ReconcileCluster(kubeCluster, currentCluster *Cluster) error {
     wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts)
     for _, toDeleteHost := range wpToDelete {
         if err := hosts.DeleteNode(&toDeleteHost, kubeClient); err != nil {
-            return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.AdvertisedHostname)
+            return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
         }
         // attempting to clean up the host
         if err := reconcileHostCleaner(toDeleteHost, key, true); err != nil {
-            logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.AdvertisedHostname, err)
+            logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
             continue
         }
     }
@@ -201,7 +228,7 @@ func rebuildLocalAdminConfig(kubeCluster *Cluster) error {
     currentKubeConfig := kubeCluster.Certificates[pki.KubeAdminCommonName]
     caCrt := kubeCluster.Certificates[pki.CACertName].Certificate
     newConfig := pki.GetKubeConfigX509WithData(
-        "https://"+kubeCluster.ControlPlaneHosts[0].IP+":6443",
+        "https://"+kubeCluster.ControlPlaneHosts[0].Address+":6443",
         pki.KubeAdminCommonName,
         string(cert.EncodeCertPEM(caCrt)),
         string(cert.EncodeCertPEM(currentKubeConfig.Certificate)),
@@ -48,11 +48,11 @@ func (c *Cluster) InvertIndexHosts() error {
     c.EtcdHosts = make([]hosts.Host, 0)
     c.WorkerHosts = make([]hosts.Host, 0)
     c.ControlPlaneHosts = make([]hosts.Host, 0)
-    for _, host := range c.Hosts {
+    for _, host := range c.Nodes {
         for _, role := range host.Role {
-            logrus.Debugf("Host: " + host.AdvertisedHostname + " has role: " + role)
+            logrus.Debugf("Host: " + host.Address + " has role: " + role)
             newHost := hosts.Host{
-                RKEConfigHost: host,
+                RKEConfigNode: host,
             }
             switch role {
             case services.ETCDRole:
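The inverted index built above is plain role-to-hosts bucketing; a standalone sketch of the same idea with hypothetical values, for context:

package main

import "fmt"

func main() {
    // Hypothetical nodes: address -> roles, mirroring c.Nodes above.
    nodes := map[string][]string{
        "1.1.1.1": {"controlplane", "etcd"},
        "2.2.2.2": {"worker"},
    }
    // Bucket host addresses by role, as InvertIndexHosts does with hosts.Host values.
    byRole := map[string][]string{}
    for addr, roles := range nodes {
        for _, role := range roles {
            byRole[role] = append(byRole[role], addr)
        }
    }
    fmt.Println(byRole["etcd"]) // [1.1.1.1]
}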
@@ -62,7 +62,7 @@ func (c *Cluster) InvertIndexHosts() error {
             case services.WorkerRole:
                 c.WorkerHosts = append(c.WorkerHosts, newHost)
             default:
-                return fmt.Errorf("Failed to recognize host [%s] role %s", host.AdvertisedHostname, role)
+                return fmt.Errorf("Failed to recognize host [%s] role %s", host.Address, role)
             }
         }
     }
@@ -11,16 +11,19 @@ import (
 
 const (
     NetworkPluginResourceName = "rke-netwok-plugin"
+    FlannelNetworkPlugin      = "flannel"
+    CalicoNetworkPlugin       = "calico"
+    CanalNetworkPlugin        = "canal"
 )
 
 func (c *Cluster) DeployNetworkPlugin() error {
     logrus.Infof("[network] Setting up network plugin: %s", c.Network.Plugin)
     switch c.Network.Plugin {
-    case "flannel":
+    case FlannelNetworkPlugin:
         return c.doFlannelDeploy()
-    case "calico":
+    case CalicoNetworkPlugin:
         return c.doCalicoDeploy()
-    case "canal":
+    case CanalNetworkPlugin:
         return c.doCanalDeploy()
     default:
         return fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
@@ -3,6 +3,8 @@ package cluster
 
 import (
     "fmt"
+    "strings"
+
     "github.com/rancher/rke/services"
 )
 
 func (c *Cluster) ValidateCluster() error {
@@ -10,19 +12,79 @@ func (c *Cluster) ValidateCluster() error {
     if len(c.ControlPlaneHosts) == 0 {
         return fmt.Errorf("Cluster must have at least one control plane host")
     }
-    if len(c.EtcdHosts) == 0 {
-        return fmt.Errorf("Cluster must have at least one etcd host")
+    if len(c.EtcdHosts)%2 == 0 {
+        return fmt.Errorf("Cluster must have odd number of etcd nodes")
     }
     if len(c.WorkerHosts) == 0 {
         return fmt.Errorf("Cluster must have at least one worker plane host")
     }
 
+    // validate hosts options
+    if err := validateHostsOptions(c); err != nil {
+        return err
+    }
+
+    // validate Auth options
+    if err := validateAuthOptions(c); err != nil {
+        return err
+    }
+
+    // validate Network options
+    if err := validateNetworkOptions(c); err != nil {
+        return err
+    }
+
     // validate services options
-    err := validateServicesOption(c)
-    if err != nil {
+    if err := validateServicesOptions(c); err != nil {
         return err
     }
     return nil
 }
 
-func validateServicesOption(c *Cluster) error {
+func validateAuthOptions(c *Cluster) error {
+    if c.Authentication.Strategy != DefaultAuthStrategy {
+        return fmt.Errorf("Authentication strategy [%s] is not supported", c.Authentication.Strategy)
+    }
+    return nil
+}
+
+func validateNetworkOptions(c *Cluster) error {
+    if c.Network.Plugin != FlannelNetworkPlugin && c.Network.Plugin != CalicoNetworkPlugin && c.Network.Plugin != CanalNetworkPlugin {
+        return fmt.Errorf("Network plugin [%s] is not supported", c.Network.Plugin)
+    }
+    return nil
+}
+
+func validateHostsOptions(c *Cluster) error {
+    for i, host := range c.Nodes {
+        if len(host.Address) == 0 {
+            return fmt.Errorf("Address for host (%d) is not provided", i+1)
+        }
+        if len(host.User) == 0 {
+            return fmt.Errorf("User for host (%d) is not provided", i+1)
+        }
+        if len(host.Role) == 0 {
+            return fmt.Errorf("Role for host (%d) is not provided", i+1)
+        }
+        for _, role := range host.Role {
+            if role != services.ETCDRole && role != services.ControlRole && role != services.WorkerRole {
+                return fmt.Errorf("Role [%s] for host (%d) is not recognized", role, i+1)
+            }
+        }
+        k := 0
+        for _, role := range host.Role {
+            if role == services.ControlRole || role == services.WorkerRole {
+                k++
+            }
+        }
+        if k > 1 {
+            return fmt.Errorf("Host (%d) can't contain both worker and controlplane roles", i+1)
+        }
+    }
+    return nil
+}
+
+func validateServicesOptions(c *Cluster) error {
     servicesOptions := map[string]string{
         "etcd_image":     c.Services.Etcd.Image,
         "kube_api_image": c.Services.KubeAPI.Image,
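Note that the new etcd check rejects every even count, including zero, so the old at-least-one check is subsumed (0 % 2 == 0). A standalone sketch of the rule with hypothetical cluster sizes:

package main

import "fmt"

func validEtcdCount(n int) error {
    // Mirrors the check above: even counts (including zero) are rejected,
    // because etcd quorum wants an odd number of members.
    if n%2 == 0 {
        return fmt.Errorf("Cluster must have odd number of etcd nodes")
    }
    return nil
}

func main() {
    for _, n := range []int{0, 1, 2, 3} {
        fmt.Println(n, validEtcdCount(n))
    }
    // 0 and 2 error; 1 and 3 pass.
}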
@@ -42,7 +42,11 @@ func ConfigCommand() cli.Command {
 
 func getConfig(reader *bufio.Reader, text, def string) (string, error) {
     for {
-        fmt.Printf("%s [%s]: ", text, def)
+        if def == "" {
+            fmt.Printf("%s [%s]: ", text, "none")
+        } else {
+            fmt.Printf("%s [%s]: ", text, def)
+        }
         input, err := reader.ReadString('\n')
         if err != nil {
             return "", err
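A minimal standalone version of the prompt-with-default pattern above (show a default, fall back to it on empty input); the helper name here is hypothetical:

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// prompt mimics getConfig: print the default, return it when input is empty.
func prompt(reader *bufio.Reader, text, def string) (string, error) {
    shown := def
    if shown == "" {
        shown = "none"
    }
    fmt.Printf("%s [%s]: ", text, shown)
    input, err := reader.ReadString('\n')
    if err != nil {
        return "", err
    }
    if input = strings.TrimSpace(input); input == "" {
        return def, nil
    }
    return input, nil
}

func main() {
    r := bufio.NewReader(os.Stdin)
    path, _ := prompt(r, "SSH Private Key Path", "~/.ssh/id_rsa")
    fmt.Println("using:", path)
}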
@@ -81,10 +85,17 @@ func clusterConfig(ctx *cli.Context) error {
 
     // Generate empty configuration file
     if ctx.Bool("empty") {
-        cluster.Hosts = make([]v1.RKEConfigHost, 1)
+        cluster.Nodes = make([]v1.RKEConfigNode, 1)
         return writeConfig(&cluster, configFile, print)
     }
 
+    // Get SSH private key path
+    sshKeyPath, err := getConfig(reader, "SSH Private Key Path", "~/.ssh/id_rsa")
+    if err != nil {
+        return err
+    }
+    cluster.SSHKeyPath = sshKeyPath
+
     // Get number of hosts
     numberOfHostsString, err := getConfig(reader, "Number of Hosts", "3")
     if err != nil {
@@ -96,13 +107,13 @@ func clusterConfig(ctx *cli.Context) error {
     }
 
     // Get Hosts config
-    cluster.Hosts = make([]v1.RKEConfigHost, 0)
+    cluster.Nodes = make([]v1.RKEConfigNode, 0)
     for i := 0; i < numberOfHostsInt; i++ {
         hostCfg, err := getHostConfig(reader, i)
         if err != nil {
             return err
         }
-        cluster.Hosts = append(cluster.Hosts, *hostCfg)
+        cluster.Nodes = append(cluster.Nodes, *hostCfg)
     }
 
     // Get Network config
@@ -129,27 +140,21 @@ func clusterConfig(ctx *cli.Context) error {
     return writeConfig(&cluster, configFile, print)
 }
 
-func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigHost, error) {
-    host := v1.RKEConfigHost{}
-    advertisedHostname, err := getConfig(reader, fmt.Sprintf("Hostname of host (%d)", index+1), "")
+func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigNode, error) {
+    host := v1.RKEConfigNode{}
+    address, err := getConfig(reader, fmt.Sprintf("SSH Address of host (%d)", index+1), "")
     if err != nil {
         return nil, err
     }
-    host.AdvertisedHostname = advertisedHostname
+    host.Address = address
 
-    sshIP, err := getConfig(reader, fmt.Sprintf("SSH IP of host (%s)", advertisedHostname), "")
+    sshUser, err := getConfig(reader, fmt.Sprintf("SSH User of host (%s)", address), "ubuntu")
     if err != nil {
         return nil, err
     }
-    host.IP = sshIP
+    host.User = sshUser
 
-    advertisedIP, err := getConfig(reader, fmt.Sprintf("Advertised IP of host (%s)", advertisedHostname), "")
-    if err != nil {
-        return nil, err
-    }
-    host.AdvertiseAddress = advertisedIP
-
-    isControlHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) a control host (y/n)?", advertisedHostname), "y")
+    isControlHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) a control host (y/n)?", address), "y")
     if err != nil {
         return nil, err
     }
@@ -157,7 +162,7 @@ func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigHost, error) {
         host.Role = append(host.Role, services.ControlRole)
     }
 
-    isWorkerHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) a worker host (y/n)?", advertisedHostname), "n")
+    isWorkerHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) a worker host (y/n)?", address), "n")
     if err != nil {
         return nil, err
     }
@@ -165,7 +170,7 @@ func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigHost, error) {
         host.Role = append(host.Role, services.WorkerRole)
     }
 
-    isEtcdHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) an Etcd host (y/n)?", advertisedHostname), "n")
+    isEtcdHost, err := getConfig(reader, fmt.Sprintf("Is host (%s) an Etcd host (y/n)?", address), "n")
     if err != nil {
         return nil, err
     }
@@ -173,13 +178,19 @@ func getHostConfig(reader *bufio.Reader, index int) (*v1.RKEConfigHost, error) {
         host.Role = append(host.Role, services.ETCDRole)
     }
 
-    sshUser, err := getConfig(reader, fmt.Sprintf("SSH User of host (%s)", advertisedHostname), "ubuntu")
+    hostnameOverride, err := getConfig(reader, fmt.Sprintf("Override Hostname of host (%s)", address), "")
     if err != nil {
         return nil, err
     }
-    host.User = sshUser
+    host.HostnameOverride = hostnameOverride
 
-    dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socket path on host (%s)", advertisedHostname), "/var/run/docker.sock")
+    internalAddress, err := getConfig(reader, fmt.Sprintf("Internal IP of host (%s)", address), "")
+    if err != nil {
+        return nil, err
+    }
+    host.InternalAddress = internalAddress
+
+    dockerSocketPath, err := getConfig(reader, fmt.Sprintf("Docker socket path on host (%s)", address), "/var/run/docker.sock")
     if err != nil {
         return nil, err
     }
@@ -89,7 +89,7 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
         return APIURL, caCrt, clientCert, clientKey, err
     }
 
-    APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].IP + ":6443")
+    APIURL = fmt.Sprintf("https://" + kubeCluster.ControlPlaneHosts[0].Address + ":6443")
     caCrt = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.CACertName].Certificate))
     clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
    clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
@@ -21,7 +21,7 @@ const (
 )
 
 func (d *dialer) Dial(network, addr string) (net.Conn, error) {
-    sshAddr := d.host.IP + ":22"
+    sshAddr := d.host.Address + ":22"
     // Build SSH client configuration
     cfg, err := makeSSHConfig(d.host.User, d.signer)
     if err != nil {
@@ -37,13 +37,13 @@ func (d *dialer) Dial(network, addr string) (net.Conn, error) {
     }
     remote, err := conn.Dial("unix", d.host.DockerSocket)
     if err != nil {
-        return nil, fmt.Errorf("Error connecting to Docker socket on host [%s]: %v", d.host.AdvertisedHostname, err)
+        return nil, fmt.Errorf("Error connecting to Docker socket on host [%s]: %v", d.host.Address, err)
     }
     return remote, err
 }
 
 func (h *Host) TunnelUp(signer ssh.Signer) error {
-    logrus.Infof("[ssh] Start tunnel for host [%s]", h.AdvertisedHostname)
+    logrus.Infof("[ssh] Start tunnel for host [%s]", h.Address)
 
     dialer := &dialer{
         host: h,
@@ -57,10 +57,10 @@ func (h *Host) TunnelUp(signer ssh.Signer) error {
 
     // set Docker client
     var err error
-    logrus.Debugf("Connecting to Docker API for host [%s]", h.AdvertisedHostname)
+    logrus.Debugf("Connecting to Docker API for host [%s]", h.Address)
     h.DClient, err = client.NewClient("unix:///var/run/docker.sock", DockerAPIVersion, httpClient, nil)
     if err != nil {
-        return fmt.Errorf("Can't connect to Docker for host [%s]: %v", h.AdvertisedHostname, err)
+        return fmt.Errorf("Can't connect to Docker for host [%s]: %v", h.Address, err)
     }
     return nil
 }
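For context, the tunnel pattern above (SSH to the node, then dial the remote Docker socket through the SSH connection) can be sketched roughly as follows. makeSSHConfig is internal to RKE, so this hypothetical standalone version builds a minimal ssh.ClientConfig itself; host-key checking is disabled purely for illustration:

package tunnel

import (
    "fmt"
    "net"

    "golang.org/x/crypto/ssh"
)

// dialDockerOverSSH opens an SSH connection to the node, then dials the
// remote Docker socket through it, mirroring the dialer.Dial flow above.
// A sketch only: real code needs host-key verification and cleanup.
func dialDockerOverSSH(address, user string, signer ssh.Signer, dockerSocket string) (net.Conn, error) {
    cfg := &ssh.ClientConfig{
        User:            user,
        Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // illustration only
    }
    conn, err := ssh.Dial("tcp", address+":22", cfg)
    if err != nil {
        return nil, fmt.Errorf("error establishing SSH connection to [%s]: %v", address, err)
    }
    // Dial the unix socket on the remote side of the tunnel.
    remote, err := conn.Dial("unix", dockerSocket)
    if err != nil {
        return nil, fmt.Errorf("error connecting to Docker socket on host [%s]: %v", address, err)
    }
    return remote, nil
}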
@@ -13,7 +13,7 @@ import (
 )
 
 type Host struct {
-    v1.RKEConfigHost
+    v1.RKEConfigNode
     DClient *client.Client
 }
@@ -28,7 +28,7 @@ const (
 )
 
 func (h *Host) CleanUp() error {
-    logrus.Infof("[hosts] Cleaning up host [%s]", h.AdvertisedHostname)
+    logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
     toCleanDirs := []string{
         ToCleanEtcdDir,
         ToCleanSSLDir,
@@ -36,9 +36,9 @@ func (h *Host) CleanUp() error {
         ToCleanCNIBin,
         ToCleanCalicoRun,
     }
-    logrus.Infof("[hosts] Running cleaner container on host [%s]", h.AdvertisedHostname)
+    logrus.Infof("[hosts] Running cleaner container on host [%s]", h.Address)
     imageCfg, hostCfg := buildCleanerConfig(h, toCleanDirs)
-    if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.AdvertisedHostname, CleanerContainerName); err != nil {
+    if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.Address, CleanerContainerName); err != nil {
         return err
     }
@@ -46,26 +46,26 @@ func (h *Host) CleanUp() error {
         return err
     }
 
-    logrus.Infof("[hosts] Removing cleaner container on host [%s]", h.AdvertisedHostname)
-    if err := docker.RemoveContainer(h.DClient, h.AdvertisedHostname, CleanerContainerName); err != nil {
+    logrus.Infof("[hosts] Removing cleaner container on host [%s]", h.Address)
+    if err := docker.RemoveContainer(h.DClient, h.Address, CleanerContainerName); err != nil {
         return err
     }
-    logrus.Infof("[hosts] Successfully cleaned up host [%s]", h.AdvertisedHostname)
+    logrus.Infof("[hosts] Successfully cleaned up host [%s]", h.Address)
     return nil
 }
 
 func DeleteNode(toDeleteHost *Host, kubeClient *kubernetes.Clientset) error {
-    logrus.Infof("[hosts] Cordoning host [%s]", toDeleteHost.AdvertisedHostname)
-    err := k8s.CordonUncordon(kubeClient, toDeleteHost.AdvertisedHostname, true)
+    logrus.Infof("[hosts] Cordoning host [%s]", toDeleteHost.Address)
+    err := k8s.CordonUncordon(kubeClient, toDeleteHost.HostnameOverride, true)
     if err != nil {
         return err
     }
-    logrus.Infof("[hosts] Deleting host [%s] from the cluster", toDeleteHost.AdvertisedHostname)
-    err = k8s.DeleteNode(kubeClient, toDeleteHost.AdvertisedHostname)
+    logrus.Infof("[hosts] Deleting host [%s] from the cluster", toDeleteHost.Address)
+    err = k8s.DeleteNode(kubeClient, toDeleteHost.HostnameOverride)
     if err != nil {
         return err
     }
-    logrus.Infof("[hosts] Successfully deleted host [%s] from the cluster", toDeleteHost.AdvertisedHostname)
+    logrus.Infof("[hosts] Successfully deleted host [%s] from the cluster", toDeleteHost.Address)
     return nil
 }
@@ -74,7 +74,7 @@ func GetToDeleteHosts(currentHosts, configHosts []Host) []Host {
     for _, currentHost := range currentHosts {
         found := false
         for _, newHost := range configHosts {
-            if currentHost.AdvertisedHostname == newHost.AdvertisedHostname {
+            if currentHost.Address == newHost.Address {
                 found = true
             }
         }
@@ -90,8 +90,9 @@ func IsHostListChanged(currentHosts, configHosts []Host) bool {
     for _, host := range currentHosts {
         found := false
         for _, configHost := range configHosts {
-            if host.AdvertisedHostname == configHost.AdvertisedHostname {
+            if host.Address == configHost.Address {
                 found = true
+                break
             }
         }
         if !found {
@@ -101,8 +102,9 @@ func IsHostListChanged(currentHosts, configHosts []Host) bool {
     for _, host := range configHosts {
         found := false
         for _, currentHost := range currentHosts {
-            if host.AdvertisedHostname == currentHost.AdvertisedHostname {
+            if host.Address == currentHost.Address {
                 found = true
+                break
             }
         }
         if !found {
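The two loops above amount to an O(n*m) set comparison on Address; a hypothetical map-based equivalent of the same change detection, for illustration only:

package main

import "fmt"

// hostListChanged reports whether the two address sets differ,
// matching what IsHostListChanged computes with nested loops.
func hostListChanged(current, config []string) bool {
    cur := make(map[string]bool, len(current))
    for _, a := range current {
        cur[a] = true
    }
    cfg := make(map[string]bool, len(config))
    for _, a := range config {
        cfg[a] = true
        if !cur[a] {
            return true // new host added
        }
    }
    for _, a := range current {
        if !cfg[a] {
            return true // host removed
        }
    }
    return false
}

func main() {
    fmt.Println(hostListChanged([]string{"1.1.1.1"}, []string{"1.1.1.1", "2.2.2.2"})) // true
}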
main.go (2 changed lines)
@@ -8,7 +8,7 @@ import (
     "github.com/urfave/cli"
 )
 
-var VERSION = "v0.0.2-dev"
+var VERSION = "v0.0.6-dev"
 
 func main() {
     if err := mainErr(); err != nil {
@@ -61,8 +61,8 @@ func DeployCertificatesOnWorkers(workerHosts []hosts.Host, crtMap map[string]Cer
 }
 
 func doRunDeployer(host *hosts.Host, containerEnv []string) error {
-    logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.AdvertisedHostname)
-    err := docker.PullImage(host.DClient, host.AdvertisedHostname, CrtDownloaderImage)
+    logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.Address)
+    err := docker.PullImage(host.DClient, host.Address, CrtDownloaderImage)
     if err != nil {
         return err
     }
@@ -79,15 +79,15 @@ func doRunDeployer(host *hosts.Host, containerEnv []string) error {
     }
     resp, err := host.DClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, CrtDownloaderContainer)
     if err != nil {
-        return fmt.Errorf("Failed to create Certificates deployer container on host [%s]: %v", host.AdvertisedHostname, err)
+        return fmt.Errorf("Failed to create Certificates deployer container on host [%s]: %v", host.Address, err)
     }
 
     if err := host.DClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-        return fmt.Errorf("Failed to start Certificates deployer container on host [%s]: %v", host.AdvertisedHostname, err)
+        return fmt.Errorf("Failed to start Certificates deployer container on host [%s]: %v", host.Address, err)
     }
     logrus.Debugf("[certificates] Successfully started Certificate deployer container: %s", resp.ID)
     for {
-        isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.AdvertisedHostname, CrtDownloaderContainer)
+        isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.Address, CrtDownloaderContainer)
         if err != nil {
             return err
         }
@@ -96,7 +96,7 @@ func doRunDeployer(host *hosts.Host, containerEnv []string) error {
         continue
     }
     if err := host.DClient.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{}); err != nil {
-        return fmt.Errorf("Failed to delete Certificates deployer container on host[%s]: %v", host.AdvertisedHostname, err)
+        return fmt.Errorf("Failed to delete Certificates deployer container on host [%s]: %v", host.Address, err)
     }
     return nil
 }
pki/pki.go (24 changed lines)
@@ -167,7 +167,7 @@ func generateCerts(cpHosts []hosts.Host, clusterDomain, localConfigPath string,
         Certificate: kubeAdminCrt,
         Key:         kubeAdminKey,
         Config: GetKubeConfigX509WithData(
-            "https://"+cpHosts[0].IP+":6443",
+            "https://"+cpHosts[0].Address+":6443",
             KubeAdminCommonName,
             string(cert.EncodeCertPEM(caCrt)),
             string(cert.EncodeCertPEM(kubeAdminCrt)),
@@ -250,11 +250,25 @@ func GetAltNames(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP
     ips := []net.IP{}
     dnsNames := []string{}
     for _, host := range cpHosts {
-        ips = append(ips, net.ParseIP(host.IP))
-        if host.IP != host.AdvertiseAddress {
-            ips = append(ips, net.ParseIP(host.AdvertiseAddress))
+        // Check if node address is a valid IP
+        if nodeIP := net.ParseIP(host.Address); nodeIP != nil {
+            ips = append(ips, nodeIP)
+        } else {
+            dnsNames = append(dnsNames, host.Address)
         }
-        dnsNames = append(dnsNames, host.AdvertisedHostname)
+
+        // Check if node internal address is a valid IP
+        if len(host.InternalAddress) != 0 && host.InternalAddress != host.Address {
+            if internalIP := net.ParseIP(host.InternalAddress); internalIP != nil {
+                ips = append(ips, internalIP)
+            } else {
+                dnsNames = append(dnsNames, host.InternalAddress)
+            }
+        }
+        // Add hostname to the ALT dns names
+        if len(host.HostnameOverride) != 0 && host.HostnameOverride != host.Address {
+            dnsNames = append(dnsNames, host.HostnameOverride)
+        }
     }
     ips = append(ips, net.ParseIP("127.0.0.1"))
     ips = append(ips, KubernetesServiceIP)
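The classification above hinges on net.ParseIP returning nil for anything that is not a literal IP, which is how names get routed into the DNS SAN list instead of the IP SAN list. A standalone sketch with hypothetical host values:

package main

import (
    "fmt"
    "net"
)

func main() {
    // Hypothetical control plane addresses, mirroring the new node fields.
    addresses := []string{"1.1.1.1", "example.com"}

    ips := []net.IP{}
    dnsNames := []string{}
    for _, addr := range addresses {
        // net.ParseIP returns nil for non-IP strings.
        if ip := net.ParseIP(addr); ip != nil {
            ips = append(ips, ip)
        } else {
            dnsNames = append(dnsNames, addr)
        }
    }
    fmt.Println(ips)      // [1.1.1.1]
    fmt.Println(dnsNames) // [example.com]
}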
@@ -18,11 +18,11 @@ const (
 func TestPKI(t *testing.T) {
     cpHosts := []hosts.Host{
         hosts.Host{
-            RKEConfigHost: v1.RKEConfigHost{
-                IP:                 "1.1.1.1",
-                AdvertiseAddress:   "192.168.1.5",
-                Role:               []string{"controlplane"},
-                AdvertisedHostname: "server1",
+            RKEConfigNode: v1.RKEConfigNode{
+                Address:          "1.1.1.1",
+                InternalAddress:  "192.168.1.5",
+                Role:             []string{"controlplane"},
+                HostnameOverride: "server1",
             },
             DClient: nil,
         },
@@ -73,8 +73,8 @@ func TestPKI(t *testing.T) {
     // Test ALT IPs
     kubeAPIAltIPs := []net.IP{
         net.ParseIP("127.0.0.1"),
-        net.ParseIP(cpHosts[0].AdvertiseAddress),
-        net.ParseIP(cpHosts[0].IP),
+        net.ParseIP(cpHosts[0].InternalAddress),
+        net.ParseIP(cpHosts[0].Address),
         net.ParseIP(FakeKubernetesServiceIP),
     }
@@ -16,7 +16,7 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
     initCluster := getEtcdInitialCluster(etcdHosts)
     for _, host := range etcdHosts {
         imageCfg, hostCfg := buildEtcdConfig(host, etcdService, initCluster)
-        err := docker.DoRunContainer(host.DClient, imageCfg, hostCfg, EtcdContainerName, host.AdvertisedHostname, ETCDRole)
+        err := docker.DoRunContainer(host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole)
         if err != nil {
             return err
         }
@@ -28,7 +28,7 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
 func RemoveEtcdPlane(etcdHosts []hosts.Host) error {
     logrus.Infof("[%s] Tearing down Etcd Plane..", ETCDRole)
     for _, host := range etcdHosts {
-        err := docker.DoRemoveContainer(host.DClient, EtcdContainerName, host.AdvertisedHostname)
+        err := docker.DoRemoveContainer(host.DClient, EtcdContainerName, host.Address)
         if err != nil {
             return err
         }
@@ -41,11 +41,11 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster st
     imageCfg := &container.Config{
         Image: etcdService.Image,
         Cmd: []string{"/usr/local/bin/etcd",
-            "--name=etcd-" + host.AdvertisedHostname,
+            "--name=etcd-" + host.HostnameOverride,
             "--data-dir=/etcd-data",
-            "--advertise-client-urls=http://" + host.AdvertiseAddress + ":2379,http://" + host.AdvertiseAddress + ":4001",
+            "--advertise-client-urls=http://" + host.InternalAddress + ":2379,http://" + host.InternalAddress + ":4001",
             "--listen-client-urls=http://0.0.0.0:2379",
-            "--initial-advertise-peer-urls=http://" + host.AdvertiseAddress + ":2380",
+            "--initial-advertise-peer-urls=http://" + host.InternalAddress + ":2380",
            "--listen-peer-urls=http://0.0.0.0:2380",
             "--initial-cluster-token=etcd-cluster-1",
             "--initial-cluster=" + initCluster,
@@ -81,7 +81,7 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster st
 func GetEtcdConnString(hosts []hosts.Host) string {
     connString := ""
     for i, host := range hosts {
-        connString += "http://" + host.AdvertiseAddress + ":2379"
+        connString += "http://" + host.InternalAddress + ":2379"
         if i < (len(hosts) - 1) {
             connString += ","
         }
@@ -92,7 +92,7 @@ func GetEtcdConnString(hosts []hosts.Host) string {
 func getEtcdInitialCluster(hosts []hosts.Host) string {
     initialCluster := ""
     for i, host := range hosts {
-        initialCluster += fmt.Sprintf("etcd-%s=http://%s:2380", host.AdvertisedHostname, host.AdvertiseAddress)
+        initialCluster += fmt.Sprintf("etcd-%s=http://%s:2380", host.HostnameOverride, host.InternalAddress)
         if i < (len(hosts) - 1) {
             initialCluster += ","
         }
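For a feel of the strings these two builders now produce, a small standalone sketch with hypothetical node values; strings.Join stands in for the manual comma bookkeeping above:

package main

import (
    "fmt"
    "strings"
)

type node struct{ HostnameOverride, InternalAddress string }

func main() {
    nodes := []node{
        {"server1", "10.1.1.1"}, // hypothetical values
        {"server2", "10.2.2.2"},
    }
    conn := make([]string, 0, len(nodes))
    initial := make([]string, 0, len(nodes))
    for _, n := range nodes {
        conn = append(conn, "http://"+n.InternalAddress+":2379")
        initial = append(initial, fmt.Sprintf("etcd-%s=http://%s:2380", n.HostnameOverride, n.InternalAddress))
    }
    fmt.Println(strings.Join(conn, ","))
    // http://10.1.1.1:2379,http://10.2.2.2:2379
    fmt.Println(strings.Join(initial, ","))
    // etcd-server1=http://10.1.1.1:2380,etcd-server2=http://10.2.2.2:2380
}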
@@ -14,11 +14,11 @@ import (
 func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
     etcdConnString := GetEtcdConnString(etcdHosts)
     imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.AdvertisedHostname, ControlRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole)
 }
 
 func removeKubeAPI(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.Address)
 }
 
 func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
@@ -26,17 +26,18 @@ func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdC
         Image: kubeAPIService.Image,
         Entrypoint: []string{"kube-apiserver",
-            "--insecure-bind-address=127.0.0.1",
+            "--bind-address=0.0.0.0",
             "--insecure-port=8080",
             "--secure-port=6443",
             "--cloud-provider=",
             "--allow_privileged=true",
+            "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
             "--service-cluster-ip-range=" + kubeAPIService.ServiceClusterIPRange,
             "--admission-control=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
             "--runtime-config=batch/v2alpha1",
             "--runtime-config=authentication.k8s.io/v1beta1=true",
             "--storage-backend=etcd3",
             "--etcd-servers=" + etcdConnString,
-            "--advertise-address=" + host.AdvertiseAddress,
             "--client-ca-file=" + pki.CACertPath,
             "--tls-cert-file=" + pki.KubeAPICertPath,
             "--tls-private-key-file=" + pki.KubeAPIKeyPath,
@@ -12,11 +12,11 @@ import (
 
 func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
     imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.AdvertisedHostname, ControlRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole)
 }
 
 func removeKubeController(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.Address)
 }
 
 func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (*container.Config, *container.HostConfig) {
@@ -13,11 +13,11 @@ import (
 
 func runKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) error {
     imageCfg, hostCfg := buildKubeletConfig(host, kubeletService, isMaster)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.AdvertisedHostname, WorkerRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Address, WorkerRole)
 }
 
 func removeKubelet(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, KubeletContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, KubeletContainerName, host.Address)
 }
 
 func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) (*container.Config, *container.HostConfig) {
@@ -27,11 +27,11 @@ func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMas
             "--v=2",
             "--address=0.0.0.0",
             "--cluster-domain=" + kubeletService.ClusterDomain,
-            "--hostname-override=" + host.AdvertisedHostname,
             "--pod-infra-container-image=" + kubeletService.InfraContainerImage,
             "--cgroup-driver=cgroupfs",
             "--cgroups-per-qos=True",
             "--enforce-node-allocatable=",
+            "--hostname-override=" + host.HostnameOverride,
             "--cluster-dns=" + kubeletService.ClusterDNSServer,
             "--network-plugin=cni",
             "--cni-conf-dir=/etc/cni/net.d",
@@ -12,11 +12,11 @@ import (
 
 func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
     imageCfg, hostCfg := buildKubeproxyConfig(host, kubeproxyService)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.AdvertisedHostname, WorkerRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Address, WorkerRole)
 }
 
 func removeKubeproxy(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, KubeproxyContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, KubeproxyContainerName, host.Address)
 }
 
 func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
@@ -17,7 +17,7 @@ func RollingUpdateNginxProxy(cpHosts []hosts.Host, workerHosts []hosts.Host) err
     nginxProxyEnv := buildProxyEnv(cpHosts)
     for _, host := range workerHosts {
         imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
-        return docker.DoRollingUpdateContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.AdvertisedHostname, WorkerRole)
+        return docker.DoRollingUpdateContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole)
     }
     return nil
 }
@@ -25,11 +25,11 @@ func RollingUpdateNginxProxy(cpHosts []hosts.Host, workerHosts []hosts.Host) err
 func runNginxProxy(host hosts.Host, cpHosts []hosts.Host) error {
     nginxProxyEnv := buildProxyEnv(cpHosts)
     imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.AdvertisedHostname, WorkerRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole)
 }
 
 func removeNginxProxy(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.Address)
 }
 
 func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
@@ -48,7 +48,7 @@ func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Co
 func buildProxyEnv(cpHosts []hosts.Host) string {
     proxyEnv := ""
     for i, cpHost := range cpHosts {
-        proxyEnv += fmt.Sprintf("%s", cpHost.AdvertiseAddress)
+        proxyEnv += fmt.Sprintf("%s", cpHost.InternalAddress)
         if i < (len(cpHosts) - 1) {
             proxyEnv += ","
         }
@@ -12,11 +12,11 @@ import (
 
 func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
     imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService)
-    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole)
+    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.Address, ControlRole)
 }
 
 func removeScheduler(host hosts.Host) error {
-    return docker.DoRemoveContainer(host.DClient, SchedulerContainerName, host.AdvertisedHostname)
+    return docker.DoRemoveContainer(host.DClient, SchedulerContainerName, host.Address)
 }
 
 func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) (*container.Config, *container.HostConfig) {
@@ -10,12 +10,12 @@ github.com/docker/distribution 3800056b8832cf6075e78b282ac010131d8687b
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 golang.org/x/net 186fd3fc8194a5e9980a82230d69c1ff7134229f
-github.com/rancher/types a7111733a50d97a2541c72e794d216105a22b972
+github.com/rancher/types 0a0a5647cfdec48e7f531534d383e755fae7861c
 github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
 github.com/gogo/protobuf 117892bf1866fbaa2318c03e50e40564c8845457
 github.com/opencontainers/image-spec 7c889fafd04a893f5c5f50b7ab9963d5d64e5242
 github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
-github.com/rancher/norman 068b9eb94326e2c566c5eed7636163b1b407c4c0
+github.com/rancher/norman faa1fb2148211044253fc2f6403008958c72b1f0
 gopkg.in/check.v1 11d3bc7aa68e238947792f30573146a3231fc0f1
 k8s.io/api/core/v1 4df58c811fe2e65feb879227b2b245e4dc26e7ad
 k8s.io/client-go v5.0.0 transitive=true