diff --git a/cluster.yml b/cluster.yml
index 5929e88d..c8e246e4 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -72,7 +72,7 @@ services:
 
 system_images:
   alpine: alpine:latest
-  nginx_proxy: rancher/rke-nginx-proxy:v0.1.0
+  nginx_proxy: rancher/rke-nginx-proxy:v0.1.1
   cert_downloader: rancher/rke-cert-deployer:v0.1.1
   service_sidekick_image: rancher/rke-service-sidekick:v0.1.0
   kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
diff --git a/cluster/defaults.go b/cluster/defaults.go
index 252fe052..19688627 100644
--- a/cluster/defaults.go
+++ b/cluster/defaults.go
@@ -19,7 +19,7 @@ const (
 
 	DefaultInfraContainerImage  = "gcr.io/google_containers/pause-amd64:3.0"
 	DefaultAplineImage          = "alpine:latest"
-	DefaultNginxProxyImage      = "rancher/rke-nginx-proxy:v0.1.0"
+	DefaultNginxProxyImage      = "rancher/rke-nginx-proxy:v0.1.1"
 	DefaultCertDownloaderImage  = "rancher/rke-cert-deployer:v0.1.1"
 	DefaultServiceSidekickImage = "rancher/rke-service-sidekick:v0.1.0"
 
diff --git a/cluster/network.go b/cluster/network.go
index 0cba0f35..b4277796 100644
--- a/cluster/network.go
+++ b/cluster/network.go
@@ -317,13 +317,16 @@ func (c *Cluster) deployTCPPortListeners(ctx context.Context, currentCluster *Cl
 }
 
 func (c *Cluster) deployListenerOnPlane(ctx context.Context, portList []string, holstPlane []*hosts.Host, containerName string) error {
-	portBindingList := []nat.PortBinding{}
-	for _, portNumber := range portList {
-		rawPort := fmt.Sprintf("0.0.0.0:%s:1337/tcp", portNumber)
-		portMapping, _ := nat.ParsePortSpec(rawPort)
-		portBindingList = append(portBindingList, portMapping[0].Binding)
+	var errgrp errgroup.Group
+	for _, host := range holstPlane {
+		runHost := host
+		errgrp.Go(func() error {
+			return c.deployListener(ctx, runHost, portList, containerName)
+		})
 	}
-
+	return errgrp.Wait()
+}
+
+func (c *Cluster) deployListener(ctx context.Context, host *hosts.Host, portList []string, containerName string) error {
 	imageCfg := &container.Config{
 		Image: c.SystemImages[AplineImage],
 		Cmd: []string{
@@ -340,19 +343,19 @@ func (c *Cluster) deployListenerOnPlane(ctx context.Context, portList []string,
 	}
 	hostCfg := &container.HostConfig{
 		PortBindings: nat.PortMap{
-			"1337/tcp": portBindingList,
+			"1337/tcp": getPortBindings("0.0.0.0", portList),
 		},
 	}
 
-	var errgrp errgroup.Group
-	for _, host := range holstPlane {
-		runHost := host
-		errgrp.Go(func() error {
-			logrus.Debugf("[network] Starting deployListener [%s] on host [%s]", containerName, runHost.Address)
-			return docker.DoRunContainer(ctx, runHost.DClient, imageCfg, hostCfg, containerName, runHost.Address, "network")
-		})
+	logrus.Debugf("[network] Starting deployListener [%s] on host [%s]", containerName, host.Address)
+	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, containerName, host.Address, "network"); err != nil {
+		if strings.Contains(err.Error(), "bind: address already in use") {
+			logrus.Debugf("[network] Service is already up on host [%s]", host.Address)
+			return nil
+		}
+		return err
 	}
-	return errgrp.Wait()
+	return nil
 }
 
 func (c *Cluster) removeTCPPortListeners(ctx context.Context) error {
@@ -509,3 +512,13 @@ func getPortCheckLogs(reader io.ReadCloser) ([]string, error) {
 	}
 	return hostPortLines, nil
 }
+
+func getPortBindings(hostAddress string, portList []string) []nat.PortBinding {
+	portBindingList := []nat.PortBinding{}
+	for _, portNumber := range portList {
+		rawPort := fmt.Sprintf("%s:%s:1337/tcp", hostAddress, portNumber)
+		portMapping, _ := nat.ParsePortSpec(rawPort)
+		portBindingList = append(portBindingList, portMapping[0].Binding)
+	}
+	return portBindingList
+}
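
For context, here is a small self-contained sketch (not part of the patch) of the two pieces these hunks combine: the per-host errgroup fan-out that now drives `deployListener`, and the `getPortBindings` helper built on `nat.ParsePortSpec`. The host addresses, port numbers, and the `main` wrapper are illustrative placeholders only; the real code runs `docker.DoRunContainer` against each host's Docker client instead of printing.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
	"golang.org/x/sync/errgroup"
)

// getPortBindings mirrors the helper added in cluster/network.go: each listed
// host port is bound to container port 1337/tcp on the given host address.
// The parse error is ignored here, as in the patch.
func getPortBindings(hostAddress string, portList []string) []nat.PortBinding {
	portBindingList := []nat.PortBinding{}
	for _, portNumber := range portList {
		rawPort := fmt.Sprintf("%s:%s:1337/tcp", hostAddress, portNumber)
		portMapping, _ := nat.ParsePortSpec(rawPort)
		portBindingList = append(portBindingList, portMapping[0].Binding)
	}
	return portBindingList
}

func main() {
	hosts := []string{"10.0.0.1", "10.0.0.2"} // placeholder host addresses
	ports := []string{"6443", "2379"}         // placeholder port list

	// One goroutine per host, mirroring deployListenerOnPlane: every goroutine
	// runs, and Wait() returns the first non-nil error from the group.
	var errgrp errgroup.Group
	for _, h := range hosts {
		runHost := h // capture the loop variable for the closure (pre-Go 1.22 semantics)
		errgrp.Go(func() error {
			// stand-in for c.deployListener(ctx, runHost, portList, containerName)
			fmt.Printf("deploying listener on %s: %+v\n", runHost, getPortBindings("0.0.0.0", ports))
			return nil
		})
	}
	if err := errgrp.Wait(); err != nil {
		fmt.Println("deploy failed:", err)
	}
}
```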