
Log RKE components to a specific location

moelsayed 2018-03-21 19:20:58 +02:00
parent 63cee2f239
commit ef7b997e2a
14 changed files with 161 additions and 32 deletions

View File

@@ -62,7 +62,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
if len(c.Services.Etcd.ExternalURLs) > 0 {
log.Infof(ctx, "[etcd] External etcd connection string has been specified, skipping etcd plane")
} else {
if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdProcessHostMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly); err != nil {
if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdProcessHostMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine); err != nil {
return fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
}
}
@@ -77,7 +77,9 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
c.LocalConnDialerFactory,
c.PrivateRegistriesMap,
processMap, c.UpdateWorkersOnly); err != nil {
processMap,
c.UpdateWorkersOnly,
c.SystemImages.Alpine); err != nil {
return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
}
@@ -103,7 +105,7 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context) error {
kubeletProcessHostMap,
c.Certificates,
c.UpdateWorkersOnly,
); err != nil {
c.SystemImages.Alpine); err != nil {
return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
}
return nil

cluster/logs.go (new file, 58 lines added)
View File

@@ -0,0 +1,58 @@
package cluster
import (
"context"
"fmt"
"github.com/docker/docker/api/types/container"
"github.com/rancher/rke/docker"
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/services"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
func (c *Cluster) CleanDeadLogs(ctx context.Context) error {
hosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
var errgrp errgroup.Group
for _, host := range hosts {
runHost := host
errgrp.Go(func() error {
return doRunLogCleaner(ctx, runHost, c.SystemImages.Alpine, c.PrivateRegistriesMap)
})
}
return errgrp.Wait()
}
func doRunLogCleaner(ctx context.Context, host *hosts.Host, alpineImage string, prsMap map[string]v3.PrivateRegistry) error {
logrus.Debugf("[cleanup] Starting log link cleanup on host [%s]", host.Address)
imageCfg := &container.Config{
Image: alpineImage,
Tty: true,
Cmd: []string{
"sh",
"-c",
fmt.Sprintf("find %s -type l ! -exec test -e {} \\; -print -delete", services.RKELogsPath),
},
}
hostCfg := &container.HostConfig{
Binds: []string{
"/var/lib:/var/lib",
},
Privileged: true,
}
if err := docker.DoRemoveContainer(ctx, host.DClient, services.LogCleanerContainerName, host.Address); err != nil {
return err
}
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, services.LogCleanerContainerName, host.Address, "cleanup", prsMap); err != nil {
return err
}
if err := docker.DoRemoveContainer(ctx, host.DClient, services.LogCleanerContainerName, host.Address); err != nil {
return err
}
logrus.Debugf("[cleanup] Successfully cleaned up log links on host [%s]", host.Address)
return nil
}
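
The cleaner container's command, find /var/lib/rancher/rke/log -type l ! -exec test -e {} \; -print -delete, removes symlinks in the RKE log directory whose targets no longer exist, e.g. after a component container has been recreated and its old Docker log file deleted. Purely as an illustration (not part of this commit, which runs the find inside the Alpine helper container), the same dangling-link cleanup could be written directly in Go against a local filesystem:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeDanglingLinks mirrors the cleaner's find command: it scans the log
// directory and removes symlinks whose targets no longer exist.
func removeDanglingLinks(logDir string) error {
	entries, err := os.ReadDir(logDir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if entry.Type()&os.ModeSymlink == 0 {
			continue // only symlinks are cleanup candidates
		}
		link := filepath.Join(logDir, entry.Name())
		// os.Stat follows the link; a missing target means the link is dangling.
		if _, err := os.Stat(link); os.IsNotExist(err) {
			fmt.Println("removing dangling link", link)
			if err := os.Remove(link); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	if err := removeDanglingLinks("/var/lib/rancher/rke/log"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}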

View File

@@ -211,7 +211,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
readyHosts := getReadyEtcdHosts(kubeCluster.EtcdHosts)
etcdProcessHostMap := kubeCluster.getEtcdProcessHostMap(readyHosts)
if err := services.ReloadEtcdCluster(ctx, readyHosts, currentCluster.LocalConnDialerFactory, clientCert, clientkey, currentCluster.PrivateRegistriesMap, etcdProcessHostMap); err != nil {
if err := services.ReloadEtcdCluster(ctx, readyHosts, currentCluster.LocalConnDialerFactory, clientCert, clientkey, currentCluster.PrivateRegistriesMap, etcdProcessHostMap, kubeCluster.SystemImages.Alpine); err != nil {
return err
}
}

View File

@@ -117,6 +117,10 @@ func ClusterUp(
return APIURL, caCrt, clientCert, clientKey, err
}
if err = kubeCluster.CleanDeadLogs(ctx); err != nil {
return APIURL, caCrt, clientCert, clientKey, err
}
err = kubeCluster.SyncLabelsAndTaints(ctx)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, err

View File

@@ -9,7 +9,7 @@ import (
"golang.org/x/sync/errgroup"
)
func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, updateWorkersOnly bool) error {
func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, updateWorkersOnly bool, alpineImage string) error {
log.Infof(ctx, "[%s] Building up Controller Plane..", ControlRole)
var errgrp errgroup.Group
for _, host := range controlHosts {
@@ -18,7 +18,7 @@ func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnD
continue
}
errgrp.Go(func() error {
return doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, processMap)
return doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, processMap, alpineImage)
})
}
if err := errgrp.Wait(); err != nil {
@@ -69,7 +69,7 @@ func RemoveControlPlane(ctx context.Context, controlHosts []*hosts.Host, force b
return nil
}
func doDeployControlHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process) error {
func doDeployControlHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, alpineImage string) error {
if host.IsWorker {
if err := removeNginxProxy(ctx, host); err != nil {
return err
@@ -80,13 +80,13 @@ func doDeployControlHost(ctx context.Context, host *hosts.Host, localConnDialerF
return err
}
// run kubeapi
if err := runKubeAPI(ctx, host, localConnDialerFactory, prsMap, processMap[KubeAPIContainerName]); err != nil {
if err := runKubeAPI(ctx, host, localConnDialerFactory, prsMap, processMap[KubeAPIContainerName], alpineImage); err != nil {
return err
}
// run kubecontroller
if err := runKubeController(ctx, host, localConnDialerFactory, prsMap, processMap[KubeControllerContainerName]); err != nil {
if err := runKubeController(ctx, host, localConnDialerFactory, prsMap, processMap[KubeControllerContainerName], alpineImage); err != nil {
return err
}
// run scheduler
return runScheduler(ctx, host, localConnDialerFactory, prsMap, processMap[SchedulerContainerName])
return runScheduler(ctx, host, localConnDialerFactory, prsMap, processMap[SchedulerContainerName], alpineImage)
}

View File

@@ -20,15 +20,17 @@ const (
EtcdHealthCheckURL = "https://127.0.0.1:2379/health"
)
func RunEtcdPlane(ctx context.Context, etcdHosts []*hosts.Host, etcdProcessHostMap map[*hosts.Host]v3.Process, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, updateWorkersOnly bool) error {
func RunEtcdPlane(ctx context.Context, etcdHosts []*hosts.Host, etcdProcessHostMap map[*hosts.Host]v3.Process, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, updateWorkersOnly bool, alpineImage string) error {
log.Infof(ctx, "[%s] Building up Etcd Plane..", ETCDRole)
for _, host := range etcdHosts {
if updateWorkersOnly {
continue
}
imageCfg, hostCfg, _ := GetProcessConfig(etcdProcessHostMap[host])
err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole, prsMap)
if err != nil {
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole, prsMap); err != nil {
return err
}
if err := createLogLink(ctx, host, EtcdContainerName, ETCDRole, alpineImage, prsMap); err != nil {
return err
}
}
@@ -128,12 +130,15 @@ func RemoveEtcdMember(ctx context.Context, etcdHost *hosts.Host, etcdHosts []*ho
return nil
}
func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, cert, key []byte, prsMap map[string]v3.PrivateRegistry, etcdProcessHostMap map[*hosts.Host]v3.Process) error {
func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, cert, key []byte, prsMap map[string]v3.PrivateRegistry, etcdProcessHostMap map[*hosts.Host]v3.Process, alpineImage string) error {
for host, process := range etcdProcessHostMap {
imageCfg, hostCfg, _ := GetProcessConfig(process)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole, prsMap); err != nil {
return err
}
if err := createLogLink(ctx, host, EtcdContainerName, ETCDRole, alpineImage, prsMap); err != nil {
return err
}
}
time.Sleep(10 * time.Second)
var healthy bool

View File

@@ -8,13 +8,16 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubeAPI(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process) error {
func runKubeAPI(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeAPIProcess)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}
return runHealthcheck(ctx, host, KubeAPIContainerName, df, healthCheckURL, nil)
if err := runHealthcheck(ctx, host, KubeAPIContainerName, df, healthCheckURL, nil); err != nil {
return err
}
return createLogLink(ctx, host, KubeAPIContainerName, ControlRole, alpineImage, prsMap)
}
func removeKubeAPI(ctx context.Context, host *hosts.Host) error {

View File

@@ -8,12 +8,15 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubeController(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, controllerProcess v3.Process) error {
func runKubeController(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, controllerProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(controllerProcess)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}
return runHealthcheck(ctx, host, KubeControllerContainerName, df, healthCheckURL, nil)
if err := runHealthcheck(ctx, host, KubeControllerContainerName, df, healthCheckURL, nil); err != nil {
return err
}
return createLogLink(ctx, host, KubeControllerContainerName, ControlRole, alpineImage, prsMap)
}
func removeKubeController(ctx context.Context, host *hosts.Host) error {

View File

@@ -9,12 +9,15 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubelet(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeletProcess v3.Process, certMap map[string]pki.CertificatePKI) error {
func runKubelet(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeletProcess v3.Process, certMap map[string]pki.CertificatePKI, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeletProcess)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}
return runHealthcheck(ctx, host, KubeletContainerName, df, healthCheckURL, certMap)
if err := runHealthcheck(ctx, host, KubeletContainerName, df, healthCheckURL, certMap); err != nil {
return err
}
return createLogLink(ctx, host, KubeletContainerName, WorkerRole, alpineImage, prsMap)
}
func removeKubelet(ctx context.Context, host *hosts.Host) error {

View File

@@ -8,12 +8,15 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runKubeproxy(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeProxyProcess v3.Process) error {
func runKubeproxy(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeProxyProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeProxyProcess)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}
return runHealthcheck(ctx, host, KubeproxyContainerName, df, healthCheckURL, nil)
if err := runHealthcheck(ctx, host, KubeproxyContainerName, df, healthCheckURL, nil); err != nil {
return err
}
return createLogLink(ctx, host, KubeproxyContainerName, WorkerRole, alpineImage, prsMap)
}
func removeKubeproxy(ctx context.Context, host *hosts.Host) error {

View File

@@ -13,9 +13,12 @@ const (
NginxProxyEnvName = "CP_HOSTS"
)
func runNginxProxy(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, proxyProcess v3.Process) error {
func runNginxProxy(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, proxyProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, _ := GetProcessConfig(proxyProcess)
return docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole, prsMap)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}
return createLogLink(ctx, host, NginxProxyContainerName, WorkerRole, alpineImage, prsMap)
}
func removeNginxProxy(ctx context.Context, host *hosts.Host) error {

View File

@@ -8,12 +8,15 @@ import (
"github.com/rancher/types/apis/management.cattle.io/v3"
)
func runScheduler(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, schedulerProcess v3.Process) error {
func runScheduler(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, schedulerProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(schedulerProcess)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}
return runHealthcheck(ctx, host, SchedulerContainerName, df, healthCheckURL, nil)
if err := runHealthcheck(ctx, host, SchedulerContainerName, df, healthCheckURL, nil); err != nil {
return err
}
return createLogLink(ctx, host, SchedulerContainerName, ControlRole, alpineImage, prsMap)
}
func removeScheduler(ctx context.Context, host *hosts.Host) error {

View File

@@ -9,6 +9,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/log"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/sirupsen/logrus"
)
const (
@@ -27,12 +28,16 @@ const (
EtcdContainerName = "etcd"
NginxProxyContainerName = "nginx-proxy"
SidekickContainerName = "service-sidekick"
LogLinkContainerName = "rke-log-linker"
LogCleanerContainerName = "rke-log-cleaner"
KubeAPIPort = 6443
SchedulerPort = 10251
KubeControllerPort = 10252
KubeletPort = 10250
KubeproxyPort = 10256
RKELogsPath = "/var/lib/rancher/rke/log"
)
func runSidekick(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, sidecarProcess v3.Process) error {
@@ -88,3 +93,40 @@ func GetHealthCheckURL(useTLS bool, port int) string {
}
return fmt.Sprintf("%s%s:%d%s", HTTPProtoPrefix, HealthzAddress, port, HealthzEndpoint)
}
func createLogLink(ctx context.Context, host *hosts.Host, containerName, plane, image string, prsMap map[string]v3.PrivateRegistry) error {
logrus.Debugf("[%s] Creating log link for Container [%s] on host [%s]", plane, containerName, host.Address)
containerInspect, err := docker.InspectContainer(ctx, host.DClient, host.Address, containerName)
if err != nil {
return err
}
containerID := containerInspect.ID
containerLogPath := containerInspect.LogPath
containerLogLink := fmt.Sprintf("%s/%s-%s.log", RKELogsPath, containerName, containerID)
imageCfg := &container.Config{
Image: image,
Tty: true,
Cmd: []string{
"sh",
"-c",
fmt.Sprintf("mkdir -p %s ; ln -s %s %s", RKELogsPath, containerLogPath, containerLogLink),
},
}
hostCfg := &container.HostConfig{
Binds: []string{
"/var/lib:/var/lib",
},
Privileged: true,
}
if err := docker.DoRemoveContainer(ctx, host.DClient, LogLinkContainerName, host.Address); err != nil {
return err
}
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, LogLinkContainerName, host.Address, plane, prsMap); err != nil {
return err
}
if err := docker.DoRemoveContainer(ctx, host.DClient, LogLinkContainerName, host.Address); err != nil {
return err
}
logrus.Debugf("[%s] Successfully created log link for Container [%s] on host [%s]", plane, containerName, host.Address)
return nil
}
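
The link created above is named <RKELogsPath>/<containerName>-<containerID>.log, so for the etcd container (EtcdContainerName = "etcd" in this diff) it would look like /var/lib/rancher/rke/log/etcd-<id>.log; with Docker's default json-file log driver, containerInspect.LogPath typically points at /var/lib/docker/containers/<id>/<id>-json.log. Because the helper container bind-mounts /var/lib, both the Docker log file and the link directory are visible to the mkdir/ln command. As a sketch only (a hypothetical local helper, not what the commit ships; the commit executes the equivalent shell command inside the Alpine image), the same step in plain Go would be:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// linkContainerLog reproduces the linker container's shell command
// ("mkdir -p <logDir> ; ln -s <logPath> <link>") with the standard library.
// All parameters stand in for the values createLogLink gathers from
// docker.InspectContainer.
func linkContainerLog(logDir, containerName, containerID, logPath string) error {
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return err
	}
	link := filepath.Join(logDir, fmt.Sprintf("%s-%s.log", containerName, containerID))
	// Like "ln -s", creating a link that already exists fails; treat that as success here.
	if err := os.Symlink(logPath, link); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}

func main() {
	// Example values; the container ID and log path are placeholders.
	err := linkContainerLog("/var/lib/rancher/rke/log", "etcd", "abc123",
		"/var/lib/docker/containers/abc123/abc123-json.log")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}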

View File

@@ -14,7 +14,7 @@ const (
unschedulableEtcdTaint = "node-role.kubernetes.io/etcd=true:NoExecute"
)
func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, kubeletProcessHostMap map[*hosts.Host]v3.Process, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool) error {
func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, kubeletProcessHostMap map[*hosts.Host]v3.Process, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string) error {
log.Infof(ctx, "[%s] Building up Worker Plane..", WorkerRole)
var errgrp errgroup.Group
for _, host := range allHosts {
@@ -32,7 +32,7 @@ func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialer
hostProcessMap := copyProcessMap(processMap)
errgrp.Go(func() error {
hostProcessMap[KubeletContainerName] = kubeletProcessHostMap[runHost]
return doDeployWorkerPlane(ctx, runHost, localConnDialerFactory, prsMap, hostProcessMap, certMap)
return doDeployWorkerPlane(ctx, runHost, localConnDialerFactory, prsMap, hostProcessMap, certMap, alpineImage)
})
}
if err := errgrp.Wait(); err != nil {
@@ -71,10 +71,10 @@ func RemoveWorkerPlane(ctx context.Context, workerHosts []*hosts.Host, force boo
func doDeployWorkerPlane(ctx context.Context, host *hosts.Host,
localConnDialerFactory hosts.DialerFactory,
prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI) error {
prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, alpineImage string) error {
// run nginx proxy
if !host.IsControl {
if err := runNginxProxy(ctx, host, prsMap, processMap[NginxProxyContainerName]); err != nil {
if err := runNginxProxy(ctx, host, prsMap, processMap[NginxProxyContainerName], alpineImage); err != nil {
return err
}
}
@@ -83,10 +83,10 @@ func doDeployWorkerPlane(ctx context.Context, host *hosts.Host,
return err
}
// run kubelet
if err := runKubelet(ctx, host, localConnDialerFactory, prsMap, processMap[KubeletContainerName], certMap); err != nil {
if err := runKubelet(ctx, host, localConnDialerFactory, prsMap, processMap[KubeletContainerName], certMap, alpineImage); err != nil {
return err
}
return runKubeproxy(ctx, host, localConnDialerFactory, prsMap, processMap[KubeproxyContainerName])
return runKubeproxy(ctx, host, localConnDialerFactory, prsMap, processMap[KubeproxyContainerName], alpineImage)
}
func copyProcessMap(m map[string]v3.Process) map[string]v3.Process {