Do not rewrite SELinux labels on volume mounts

This commit is contained in:
parent 6f1661aaa9
commit 0cea67e9ff
@@ -188,7 +188,7 @@ func GetClusterCertsFromNodes(ctx context.Context, kubeCluster *Cluster) (map[st
 	backupHosts := hosts.GetUniqueHostList(kubeCluster.EtcdHosts, kubeCluster.ControlPlaneHosts, nil)
 	var certificates map[string]pki.CertificatePKI
 	for _, host := range backupHosts {
-		certificates, err = pki.FetchCertificatesFromHost(ctx, kubeCluster.EtcdHosts, host, kubeCluster.SystemImages.Alpine, kubeCluster.LocalKubeConfigPath, kubeCluster.PrivateRegistriesMap)
+		certificates, err = pki.FetchCertificatesFromHost(ctx, kubeCluster.EtcdHosts, host, kubeCluster.SystemImages.Alpine, kubeCluster.LocalKubeConfigPath, kubeCluster.PrivateRegistriesMap, kubeCluster.Version)
 		if certificates != nil {
 			// Handle service account token key issue
 			kubeAPICert := certificates[pki.KubeAPICertName]
@@ -99,6 +99,7 @@ const (
 	NameLabel = "name"
 
 	WorkerThreads = util.WorkerThreads
+	SELinuxLabel = services.SELinuxLabel
 
 	serviceAccountTokenFileParam = "service-account-key-file"
 
@@ -132,7 +133,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[stri
 	if len(c.Services.Etcd.ExternalURLs) > 0 {
 		log.Infof(ctx, "[etcd] External etcd connection string has been specified, skipping etcd plane")
 	} else {
-		if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdNodePlanMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine, c.Services.Etcd, c.Certificates); err != nil {
+		if err := services.RunEtcdPlane(ctx, c.EtcdHosts, etcdNodePlanMap, c.LocalConnDialerFactory, c.PrivateRegistriesMap, c.UpdateWorkersOnly, c.SystemImages.Alpine, c.Services.Etcd, c.Certificates, c.Version); err != nil {
 			return "", fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
 		}
 	}
@@ -155,7 +156,8 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[stri
 		cpNodePlanMap,
 		c.UpdateWorkersOnly,
 		c.SystemImages.Alpine,
-		c.Certificates); err != nil {
+		c.Certificates,
+		c.Version); err != nil {
 		return "", fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
 	}
 	return "", nil
@@ -201,7 +203,8 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
 		cpNodePlanMap,
 		c.UpdateWorkersOnly,
 		c.SystemImages.Alpine,
-		c.Certificates)
+		c.Certificates,
+		c.Version)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade controlplane components on NotReady hosts, error: %v", err)
 	}
@@ -211,7 +214,8 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
 		cpNodePlanMap,
 		c.Certificates,
 		c.UpdateWorkersOnly,
-		c.SystemImages.Alpine)
+		c.SystemImages.Alpine,
+		c.Version)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade worker components on NotReady hosts, error: %v", err)
 	}
@@ -230,7 +234,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
 		cpNodePlanMap,
 		c.UpdateWorkersOnly,
 		c.SystemImages.Alpine,
-		c.Certificates, c.UpgradeStrategy, c.NewHosts, inactiveHosts, c.MaxUnavailableForControlNodes)
+		c.Certificates, c.UpgradeStrategy, c.NewHosts, inactiveHosts, c.MaxUnavailableForControlNodes, c.Version)
 	if err != nil {
 		return "", fmt.Errorf("[controlPlane] Failed to upgrade Control Plane: %v", err)
 	}
@@ -273,7 +277,8 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[strin
 		workerNodePlanMap,
 		c.Certificates,
 		c.UpdateWorkersOnly,
-		c.SystemImages.Alpine); err != nil {
+		c.SystemImages.Alpine,
+		c.Version); err != nil {
 		return "", fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
 	}
 	return "", nil
@@ -318,7 +323,8 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes
 		workerNodePlanMap,
 		c.Certificates,
 		c.UpdateWorkersOnly,
-		c.SystemImages.Alpine)
+		c.SystemImages.Alpine,
+		c.Version)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade worker components on NotReady hosts, error: %v", err)
 	}
@@ -337,7 +343,11 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes
 		workerNodePlanMap,
 		c.Certificates,
 		c.UpdateWorkersOnly,
-		c.SystemImages.Alpine, c.UpgradeStrategy, c.NewHosts, c.MaxUnavailableForWorkerNodes)
+		c.SystemImages.Alpine,
+		c.UpgradeStrategy,
+		c.NewHosts,
+		c.MaxUnavailableForWorkerNodes,
+		c.Version)
 	if err != nil {
 		return "", fmt.Errorf("[workerPlane] Failed to upgrade Worker Plane: %v", err)
 	}
@@ -338,7 +338,7 @@ func (c *Cluster) updateEncryptionProvider(ctx context.Context, keys []*encrypti
 
 func (c *Cluster) DeployEncryptionProviderFile(ctx context.Context) error {
 	logrus.Debugf("[%s] Deploying Encryption Provider Configuration file on Control Plane nodes..", services.ControlRole)
-	return deployFile(ctx, c.ControlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, EncryptionProviderFilePath, c.EncryptionConfig.EncryptionProviderFile)
+	return deployFile(ctx, c.ControlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, EncryptionProviderFilePath, c.EncryptionConfig.EncryptionProviderFile, c.Version)
 }
 
 // ReconcileDesiredStateEncryptionConfig We do the rotation outside of the cluster reconcile logic. When we are done,
@@ -23,7 +23,7 @@ func (c *Cluster) SnapshotEtcd(ctx context.Context, snapshotName string) error {
 			containerTimeout = c.Services.Etcd.BackupConfig.Timeout
 		}
 		newCtx := context.WithValue(ctx, docker.WaitTimeoutContextKey, containerTimeout)
-		if err := services.RunEtcdSnapshotSave(newCtx, host, c.PrivateRegistriesMap, backupImage, snapshotName, true, c.Services.Etcd); err != nil {
+		if err := services.RunEtcdSnapshotSave(newCtx, host, c.PrivateRegistriesMap, backupImage, snapshotName, true, c.Services.Etcd, c.Version); err != nil {
 			return err
 		}
 	}
@@ -54,7 +54,8 @@ func (c *Cluster) DeployRestoreCerts(ctx context.Context, clusterCerts map[strin
 				c.SystemImages.CertDownloader,
 				c.PrivateRegistriesMap,
 				false,
-				env); err != nil {
+				env,
+				c.Version); err != nil {
 				errList = append(errList, err)
 			}
 		}
@@ -81,7 +82,7 @@ func (c *Cluster) DeployStateFile(ctx context.Context, stateFilePath, snapshotNa
 		errgrp.Go(func() error {
 			var errList []error
 			for host := range hostsQueue {
-				err := pki.DeployStateOnPlaneHost(ctx, host.(*hosts.Host), c.SystemImages.CertDownloader, c.PrivateRegistriesMap, stateFilePath, snapshotName)
+				err := pki.DeployStateOnPlaneHost(ctx, host.(*hosts.Host), c.SystemImages.CertDownloader, c.PrivateRegistriesMap, stateFilePath, snapshotName, c.Version)
 				if err != nil {
 					errList = append(errList, err)
 				}
@@ -95,7 +96,7 @@ func (c *Cluster) DeployStateFile(ctx context.Context, stateFilePath, snapshotNa
 func (c *Cluster) GetStateFileFromSnapshot(ctx context.Context, snapshotName string) (string, error) {
 	backupImage := c.getBackupImage()
 	for _, host := range c.EtcdHosts {
-		stateFile, err := services.RunGetStateFileFromSnapshot(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotName, c.Services.Etcd)
+		stateFile, err := services.RunGetStateFileFromSnapshot(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotName, c.Services.Etcd, c.Version)
 		if err != nil || stateFile == "" {
 			logrus.Infof("Could not extract state file from snapshot [%s] on host [%s]", snapshotName, host.Address)
 			continue
@@ -125,7 +126,7 @@ func (c *Cluster) PrepareBackup(ctx context.Context, snapshotPath string) error
 			log.Warnf(ctx, "failed to stop etcd container on host [%s]: %v", host.Address, err)
 		}
 		// start the download server, only one node should have it!
-		if err := services.StartBackupServer(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath); err != nil {
+		if err := services.StartBackupServer(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath, c.Version); err != nil {
 			log.Warnf(ctx, "failed to start backup server on host [%s]: %v", host.Address, err)
 			errors = append(errors, err)
 			continue
@@ -147,7 +148,7 @@ func (c *Cluster) PrepareBackup(ctx context.Context, snapshotPath string) error
 			if host.Address == backupServer.Address { // we skip the backup server if it's there
 				continue
 			}
-			if err := services.DownloadEtcdSnapshotFromBackupServer(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath, backupServer); err != nil {
+			if err := services.DownloadEtcdSnapshotFromBackupServer(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath, backupServer, c.Version); err != nil {
 				return err
 			}
 		}
@@ -163,7 +164,7 @@ func (c *Cluster) PrepareBackup(ctx context.Context, snapshotPath string) error
 		c.Services.Etcd.BackupConfig.S3BackupConfig != nil {
 		log.Infof(ctx, "[etcd] etcd s3 backup configuration found, will use s3 as source")
 		for _, host := range c.EtcdHosts {
-			if err := services.DownloadEtcdSnapshotFromS3(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath, c.Services.Etcd); err != nil {
+			if err := services.DownloadEtcdSnapshotFromS3(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotPath, c.Services.Etcd, c.Version); err != nil {
 				return err
 			}
 		}
@@ -190,7 +191,7 @@ func (c *Cluster) RestoreEtcdSnapshot(ctx context.Context, snapshotPath string)
 		}
 		newCtx := context.WithValue(ctx, docker.WaitTimeoutContextKey, containerTimeout)
 		if err := services.RestoreEtcdSnapshot(newCtx, host, c.PrivateRegistriesMap, c.SystemImages.Etcd, backupImage,
-			snapshotPath, initCluster, c.Services.Etcd); err != nil {
+			snapshotPath, initCluster, c.Services.Etcd, c.Version); err != nil {
 			return fmt.Errorf("[etcd] Failed to restore etcd snapshot: %v", err)
 		}
 	}
@@ -201,7 +202,7 @@ func (c *Cluster) RemoveEtcdSnapshot(ctx context.Context, snapshotName string) e
 	backupImage := c.getBackupImage()
 	for _, host := range c.EtcdHosts {
 		if err := services.RunEtcdSnapshotRemove(ctx, host, c.PrivateRegistriesMap, backupImage, snapshotName,
-			false, c.Services.Etcd); err != nil {
+			false, c.Services.Etcd, c.Version); err != nil {
 			return err
 		}
 	}
@@ -214,7 +215,7 @@ func (c *Cluster) etcdSnapshotChecksum(ctx context.Context, snapshotPath string)
 	backupImage := c.getBackupImage()
 
 	for _, etcdHost := range c.EtcdHosts {
-		checksum, err := services.GetEtcdSnapshotChecksum(ctx, etcdHost, c.PrivateRegistriesMap, backupImage, snapshotPath)
+		checksum, err := services.GetEtcdSnapshotChecksum(ctx, etcdHost, c.PrivateRegistriesMap, backupImage, snapshotPath, c.Version)
 		if err != nil {
 			return false
 		}
@@ -10,6 +10,7 @@ import (
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/log"
 	v3 "github.com/rancher/rke/types"
+	"github.com/rancher/rke/util"
 	"github.com/sirupsen/logrus"
 )
 
@@ -19,17 +20,17 @@ const (
 	ConfigEnv = "FILE_DEPLOY"
 )
 
-func deployFile(ctx context.Context, uniqueHosts []*hosts.Host, alpineImage string, prsMap map[string]v3.PrivateRegistry, fileName, fileContents string) error {
+func deployFile(ctx context.Context, uniqueHosts []*hosts.Host, alpineImage string, prsMap map[string]v3.PrivateRegistry, fileName, fileContents, k8sVersion string) error {
 	for _, host := range uniqueHosts {
 		log.Infof(ctx, "[%s] Deploying file [%s] to node [%s]", ServiceName, fileName, host.Address)
-		if err := doDeployFile(ctx, host, fileName, fileContents, alpineImage, prsMap); err != nil {
+		if err := doDeployFile(ctx, host, fileName, fileContents, alpineImage, prsMap, k8sVersion); err != nil {
 			return fmt.Errorf("[%s] Failed to deploy file [%s] on node [%s]: %v", ServiceName, fileName, host.Address, err)
 		}
 	}
 	return nil
 }
 
-func doDeployFile(ctx context.Context, host *hosts.Host, fileName, fileContents, alpineImage string, prsMap map[string]v3.PrivateRegistry) error {
+func doDeployFile(ctx context.Context, host *hosts.Host, fileName, fileContents, alpineImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	// remove existing container. Only way it's still here is if previous deployment failed
 	if err := docker.DoRemoveContainer(ctx, host.DClient, ContainerName, host.Address); err != nil {
 		return err
@@ -58,11 +59,30 @@ func doDeployFile(ctx context.Context, host *hosts.Host, fileName, fileContents,
 		Cmd: cmd,
 		Env: containerEnv,
 	}
-	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
-		},
-	}
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+	hostCfg := &container.HostConfig{}
+	// Rewrite SELinux labels (:z) is the default
+	binds := []string{
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
+	}
+	// Do not rewrite SELinux labels if k8s version is 1.22
+	if matchedRange {
+		binds = []string{
+			fmt.Sprintf("%s:/etc/kubernetes", path.Join(host.PrefixPath, "/etc/kubernetes")),
+		}
+		// If SELinux is enabled, configure SELinux label
+		if hosts.IsDockerSELinuxEnabled(host) {
+			// We configure the label because we do not rewrite SELinux labels anymore on volume mounts (no :z)
+			logrus.Debugf("Configuring security opt label [%s] for [%s] container on host [%s]", SELinuxLabel, ContainerName, host.Address)
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+	hostCfg.Binds = binds
 
 	if err := docker.DoRunOnetimeContainer(ctx, host.DClient, imageCfg, hostCfg, ContainerName, host.Address, ServiceName, prsMap); err != nil {
 		return err
 	}
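The gate above rests on two helpers from RKE's util package that this diff references but does not show. A minimal sketch of what they could look like, assuming the github.com/Masterminds/semver library and an assumed range constant (the real constant and signatures live in rke/util and may differ):

package util

import (
	"fmt"
	"strings"

	"github.com/Masterminds/semver"
)

// Assumed constraint string: matches any 1.22+ version, including prereleases.
const SemVerK8sVersion122OrHigher = ">= 1.22.0-0"

// SemVerMatchRange reports whether k8sVersion (e.g. "v1.22.4-rancher1-1")
// falls within semVerRange.
func SemVerMatchRange(k8sVersion, semVerRange string) (bool, error) {
	constraint, err := semver.NewConstraint(semVerRange)
	if err != nil {
		return false, fmt.Errorf("failed to parse range [%s]: %v", semVerRange, err)
	}
	version, err := semver.NewVersion(strings.TrimPrefix(k8sVersion, "v"))
	if err != nil {
		return false, fmt.Errorf("failed to parse version [%s]: %v", k8sVersion, err)
	}
	return constraint.Check(version), nil
}

// RemoveZFromBinds strips the ":z" relabel option from bind mount strings,
// e.g. "/etc/kubernetes:/etc/kubernetes:z" -> "/etc/kubernetes:/etc/kubernetes"
// and "...:shared,z" -> "...:shared".
func RemoveZFromBinds(binds []string) []string {
	cleaned := make([]string, 0, len(binds))
	for _, bind := range binds {
		bind = strings.TrimSuffix(bind, ":z")
		bind = strings.Replace(bind, ",z", "", 1)
		cleaned = append(cleaned, bind)
	}
	return cleaned
}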
@@ -234,7 +234,8 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
 				c.SystemImages.CertDownloader,
 				c.PrivateRegistriesMap,
 				c.ForceDeployCerts,
-				env); err != nil {
+				env,
+				c.Version); err != nil {
 				errList = append(errList, err)
 			}
 		}
@@ -250,14 +251,14 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
 	}
 	log.Infof(ctx, "[certificates] Successfully deployed kubernetes certificates to Cluster nodes")
 	if c.CloudProvider.Name != "" {
-		if err := deployFile(ctx, hostList, c.SystemImages.Alpine, c.PrivateRegistriesMap, cloudConfigFileName, c.CloudConfigFile); err != nil {
+		if err := deployFile(ctx, hostList, c.SystemImages.Alpine, c.PrivateRegistriesMap, cloudConfigFileName, c.CloudConfigFile, c.Version); err != nil {
 			return err
 		}
 		log.Infof(ctx, "[%s] Successfully deployed kubernetes cloud config to Cluster nodes", cloudConfigFileName)
 	}
 
 	if c.Authentication.Webhook != nil {
-		if err := deployFile(ctx, hostList, c.SystemImages.Alpine, c.PrivateRegistriesMap, authnWebhookFileName, c.Authentication.Webhook.ConfigFile); err != nil {
+		if err := deployFile(ctx, hostList, c.SystemImages.Alpine, c.PrivateRegistriesMap, authnWebhookFileName, c.Authentication.Webhook.ConfigFile, c.Version); err != nil {
 			return err
 		}
 		log.Infof(ctx, "[%s] Successfully deployed authentication webhook config Cluster nodes", authnWebhookFileName)
@@ -279,7 +280,7 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
 		if err != nil {
 			return err
 		}
-		if err := deployFile(ctx, controlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, DefaultKubeAPIArgAdmissionControlConfigFileValue, string(bytes)); err != nil {
+		if err := deployFile(ctx, controlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, DefaultKubeAPIArgAdmissionControlConfigFileValue, string(bytes), c.Version); err != nil {
 			return err
 		}
 		log.Infof(ctx, "[%s] Successfully deployed admission control config to Cluster control nodes", DefaultKubeAPIArgAdmissionControlConfigFileValue)
@@ -293,7 +294,7 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
 		if err != nil {
 			return err
 		}
-		if err := deployFile(ctx, controlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, DefaultKubeAPIArgAuditPolicyFileValue, string(bytes)); err != nil {
+		if err := deployFile(ctx, controlPlaneHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, DefaultKubeAPIArgAuditPolicyFileValue, string(bytes), c.Version); err != nil {
 			return err
 		}
 		log.Infof(ctx, "[%s] Successfully deployed audit policy file to Cluster control nodes", DefaultKubeAPIArgAuditPolicyFileValue)
@@ -266,11 +266,13 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, serviceOptions v3.Kubern
 	VolumesFrom := []string{
 		services.SidekickContainerName,
 	}
 
 	Binds := []string{
 		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
 	}
 
 	if c.Services.KubeAPI.AuditLog != nil && c.Services.KubeAPI.AuditLog.Enabled {
-		Binds = append(Binds, fmt.Sprintf("%s:/var/log/kube-audit:z", path.Join(host.PrefixPath, "/var/log/kube-audit")))
+		Binds = append(Binds, fmt.Sprintf("%s:/var/log/kube-audit", path.Join(host.PrefixPath, "/var/log/kube-audit")))
 		bytes, err := yaml.Marshal(c.Services.KubeAPI.AuditLog.Configuration.Policy)
 		if err != nil {
 			logrus.Warnf("Error while marshalling auditlog policy: %v", err)
@@ -281,6 +283,15 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, serviceOptions v3.Kubern
 			fmt.Sprintf("%s=%s", AuditLogConfigSumEnv, getStringChecksum(string(bytes))))
 	}
 
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	// Override args if they exist, add additional args
 	for arg, value := range c.Services.KubeAPI.ExtraArgs {
 		if _, ok := c.Services.KubeAPI.ExtraArgs[arg]; ok {
@@ -358,10 +369,20 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, serviceOptions v3
 	VolumesFrom := []string{
 		services.SidekickContainerName,
 	}
 
 	Binds := []string{
 		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
 	}
+
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	for arg, value := range c.Services.KubeController.ExtraArgs {
 		if _, ok := c.Services.KubeController.ExtraArgs[arg]; ok {
 			CommandArgs[arg] = value
@@ -526,6 +547,16 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, serviceOptions v3.Kubern
 			Binds = append(Binds, "/var/lib/kubelet/volumeplugins:/var/lib/kubelet/volumeplugins:shared,z")
 		}
 	}
+
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	Binds = append(Binds, host.GetExtraBinds(kubelet.BaseService)...)
 
 	Env := host.GetExtraEnv(kubelet.BaseService)
@@ -645,6 +676,16 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, serviceOptions v3.Kube
 		BindModules := "/lib/modules:/lib/modules:ro"
 		Binds = append(Binds, BindModules)
 	}
+
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	Binds = append(Binds, host.GetExtraBinds(kubeproxy.BaseService)...)
 
 	Env := host.GetExtraEnv(kubeproxy.BaseService)
@@ -770,6 +811,15 @@ func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, serviceOptions v3.Kube
 		Command = append(Command, cmd)
 	}
 
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	Binds = append(Binds, c.Services.Scheduler.ExtraBinds...)
 
 	healthCheck := v3.HealthCheck{
@@ -930,6 +980,15 @@ func (c *Cluster) BuildEtcdProcess(host *hosts.Host, etcdHosts []*hosts.Host, se
 		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
 	}
 
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		logrus.Debugf("Error while matching cluster version [%s] with range [%s]", c.Version, util.SemVerK8sVersion122OrHigher)
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	if serviceOptions.Etcd != nil {
 		for k, v := range serviceOptions.Etcd {
 			// if the value is empty, we remove that option
@@ -147,21 +147,21 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
 		if err := services.RemoveWorkerPlane(ctx, []*hosts.Host{toDeleteHost}, false); err != nil {
 			return fmt.Errorf("Couldn't remove worker plane: %v", err)
 		}
-		if err := toDeleteHost.CleanUpWorkerHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap); err != nil {
+		if err := toDeleteHost.CleanUpWorkerHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap, cluster.Version); err != nil {
 			return fmt.Errorf("Not able to clean the host: %v", err)
 		}
 	} else if etcd {
 		if err := services.RemoveEtcdPlane(ctx, []*hosts.Host{toDeleteHost}, false); err != nil {
 			return fmt.Errorf("Couldn't remove etcd plane: %v", err)
 		}
-		if err := toDeleteHost.CleanUpEtcdHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap); err != nil {
+		if err := toDeleteHost.CleanUpEtcdHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap, cluster.Version); err != nil {
 			return fmt.Errorf("Not able to clean the host: %v", err)
 		}
 	} else {
 		if err := services.RemoveControlPlane(ctx, []*hosts.Host{toDeleteHost}, false); err != nil {
 			return fmt.Errorf("Couldn't remove control plane: %v", err)
 		}
-		if err := toDeleteHost.CleanUpControlHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap); err != nil {
+		if err := toDeleteHost.CleanUpControlHost(ctx, cluster.SystemImages.Alpine, cluster.PrivateRegistriesMap, cluster.Version); err != nil {
 			return fmt.Errorf("Not able to clean the host: %v", err)
 		}
 	}
@@ -227,7 +227,7 @@ func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, k
 		}
 		// this will start the newly added etcd node and make sure it started correctly before restarting other node
 		// https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#add-a-new-member
-		if err := services.ReloadEtcdCluster(ctx, kubeCluster.EtcdReadyHosts, etcdHost, currentCluster.LocalConnDialerFactory, clientCert, clientKey, currentCluster.PrivateRegistriesMap, etcdNodePlanMap, kubeCluster.SystemImages.Alpine); err != nil {
+		if err := services.ReloadEtcdCluster(ctx, kubeCluster.EtcdReadyHosts, etcdHost, currentCluster.LocalConnDialerFactory, clientCert, clientKey, currentCluster.PrivateRegistriesMap, etcdNodePlanMap, kubeCluster.SystemImages.Alpine, kubeCluster.Version); err != nil {
 			return err
 		}
 	}
@@ -21,7 +21,7 @@ func (c *Cluster) ClusterRemove(ctx context.Context) error {
 	return nil
 }
 
-func cleanUpHosts(ctx context.Context, cpHosts, workerHosts, etcdHosts []*hosts.Host, cleanerImage string, prsMap map[string]v3.PrivateRegistry, externalEtcd bool) error {
+func cleanUpHosts(ctx context.Context, cpHosts, workerHosts, etcdHosts []*hosts.Host, cleanerImage string, prsMap map[string]v3.PrivateRegistry, externalEtcd bool, k8sVersion string) error {
 
 	uniqueHosts := hosts.GetUniqueHostList(cpHosts, workerHosts, etcdHosts)
 
@@ -32,7 +32,7 @@ func cleanUpHosts(ctx context.Context, cpHosts, workerHosts, etcdHosts []*hosts.
 		var errList []error
 		for host := range hostsQueue {
 			runHost := host.(*hosts.Host)
-			if err := runHost.CleanUpAll(ctx, cleanerImage, prsMap, externalEtcd); err != nil {
+			if err := runHost.CleanUpAll(ctx, cleanerImage, prsMap, externalEtcd, k8sVersion); err != nil {
 				errList = append(errList, err)
 			}
 		}
@@ -65,7 +65,7 @@ func (c *Cluster) CleanupNodes(ctx context.Context) error {
 	}
 
 	// Clean up all hosts
-	return cleanUpHosts(ctx, c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, externalEtcd)
+	return cleanUpHosts(ctx, c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages.Alpine, c.PrivateRegistriesMap, externalEtcd, c.Version)
 }
 
 func (c *Cluster) CleanupFiles(ctx context.Context) error {
cluster/selinux.go (new file, 81 lines)
@@ -0,0 +1,81 @@
+package cluster
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/api/types/container"
+
+	"github.com/rancher/rke/docker"
+	"github.com/rancher/rke/hosts"
+	v3 "github.com/rancher/rke/types"
+	"github.com/rancher/rke/util"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	SELinuxCheckContainer = "rke-selinux-checker"
+)
+
+func (c *Cluster) RunSELinuxCheck(ctx context.Context) error {
+	// We only need to check this on k8s 1.22 and higher
+	matchedRange, err := util.SemVerMatchRange(c.Version, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		var errgrp errgroup.Group
+		allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
+		hostsQueue := util.GetObjectQueue(allHosts)
+		for w := 0; w < WorkerThreads; w++ {
+			errgrp.Go(func() error {
+				var errList []error
+				for host := range hostsQueue {
+					if hosts.IsDockerSELinuxEnabled(host.(*hosts.Host)) {
+						err := checkSELinuxLabelOnHost(ctx, host.(*hosts.Host), c.SystemImages.Alpine, c.PrivateRegistriesMap)
+						if err != nil {
+							errList = append(errList, err)
+						}
+					}
+				}
+				return util.ErrList(errList)
+			})
+		}
+		if err := errgrp.Wait(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func checkSELinuxLabelOnHost(ctx context.Context, host *hosts.Host, image string, prsMap map[string]v3.PrivateRegistry) error {
+	var err error
+	imageCfg := &container.Config{
+		Image: image,
+	}
+	hostCfg := &container.HostConfig{
+		SecurityOpt: []string{SELinuxLabel},
+	}
+	for retries := 0; retries < 3; retries++ {
+		logrus.Infof("[selinux] Checking if host [%s] recognizes SELinux label [%s], try #%d", host.Address, SELinuxLabel, retries+1)
+		if err = docker.DoRemoveContainer(ctx, host.DClient, SELinuxCheckContainer, host.Address); err != nil {
+			return err
+		}
+		if err = docker.DoRunOnetimeContainer(ctx, host.DClient, imageCfg, hostCfg, SELinuxCheckContainer, host.Address, "selinux", prsMap); err != nil {
+			// If we hit the error that indicates that the rancher-selinux RPM package is not installed (SELinux label is not recognized), we immediately return
+			// Else we keep trying as there might be an error with Docker (slow system for example)
+			if strings.Contains(err.Error(), "invalid argument") {
+				return fmt.Errorf("[selinux] Host [%s] does not recognize SELinux label [%s]. This is required for Kubernetes version [%s]. Please install rancher-selinux RPM package and try again", host.Address, SELinuxLabel, util.SemVerK8sVersion122OrHigher)
+			}
+			continue
+		}
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("[selinux] Host [%s] was not able to correctly perform SELinux label check: [%v]", host.Address, err)
+	}
+	return nil
+}
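hosts.IsDockerSELinuxEnabled, used by the check above, is not part of this diff. A plausible sketch, assuming the Host struct caches the Docker daemon's Info() response in a DockerInfo field (the real helper lives in rke's hosts package and may differ):

// Sketch only: Docker's Info() lists "name=selinux" among SecurityOptions
// when the daemon runs with SELinux support enabled.
func IsDockerSELinuxEnabled(host *Host) bool {
	for _, securityOpt := range host.DockerInfo.SecurityOptions {
		if strings.Contains(securityOpt, "selinux") {
			return true
		}
	}
	return false
}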
@@ -72,7 +72,7 @@ func (c *Cluster) GetClusterState(ctx context.Context, fullState *FullState) (*C
 func (c *Cluster) GetStateFileFromConfigMap(ctx context.Context) (string, error) {
 	kubeletImage := c.Services.Kubelet.Image
 	for _, host := range c.ControlPlaneHosts {
-		stateFile, err := services.RunGetStateFileFromConfigMap(ctx, host, c.PrivateRegistriesMap, kubeletImage)
+		stateFile, err := services.RunGetStateFileFromConfigMap(ctx, host, c.PrivateRegistriesMap, kubeletImage, c.Version)
 		if err != nil || stateFile == "" {
 			logrus.Infof("Could not get ConfigMap with cluster state from host [%s]", host.Address)
 			continue
@@ -300,7 +300,7 @@ func GetStateFromNodes(ctx context.Context, kubeCluster *Cluster) *Cluster {
 	uniqueHosts := hosts.GetUniqueHostList(kubeCluster.EtcdHosts, kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts)
 	for _, host := range uniqueHosts {
 		filePath := path.Join(pki.TempCertPath, pki.ClusterStateFile)
-		clusterFile, err = pki.FetchFileFromHost(ctx, filePath, kubeCluster.SystemImages.Alpine, host, kubeCluster.PrivateRegistriesMap, pki.StateDeployerContainerName, "state")
+		clusterFile, err = pki.FetchFileFromHost(ctx, filePath, kubeCluster.SystemImages.Alpine, host, kubeCluster.PrivateRegistriesMap, pki.StateDeployerContainerName, "state", kubeCluster.Version)
 		if err == nil {
 			break
 		}
@@ -126,6 +126,10 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		}
 	}
 
+	if err = kubeCluster.RunSELinuxCheck(ctx); err != nil {
+		return APIURL, caCrt, clientCert, clientKey, nil, err
+	}
+
 	err = cluster.SetUpAuthentication(ctx, kubeCluster, currentCluster, clusterState)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
@@ -421,7 +421,7 @@ func StartContainer(ctx context.Context, dClient *client.Client, hostname string
 		logrus.Infof("Starting container [%s] on host [%s], try #%d", containerName, hostname, i)
 		err = dClient.ContainerStart(ctx, containerName, types.ContainerStartOptions{})
 		if err != nil {
-			if strings.Contains(err.Error(), "bind: address already in use") {
+			if strings.Contains(err.Error(), "bind: address already in use") || strings.Contains(err.Error(), "invalid argument") {
 				return err
 			}
 			logrus.Warningf("Can't start Docker container [%s] on host [%s]: %v", containerName, hostname, err)
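The added "invalid argument" match makes SELinux label rejections fail fast instead of burning through retries: when the loaded policy does not know the rke_container_t type (rancher-selinux not installed), dockerd rejects the container configuration outright, so no retry can succeed. A hedged illustration of that classification, with isPermanentStartError as a hypothetical helper name, not part of the diff:

// Both error strings indicate conditions that retrying ContainerStart cannot fix.
func isPermanentStartError(err error) bool {
	if err == nil {
		return false
	}
	// "bind: address already in use": the host port is held by another process.
	// "invalid argument": the daemon rejected the config, e.g. a security-opt
	// SELinux label type unknown to the loaded policy.
	return strings.Contains(err.Error(), "bind: address already in use") ||
		strings.Contains(err.Error(), "invalid argument")
}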
@@ -15,6 +15,7 @@ import (
 	"github.com/rancher/rke/k8s"
 	"github.com/rancher/rke/log"
 	v3 "github.com/rancher/rke/types"
+	"github.com/rancher/rke/util"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 )
@@ -51,6 +52,7 @@ const (
 	CleanerContainerName = "kube-cleaner"
 	LogCleanerContainerName = "rke-log-cleaner"
 	RKELogsPath = "/var/lib/rancher/rke/log"
+	SELinuxLabel = "label=type:rke_container_t"
 
 	B2DOS = "Boot2Docker"
 	B2DPrefixPath = "/mnt/sda1/rke"
@@ -64,7 +66,7 @@ const (
 	WindowsPrefixPath = "c:/"
 )
 
-func (h *Host) CleanUpAll(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry, externalEtcd bool) error {
+func (h *Host) CleanUpAll(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry, externalEtcd bool, k8sVersion string) error {
 	log.Infof(ctx, "[hosts] Cleaning up host [%s]", h.Address)
 	toCleanPaths := []string{
 		path.Join(h.PrefixPath, ToCleanSSLDir),
@@ -78,10 +80,10 @@ func (h *Host) CleanUpAll(ctx context.Context, cleanerImage string, prsMap map[s
 	if !externalEtcd {
 		toCleanPaths = append(toCleanPaths, path.Join(h.PrefixPath, ToCleanEtcdDir))
 	}
-	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap)
+	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap, k8sVersion)
 }
 
-func (h *Host) CleanUpWorkerHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry) error {
+func (h *Host) CleanUpWorkerHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	if h.IsControl || h.IsEtcd {
 		log.Infof(ctx, "[hosts] Host [%s] is already a controlplane or etcd host, skipping cleanup.", h.Address)
 		return nil
@@ -93,10 +95,10 @@ func (h *Host) CleanUpWorkerHost(ctx context.Context, cleanerImage string, prsMa
 		ToCleanCalicoRun,
 		path.Join(h.PrefixPath, ToCleanCNILib),
 	}
-	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap)
+	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap, k8sVersion)
 }
 
-func (h *Host) CleanUpControlHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry) error {
+func (h *Host) CleanUpControlHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	if h.IsWorker || h.IsEtcd {
 		log.Infof(ctx, "[hosts] Host [%s] is already a worker or etcd host, skipping cleanup.", h.Address)
 		return nil
@@ -108,10 +110,10 @@ func (h *Host) CleanUpControlHost(ctx context.Context, cleanerImage string, prsM
 		ToCleanCalicoRun,
 		path.Join(h.PrefixPath, ToCleanCNILib),
 	}
-	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap)
+	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap, k8sVersion)
 }
 
-func (h *Host) CleanUpEtcdHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry) error {
+func (h *Host) CleanUpEtcdHost(ctx context.Context, cleanerImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	toCleanPaths := []string{
 		path.Join(h.PrefixPath, ToCleanEtcdDir),
 		path.Join(h.PrefixPath, ToCleanSSLDir),
@@ -122,12 +124,12 @@ func (h *Host) CleanUpEtcdHost(ctx context.Context, cleanerImage string, prsMap
 			path.Join(h.PrefixPath, ToCleanEtcdDir),
 		}
 	}
-	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap)
+	return h.CleanUp(ctx, toCleanPaths, cleanerImage, prsMap, k8sVersion)
 }
 
-func (h *Host) CleanUp(ctx context.Context, toCleanPaths []string, cleanerImage string, prsMap map[string]v3.PrivateRegistry) error {
+func (h *Host) CleanUp(ctx context.Context, toCleanPaths []string, cleanerImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	log.Infof(ctx, "[hosts] Cleaning up host [%s]", h.Address)
-	imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths, cleanerImage)
+	imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths, cleanerImage, k8sVersion)
 	log.Infof(ctx, "[hosts] Running cleaner container on host [%s]", h.Address)
 	if err := docker.DoRunContainer(ctx, h.DClient, imageCfg, hostCfg, CleanerContainerName, h.Address, CleanerContainerName, prsMap); err != nil {
 		return err
@@ -294,7 +296,7 @@ func IsHostListChanged(currentHosts, configHosts []*Host) bool {
 	return changed
 }
 
-func buildCleanerConfig(host *Host, toCleanDirs []string, cleanerImage string) (*container.Config, *container.HostConfig) {
+func buildCleanerConfig(host *Host, toCleanDirs []string, cleanerImage, k8sVersion string) (*container.Config, *container.HostConfig) {
 	cmd := []string{
 		"sh",
 		"-c",
@@ -304,13 +306,26 @@ func buildCleanerConfig(host *Host, toCleanDirs []string, cleanerImage string) (
 		Image: cleanerImage,
 		Cmd: cmd,
 	}
-	bindMounts := []string{}
+	binds := []string{}
 	for _, vol := range toCleanDirs {
-		bindMounts = append(bindMounts, fmt.Sprintf("%s:%s:z", vol, vol))
+		binds = append(binds, fmt.Sprintf("%s:%s:z", vol, vol))
 	}
-	hostCfg := &container.HostConfig{
-		Binds: bindMounts,
+	hostCfg := &container.HostConfig{}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return nil, nil
 	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+
+		if IsDockerSELinuxEnabled(host) {
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+
+	hostCfg.Binds = binds
 	return imageCfg, hostCfg
 }
 
@@ -17,6 +17,7 @@ import (
 	"github.com/rancher/rke/log"
 	"github.com/rancher/rke/pki/cert"
 	v3 "github.com/rancher/rke/types"
+	"github.com/rancher/rke/util"
 	"github.com/sirupsen/logrus"
 )
 
@@ -32,7 +33,8 @@ func DeployCertificatesOnPlaneHost(
 	certDownloaderImage string,
 	prsMap map[string]v3.PrivateRegistry,
 	forceDeploy bool,
-	env []string) error {
+	env []string,
+	k8sVersion string) error {
 	crtBundle := GenerateRKENodeCerts(ctx, rkeConfig, host.Address, crtMap)
 
 	// Strip CA key as its sensitive and unneeded on nodes without controlplane role
@@ -58,10 +60,10 @@ func DeployCertificatesOnPlaneHost(
 			fmt.Sprintf("ETCD_GID=%d", rkeConfig.Services.Etcd.GID)}...)
 	}
 
-	return doRunDeployer(ctx, host, env, certDownloaderImage, prsMap)
+	return doRunDeployer(ctx, host, env, certDownloaderImage, prsMap, k8sVersion)
 }
 
-func DeployStateOnPlaneHost(ctx context.Context, host *hosts.Host, stateDownloaderImage string, prsMap map[string]v3.PrivateRegistry, stateFilePath, snapshotName string) error {
+func DeployStateOnPlaneHost(ctx context.Context, host *hosts.Host, stateDownloaderImage string, prsMap map[string]v3.PrivateRegistry, stateFilePath, snapshotName, k8sVersion string) error {
 	// remove existing container. Only way it's still here is if previous deployment failed
 	if err := docker.DoRemoveContainer(ctx, host.DClient, StateDeployerContainerName, host.Address); err != nil {
 		return err
@@ -84,10 +86,21 @@ func DeployStateOnPlaneHost(ctx context.Context, host *hosts.Host, stateDownload
 			fmt.Sprintf("for i in $(seq 1 12); do if [ -f \"%[1]s\" ]; then echo \"File [%[1]s] present in this container\"; echo \"Moving [%[1]s] to [%[2]s]\"; mv %[1]s %[2]s; echo \"State file successfully moved to [%[2]s]\"; echo \"Changing permissions to 0400\"; chmod 400 %[2]s; break; else echo \"Waiting for file [%[1]s] to be successfully copied to this container, retry count $i\"; sleep 5; fi; done", SourceClusterStateFilePath, DestinationClusterStateFilePath),
 		},
 	}
+	Binds := []string{
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
+	}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		Binds = util.RemoveZFromBinds(Binds)
+	}
+
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
-		},
+		Binds: Binds,
 		Privileged: true,
 	}
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, StateDeployerContainerName, host.Address, "state", prsMap); err != nil {
@@ -110,7 +123,7 @@ func DeployStateOnPlaneHost(ctx context.Context, host *hosts.Host, stateDownload
 	return docker.DoRemoveContainer(ctx, host.DClient, StateDeployerContainerName, host.Address)
 }
 
-func doRunDeployer(ctx context.Context, host *hosts.Host, containerEnv []string, certDownloaderImage string, prsMap map[string]v3.PrivateRegistry) error {
+func doRunDeployer(ctx context.Context, host *hosts.Host, containerEnv []string, certDownloaderImage string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	// remove existing container. Only way it's still here is if previous deployment failed
 	isRunning := false
 	isRunning, err := docker.IsContainerRunning(ctx, host.DClient, host.Address, CrtDownloaderContainer, true)
@ -140,10 +153,22 @@ func doRunDeployer(ctx context.Context, host *hosts.Host, containerEnv []string,
|
|||||||
Env: containerEnv,
|
Env: containerEnv,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Binds := []string{
|
||||||
|
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
|
||||||
|
}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
Binds = util.RemoveZFromBinds(Binds)
|
||||||
|
}
|
||||||
|
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{
|
||||||
Binds: []string{
|
Binds: Binds,
|
||||||
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
|
|
||||||
},
|
|
||||||
Privileged: true,
|
Privileged: true,
|
||||||
}
|
}
|
||||||
if host.IsWindows() { // compatible with Windows
|
if host.IsWindows() { // compatible with Windows
|
||||||
@ -202,17 +227,7 @@ func RemoveAdminConfig(ctx context.Context, localConfigPath string) {
|
|||||||
log.Infof(ctx, "Local admin Kubeconfig removed successfully")
|
log.Infof(ctx, "Local admin Kubeconfig removed successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
func DeployCertificatesOnHost(ctx context.Context, host *hosts.Host, crtMap map[string]CertificatePKI, certDownloaderImage, certPath string, prsMap map[string]v3.PrivateRegistry) error {
|
func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, host *hosts.Host, image, localConfigPath string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) (map[string]CertificatePKI, error) {
|
||||||
env := []string{
|
|
||||||
"CRTS_DEPLOY_PATH=" + certPath,
|
|
||||||
}
|
|
||||||
for _, crt := range crtMap {
|
|
||||||
env = append(env, crt.ToEnv()...)
|
|
||||||
}
|
|
||||||
return doRunDeployer(ctx, host, env, certDownloaderImage, prsMap)
|
|
||||||
}
|
|
||||||
|
|
||||||
func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, host *hosts.Host, image, localConfigPath string, prsMap map[string]v3.PrivateRegistry) (map[string]CertificatePKI, error) {
|
|
||||||
// rebuilding the certificates. This should look better after refactoring pki
|
// rebuilding the certificates. This should look better after refactoring pki
|
||||||
tmpCerts := make(map[string]CertificatePKI)
|
tmpCerts := make(map[string]CertificatePKI)
|
||||||
|
|
||||||
@ -235,7 +250,7 @@ func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, ho
|
|||||||
|
|
||||||
for certName, config := range crtList {
|
for certName, config := range crtList {
|
||||||
certificate := CertificatePKI{}
|
certificate := CertificatePKI{}
|
||||||
crt, err := FetchFileFromHost(ctx, GetCertTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificates")
|
crt, err := FetchFileFromHost(ctx, GetCertTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificates", k8sVersion)
|
||||||
// Return error if the certificate file is not found but only if its not etcd or request header certificate
|
// Return error if the certificate file is not found but only if its not etcd or request header certificate
|
||||||
if err != nil && !strings.HasPrefix(certName, "kube-etcd") &&
|
if err != nil && !strings.HasPrefix(certName, "kube-etcd") &&
|
||||||
certName != RequestHeaderCACertName &&
|
certName != RequestHeaderCACertName &&
|
||||||
@ -256,7 +271,7 @@ func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, ho
|
|||||||
tmpCerts[certName] = CertificatePKI{}
|
tmpCerts[certName] = CertificatePKI{}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
key, err := FetchFileFromHost(ctx, GetKeyTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificate")
|
key, err := FetchFileFromHost(ctx, GetKeyTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificate", k8sVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isFileNotFoundErr(err) {
|
if isFileNotFoundErr(err) {
|
||||||
return nil, fmt.Errorf("Key %s is not found", GetKeyTempPath(certName))
|
return nil, fmt.Errorf("Key %s is not found", GetKeyTempPath(certName))
|
||||||
@ -264,7 +279,7 @@ func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, ho
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if config {
|
if config {
|
||||||
config, err := FetchFileFromHost(ctx, GetConfigTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificate")
|
config, err := FetchFileFromHost(ctx, GetConfigTempPath(certName), image, host, prsMap, CertFetcherContainer, "certificate", k8sVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isFileNotFoundErr(err) {
|
if isFileNotFoundErr(err) {
|
||||||
return nil, fmt.Errorf("Config %s is not found", GetConfigTempPath(certName))
|
return nil, fmt.Errorf("Config %s is not found", GetConfigTempPath(certName))
|
||||||
@ -294,14 +309,26 @@ func FetchCertificatesFromHost(ctx context.Context, extraHosts []*hosts.Host, ho
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func FetchFileFromHost(ctx context.Context, filePath, image string, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, containerName, state string) (string, error) {
|
func FetchFileFromHost(ctx context.Context, filePath, image string, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, containerName, state, k8sVersion string) (string, error) {
|
||||||
imageCfg := &container.Config{
|
imageCfg := &container.Config{
|
||||||
Image: image,
|
Image: image,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Binds := []string{
|
||||||
|
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
|
||||||
|
}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
Binds = util.RemoveZFromBinds(Binds)
|
||||||
|
}
|
||||||
|
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{
|
||||||
Binds: []string{
|
Binds: Binds,
|
||||||
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
|
|
||||||
},
|
|
||||||
Privileged: true,
|
Privileged: true,
|
||||||
}
|
}
|
||||||
isRunning, err := docker.IsContainerRunning(ctx, host.DClient, host.Address, containerName, true)
|
isRunning, err := docker.IsContainerRunning(ctx, host.DClient, host.Address, containerName, true)
|
||||||
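
The same version gate repeats at every call site above: build the bind list with the ":z" relabel option as before, then strip it again when the cluster runs Kubernetes 1.22 or newer. util.RemoveZFromBinds itself is not part of this diff; the following is a minimal sketch of what such a helper could look like, assuming it only needs to drop a trailing ":z" mount option from bind strings of the form "src:dst:z":

package util

import "strings"

// RemoveZFromBinds (sketch, not the repository's implementation) strips
// the SELinux relabel option ":z" from each Docker bind string, turning
// "src:dst:z" into "src:dst". Binds without the option pass through
// unchanged; combined option lists like "ro,z" would need more handling.
func RemoveZFromBinds(binds []string) []string {
	result := make([]string, 0, len(binds))
	for _, bind := range binds {
		result = append(result, strings.TrimSuffix(bind, ":z"))
	}
	return result
}
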

pki/pki.go (72 lines changed)

@@ -7,14 +7,13 @@ import (
 	"fmt"
 	"net"
 	"path"
-	"path/filepath"
-	"strings"
 
 	"github.com/docker/docker/api/types/container"
 	"github.com/rancher/rke/docker"
 	"github.com/rancher/rke/hosts"
 	"github.com/rancher/rke/log"
 	v3 "github.com/rancher/rke/types"
+	"github.com/rancher/rke/util"
 )
 
 type CertificatePKI struct {
@@ -100,7 +99,7 @@ func RegenerateEtcdCertificate(
 	return crtMap, nil
 }
 
-func SaveBackupBundleOnHost(ctx context.Context, host *hosts.Host, alpineSystemImage, etcdSnapshotPath string, prsMap map[string]v3.PrivateRegistry) error {
+func SaveBackupBundleOnHost(ctx context.Context, host *hosts.Host, alpineSystemImage, etcdSnapshotPath string, prsMap map[string]v3.PrivateRegistry, k8sVersion string) error {
 	imageCfg := &container.Config{
 		Cmd: []string{
 			"sh",
@@ -110,13 +109,25 @@ func SaveBackupBundleOnHost(ctx context.Context, host *hosts.Host, alpineSystemI
 		Image: alpineSystemImage,
 	}
 	hostCfg := &container.HostConfig{
-
-		Binds: []string{
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
-			fmt.Sprintf("%s:/backup:z", etcdSnapshotPath),
-		},
 		Privileged: true,
 	}
+
+	binds := []string{
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
+		fmt.Sprintf("%s:/backup:z", etcdSnapshotPath),
+	}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+	}
+
+	hostCfg.Binds = binds
+
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, BundleCertContainer, host.Address, "certificates", prsMap); err != nil {
 		return err
 	}
@@ -130,48 +141,3 @@ func SaveBackupBundleOnHost(ctx context.Context, host *hosts.Host, alpineSystemI
 	log.Infof(ctx, "[certificates] successfully saved certificate bundle [%s/pki.bundle.tar.gz] on host [%s]", etcdSnapshotPath, host.Address)
 	return docker.RemoveContainer(ctx, host.DClient, host.Address, BundleCertContainer)
 }
-
-func ExtractBackupBundleOnHost(ctx context.Context, host *hosts.Host, alpineSystemImage, etcdSnapshotPath string, prsMap map[string]v3.PrivateRegistry) error {
-	imageCfg := &container.Config{
-		Cmd: []string{
-			"sh",
-			"-c",
-			fmt.Sprintf(
-				"mkdir -p %s; tar xzvf %s -C %s --strip-components %d --exclude %s",
-				TempCertPath,
-				BundleCertPath,
-				TempCertPath,
-				len(strings.Split(filepath.Clean(TempCertPath), "/"))-1,
-				ClusterStateFile),
-		},
-		Image: alpineSystemImage,
-	}
-	hostCfg := &container.HostConfig{
-
-		Binds: []string{
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(host.PrefixPath, "/etc/kubernetes")),
-			fmt.Sprintf("%s:/backup:z", etcdSnapshotPath),
-		},
-		Privileged: true,
-	}
-	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, BundleCertContainer, host.Address, "certificates", prsMap); err != nil {
-		return err
-	}
-	status, err := docker.WaitForContainer(ctx, host.DClient, host.Address, BundleCertContainer)
-	if err != nil {
-		return err
-	}
-	if status != 0 {
-		containerErrLog, _, err := docker.GetContainerLogsStdoutStderr(ctx, host.DClient, BundleCertContainer, "5", false)
-		if err != nil {
-			return err
-		}
-		// removing the container in case of an error too
-		if err := docker.RemoveContainer(ctx, host.DClient, host.Address, BundleCertContainer); err != nil {
-			return err
-		}
-		return fmt.Errorf("Failed to run certificate bundle extract, exit status is: %d, container logs: %s", status, containerErrLog)
-	}
-	log.Infof(ctx, "[certificates] successfully extracted certificate bundle on host [%s] to backup path [%s]", host.Address, TempCertPath)
-	return docker.RemoveContainer(ctx, host.DClient, host.Address, BundleCertContainer)
-}
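
util.SemVerMatchRange and the util.SemVerK8sVersion122OrHigher constant are likewise only consumed in this commit, never defined. A hedged sketch of the semantics the call sites rely on, assuming a Masterminds-style constraint check and ignoring prerelease-handling subtleties; the constant's actual range string is not visible in this diff:

package util

import "github.com/Masterminds/semver"

// SemVerK8sVersion122OrHigher is assumed to be a range expression along
// these lines; the real value is defined elsewhere in the util package.
const SemVerK8sVersion122OrHigher = ">= 1.22"

// SemVerMatchRange (sketch) reports whether the given Kubernetes version,
// e.g. "v1.22.4-rancher1-1", satisfies the range constraint.
func SemVerMatchRange(version, rangeConstraint string) (bool, error) {
	constraint, err := semver.NewConstraint(rangeConstraint)
	if err != nil {
		return false, err
	}
	v, err := semver.NewVersion(version)
	if err != nil {
		return false, err
	}
	return constraint.Check(v), nil
}
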

@@ -22,7 +22,7 @@ import (
 	"k8s.io/kubectl/pkg/drain"
 )
 
-func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI) error {
+func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI, k8sVersion string) error {
 	if updateWorkersOnly {
 		return nil
 	}
@@ -35,7 +35,7 @@ func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnD
 		var errList []error
 		for host := range hostsQueue {
 			runHost := host.(*hosts.Host)
-			err := doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, alpineImage, certMap)
+			err := doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, alpineImage, certMap, k8sVersion)
 			if err != nil {
 				errList = append(errList, err)
 			}
@@ -52,7 +52,7 @@ func RunControlPlane(ctx context.Context, controlHosts []*hosts.Host, localConnD
 
 func UpgradeControlPlaneNodes(ctx context.Context, kubeClient *kubernetes.Clientset, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory,
 	prsMap map[string]v3.PrivateRegistry, cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI,
-	upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, maxUnavailable int) (string, error) {
+	upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, maxUnavailable int, k8sVersion string) (string, error) {
 	if updateWorkersOnly {
 		return "", nil
 	}
@@ -83,7 +83,7 @@ func UpgradeControlPlaneNodes(ctx context.Context, kubeClient *kubernetes.Client
 		inactiveHostErr = fmt.Errorf("provisioning incomplete, host(s) [%s] skipped because they could not be contacted", strings.Join(inactiveHostNames, ","))
 	}
 	hostsFailedToUpgrade, err := processControlPlaneForUpgrade(ctx, kubeClient, controlHosts, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap,
-		upgradeStrategy, newHosts, inactiveHosts, maxUnavailable, drainHelper)
+		upgradeStrategy, newHosts, inactiveHosts, maxUnavailable, drainHelper, k8sVersion)
 	if err != nil || inactiveHostErr != nil {
 		if len(hostsFailedToUpgrade) > 0 {
 			logrus.Errorf("Failed to upgrade hosts: %v with error %v", strings.Join(hostsFailedToUpgrade, ","), err)
@@ -103,7 +103,7 @@ func UpgradeControlPlaneNodes(ctx context.Context, kubeClient *kubernetes.Client
 
 func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Clientset, controlHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory,
 	prsMap map[string]v3.PrivateRegistry, cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI,
-	upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, maxUnavailable int, drainHelper drain.Helper) ([]string, error) {
+	upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, maxUnavailable int, drainHelper drain.Helper, k8sVersion string) ([]string, error) {
 	var errgrp errgroup.Group
 	var failedHosts []string
 	var hostsFailedToUpgrade = make(chan string, maxUnavailable)
@@ -122,7 +122,7 @@ func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.C
 			runHost := host.(*hosts.Host)
 			log.Infof(ctx, "Processing controlplane host %v", runHost.HostnameOverride)
 			if newHosts[runHost.HostnameOverride] {
-				if err := startNewControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap); err != nil {
+				if err := startNewControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap, k8sVersion); err != nil {
 					errList = append(errList, err)
 					hostsFailedToUpgrade <- runHost.HostnameOverride
 					hostsFailed.Store(runHost.HostnameOverride, true)
@@ -156,7 +156,7 @@ func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.C
 			if maxUnavailableHit || len(hostsFailedToUpgrade) >= maxUnavailable {
 				break
 			}
-			controlPlaneUpgradable, workerPlaneUpgradable, err := checkHostUpgradable(ctx, runHost, cpNodePlanMap)
+			controlPlaneUpgradable, workerPlaneUpgradable, err := checkHostUpgradable(ctx, runHost, cpNodePlanMap, k8sVersion)
 			if err != nil {
 				errList = append(errList, err)
 				hostsFailedToUpgrade <- runHost.HostnameOverride
@@ -173,7 +173,7 @@ func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.C
 			}
 
 			shouldDrain := upgradeStrategy.Drain != nil && *upgradeStrategy.Drain
-			if err := upgradeControlHost(ctx, kubeClient, runHost, shouldDrain, drainHelper, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap, controlPlaneUpgradable, workerPlaneUpgradable); err != nil {
+			if err := upgradeControlHost(ctx, kubeClient, runHost, shouldDrain, drainHelper, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap, controlPlaneUpgradable, workerPlaneUpgradable, k8sVersion); err != nil {
 				errList = append(errList, err)
 				hostsFailedToUpgrade <- runHost.HostnameOverride
 				hostsFailed.Store(runHost.HostnameOverride, true)
@@ -194,20 +194,20 @@ func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.C
 }
 
 func startNewControlHost(ctx context.Context, runHost *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry,
-	cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI) error {
-	if err := doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, alpineImage, certMap); err != nil {
+	cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool, alpineImage string, certMap map[string]pki.CertificatePKI, k8sVersion string) error {
+	if err := doDeployControlHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, alpineImage, certMap, k8sVersion); err != nil {
 		return err
 	}
-	return doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage)
+	return doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, cpNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage, k8sVersion)
 }
 
-func checkHostUpgradable(ctx context.Context, runHost *hosts.Host, cpNodePlanMap map[string]v3.RKEConfigNodePlan) (bool, bool, error) {
+func checkHostUpgradable(ctx context.Context, runHost *hosts.Host, cpNodePlanMap map[string]v3.RKEConfigNodePlan, k8sVersion string) (bool, bool, error) {
 	var controlPlaneUpgradable, workerPlaneUpgradable bool
-	controlPlaneUpgradable, err := isControlPlaneHostUpgradable(ctx, runHost, cpNodePlanMap[runHost.Address].Processes)
+	controlPlaneUpgradable, err := isControlPlaneHostUpgradable(ctx, runHost, cpNodePlanMap[runHost.Address].Processes, k8sVersion)
 	if err != nil {
 		return controlPlaneUpgradable, workerPlaneUpgradable, err
 	}
-	workerPlaneUpgradable, err = isWorkerHostUpgradable(ctx, runHost, cpNodePlanMap[runHost.Address].Processes)
+	workerPlaneUpgradable, err = isWorkerHostUpgradable(ctx, runHost, cpNodePlanMap[runHost.Address].Processes, k8sVersion)
 	if err != nil {
 		return controlPlaneUpgradable, workerPlaneUpgradable, err
 	}
@@ -216,19 +216,19 @@ func checkHostUpgradable(ctx context.Context, runHost *hosts.Host, cpNodePlanMap
 
 func upgradeControlHost(ctx context.Context, kubeClient *kubernetes.Clientset, host *hosts.Host, drain bool, drainHelper drain.Helper,
 	localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, cpNodePlanMap map[string]v3.RKEConfigNodePlan, updateWorkersOnly bool,
-	alpineImage string, certMap map[string]pki.CertificatePKI, controlPlaneUpgradable, workerPlaneUpgradable bool) error {
+	alpineImage string, certMap map[string]pki.CertificatePKI, controlPlaneUpgradable, workerPlaneUpgradable bool, k8sVersion string) error {
 	if err := cordonAndDrainNode(kubeClient, host, drain, drainHelper, ControlRole); err != nil {
 		return err
 	}
 	if controlPlaneUpgradable {
 		log.Infof(ctx, "Upgrading controlplane components for control host %v", host.HostnameOverride)
-		if err := doDeployControlHost(ctx, host, localConnDialerFactory, prsMap, cpNodePlanMap[host.Address].Processes, alpineImage, certMap); err != nil {
+		if err := doDeployControlHost(ctx, host, localConnDialerFactory, prsMap, cpNodePlanMap[host.Address].Processes, alpineImage, certMap, k8sVersion); err != nil {
 			return err
 		}
 	}
 	if workerPlaneUpgradable {
 		log.Infof(ctx, "Upgrading workerplane components for control host %v", host.HostnameOverride)
-		if err := doDeployWorkerPlaneHost(ctx, host, localConnDialerFactory, prsMap, cpNodePlanMap[host.Address].Processes, certMap, updateWorkersOnly, alpineImage); err != nil {
+		if err := doDeployWorkerPlaneHost(ctx, host, localConnDialerFactory, prsMap, cpNodePlanMap[host.Address].Processes, certMap, updateWorkersOnly, alpineImage, k8sVersion); err != nil {
 			return err
 		}
 	}
@@ -318,32 +318,32 @@ func RestartControlPlane(ctx context.Context, controlHosts []*hosts.Host) error
 	return nil
 }
 
-func doDeployControlHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI) error {
+func doDeployControlHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI, k8sVersion string) error {
 	if host.IsWorker {
 		if err := removeNginxProxy(ctx, host); err != nil {
 			return err
 		}
 	}
 	// run sidekick
-	if err := runSidekick(ctx, host, prsMap, processMap[SidekickContainerName]); err != nil {
+	if err := runSidekick(ctx, host, prsMap, processMap[SidekickContainerName], k8sVersion); err != nil {
 		return err
 	}
 	// run kubeapi
-	if err := runKubeAPI(ctx, host, localConnDialerFactory, prsMap, processMap[KubeAPIContainerName], alpineImage, certMap); err != nil {
+	if err := runKubeAPI(ctx, host, localConnDialerFactory, prsMap, processMap[KubeAPIContainerName], alpineImage, certMap, k8sVersion); err != nil {
 		return err
 	}
 	// run kubecontroller
-	if err := runKubeController(ctx, host, localConnDialerFactory, prsMap, processMap[KubeControllerContainerName], alpineImage); err != nil {
+	if err := runKubeController(ctx, host, localConnDialerFactory, prsMap, processMap[KubeControllerContainerName], alpineImage, k8sVersion); err != nil {
 		return err
 	}
 	// run scheduler
-	return runScheduler(ctx, host, localConnDialerFactory, prsMap, processMap[SchedulerContainerName], alpineImage)
+	return runScheduler(ctx, host, localConnDialerFactory, prsMap, processMap[SchedulerContainerName], alpineImage, k8sVersion)
 }
 
-func isControlPlaneHostUpgradable(ctx context.Context, host *hosts.Host, processMap map[string]v3.Process) (bool, error) {
+func isControlPlaneHostUpgradable(ctx context.Context, host *hosts.Host, processMap map[string]v3.Process, k8sVersion string) (bool, error) {
 	for _, service := range []string{SidekickContainerName, KubeAPIContainerName, KubeControllerContainerName, SchedulerContainerName} {
 		process := processMap[service]
-		imageCfg, hostCfg, _ := GetProcessConfig(process, host)
+		imageCfg, hostCfg, _ := GetProcessConfig(process, host, k8sVersion)
 		upgradable, err := docker.IsContainerUpgradable(ctx, host.DClient, imageCfg, hostCfg, service, host.Address, ControlRole)
 		if err != nil {
 			if client.IsErrNotFound(err) {
@@ -363,7 +363,7 @@ func isControlPlaneHostUpgradable(ctx context.Context, host *hosts.Host, process
 	return false, nil
 }
 
-func RunGetStateFileFromConfigMap(ctx context.Context, controlPlaneHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, dockerImage string) (string, error) {
+func RunGetStateFileFromConfigMap(ctx context.Context, controlPlaneHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, dockerImage, k8sVersion string) (string, error) {
 	imageCfg := &container.Config{
 		Entrypoint: []string{"bash"},
 		Cmd: []string{
@@ -373,13 +373,31 @@ func RunGetStateFileFromConfigMap(ctx context.Context, controlPlaneHost *hosts.H
 		Image: dockerImage,
 	}
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(controlPlaneHost.PrefixPath, "/etc/kubernetes")),
-		},
 		NetworkMode:   container.NetworkMode("host"),
 		RestartPolicy: container.RestartPolicy{Name: "no"},
 	}
+
+	binds := []string{
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(controlPlaneHost.PrefixPath, "/etc/kubernetes")),
+	}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return "", err
+	}
+
+	if matchedRange {
+		if hosts.IsDockerSELinuxEnabled(controlPlaneHost) {
+			// We configure the label because we do not rewrite SELinux labels anymore on volume mounts (no :z)
+			logrus.Debugf("Configuring security opt label [%s] for [%s] container on host [%s]", SELinuxLabel, ControlPlaneConfigMapStateFileContainerName, controlPlaneHost.Address)
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+
+		binds = util.RemoveZFromBinds(binds)
+	}
+
+	hostCfg.Binds = binds
+
 	if err := docker.DoRemoveContainer(ctx, controlPlaneHost.DClient, ControlPlaneConfigMapStateFileContainerName, controlPlaneHost.Address); err != nil {
 		return "", err
 	}
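
Every call site in this commit applies the same three-step policy, condensed here into one illustrative helper that is not code from the repository: on 1.22+ stop asking Docker to rewrite SELinux labels via ":z", and instead run the container with a shared SELinux label whenever the daemon has SELinux enabled. The SELinuxLabel value below is an assumption; the diff only shows that services.SELinuxLabel exists:

package services

import "github.com/docker/docker/api/types/container"

// Assumed value; the constant is defined elsewhere in the services package.
const SELinuxLabel = "label=type:rke_container_t"

// applySELinuxBindPolicy is an illustrative condensation of the pattern
// this commit repeats at each call site. matchedRange is the result of
// util.SemVerMatchRange, selinuxEnabled that of hosts.IsDockerSELinuxEnabled,
// and removeZ stands in for util.RemoveZFromBinds.
func applySELinuxBindPolicy(hostCfg *container.HostConfig, binds []string, matchedRange, selinuxEnabled bool, removeZ func([]string) []string) {
	if matchedRange {
		// Kubernetes >= 1.22: do not rewrite SELinux labels on volume mounts.
		binds = removeZ(binds)
		if selinuxEnabled {
			// Give the container a label that can read the mounts instead.
			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
		}
	}
	hostCfg.Binds = binds
}
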

services/etcd.go (241 lines changed)

@@ -41,7 +41,8 @@ func RunEtcdPlane(
 	updateWorkersOnly bool,
 	alpineImage string,
 	es v3.ETCDService,
-	certMap map[string]pki.CertificatePKI) error {
+	certMap map[string]pki.CertificatePKI,
+	k8sVersion string) error {
 	log.Infof(ctx, "[%s] Building up etcd plane..", ETCDRole)
 	for _, host := range etcdHosts {
 		if updateWorkersOnly {
@@ -51,10 +52,10 @@ func RunEtcdPlane(
 		etcdProcess := etcdNodePlanMap[host.Address].Processes[EtcdContainerName]
 
 		// need to run this first to set proper ownership and permissions on etcd data dir
-		if err := setEtcdPermissions(ctx, host, prsMap, alpineImage, etcdProcess); err != nil {
+		if err := setEtcdPermissions(ctx, host, prsMap, alpineImage, etcdProcess, k8sVersion); err != nil {
 			return err
 		}
-		imageCfg, hostCfg, _ := GetProcessConfig(etcdProcess, host)
+		imageCfg, hostCfg, _ := GetProcessConfig(etcdProcess, host, k8sVersion)
 		if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole, prsMap); err != nil {
 			return err
 		}
@@ -63,10 +64,10 @@ func RunEtcdPlane(
 		if err != nil {
 			return err
 		}
-		if err := RunEtcdSnapshotSave(ctx, host, prsMap, rkeToolsImage, EtcdSnapshotContainerName, false, es); err != nil {
+		if err := RunEtcdSnapshotSave(ctx, host, prsMap, rkeToolsImage, EtcdSnapshotContainerName, false, es, k8sVersion); err != nil {
 			return err
 		}
-		if err := pki.SaveBackupBundleOnHost(ctx, host, rkeToolsImage, EtcdSnapshotPath, prsMap); err != nil {
+		if err := pki.SaveBackupBundleOnHost(ctx, host, rkeToolsImage, EtcdSnapshotPath, prsMap, k8sVersion); err != nil {
 			return err
 		}
 		if err := createLogLink(ctx, host, EtcdSnapshotContainerName, ETCDRole, alpineImage, prsMap); err != nil {
@@ -87,7 +88,7 @@ func RunEtcdPlane(
 	var healthError error
 	var hosts []string
 	for _, host := range etcdHosts {
-		_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host)
+		_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host, k8sVersion)
 		healthError = isEtcdHealthy(localConnDialerFactory, host, clientCert, clientKey, healthCheckURL)
 		if healthError == nil {
 			break
@@ -277,7 +278,7 @@ func RemoveEtcdMember(ctx context.Context, toDeleteEtcdHost *hosts.Host, etcdHos
 	// Need to health check after successful member remove (especially for leader re-election)
 	// We will check all hosts to see if the cluster becomes healthy
 	var healthError error
-	_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host)
+	_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host, k8sVersion)
 	logrus.Infof("[remove/%s] Checking etcd cluster health on [etcd-%s] after removing [etcd-%s]", ETCDRole, host.HostnameOverride, toDeleteEtcdHost.HostnameOverride)
 	logrus.Debugf("[remove/%s] healthCheckURL for checking etcd cluster health on [etcd-%s] after removing [%s]: [%s]", ETCDRole, host.HostnameOverride, toDeleteEtcdHost.HostnameOverride, healthCheckURL)
 	healthError = isEtcdHealthy(localConnDialerFactory, host, cert, key, healthCheckURL)
@@ -298,10 +299,10 @@ func RemoveEtcdMember(ctx context.Context, toDeleteEtcdHost *hosts.Host, etcdHos
 	return nil
 }
 
-func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, newHost *hosts.Host, localConnDialerFactory hosts.DialerFactory, cert, key []byte, prsMap map[string]v3.PrivateRegistry, etcdNodePlanMap map[string]v3.RKEConfigNodePlan, alpineImage string) error {
-	imageCfg, hostCfg, _ := GetProcessConfig(etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName], newHost)
+func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, newHost *hosts.Host, localConnDialerFactory hosts.DialerFactory, cert, key []byte, prsMap map[string]v3.PrivateRegistry, etcdNodePlanMap map[string]v3.RKEConfigNodePlan, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, _ := GetProcessConfig(etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName], newHost, k8sVersion)
 
-	if err := setEtcdPermissions(ctx, newHost, prsMap, alpineImage, etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName]); err != nil {
+	if err := setEtcdPermissions(ctx, newHost, prsMap, alpineImage, etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName], k8sVersion); err != nil {
 		return err
 	}
 
@@ -315,7 +316,7 @@ func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, newHos
 	var healthError error
 	var hosts []string
 	for _, host := range readyEtcdHosts {
-		_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host)
+		_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host, k8sVersion)
 		healthError = isEtcdHealthy(localConnDialerFactory, host, cert, key, healthCheckURL)
 		if healthError == nil {
 			break
@@ -389,7 +390,7 @@ func IsEtcdMember(ctx context.Context, etcdHost *hosts.Host, etcdHosts []*hosts.
 	return false, nil
 }
 
-func RunEtcdSnapshotSave(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, once bool, es v3.ETCDService) error {
+func RunEtcdSnapshotSave(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, once bool, es v3.ETCDService, k8sVersion string) error {
 	backupCmd := "etcd-backup"
 	restartPolicy := "always"
 	imageCfg := &container.Config{
@@ -420,13 +421,28 @@ func RunEtcdSnapshotSave(ctx context.Context, etcdHost *hosts.Host, prsMap map[s
 		imageCfg = configS3BackupImgCmd(ctx, imageCfg, es.BackupConfig)
 	}
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))},
 		NetworkMode:   container.NetworkMode("host"),
 		RestartPolicy: container.RestartPolicy{Name: restartPolicy},
 	}
+
+	binds := []string{
+		fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+		if hosts.IsDockerSELinuxEnabled(etcdHost) {
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+	hostCfg.Binds = binds
+
 	if once {
 		log.Infof(ctx, "[etcd] Running snapshot save once on host [%s]", etcdHost.Address)
 		logrus.Debugf("[etcd] Using command [%s] for snapshot save once container [%s] on host [%s]", getSanitizedSnapshotCmd(imageCfg, es.BackupConfig), EtcdSnapshotOnceContainerName, etcdHost.Address)
@@ -470,7 +486,7 @@ func RunEtcdSnapshotSave(ctx context.Context, etcdHost *hosts.Host, prsMap map[s
 	return nil
 }
 
-func RunGetStateFileFromSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, es v3.ETCDService) (string, error) {
+func RunGetStateFileFromSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, es v3.ETCDService, k8sVersion string) (string, error) {
 	backupCmd := "etcd-backup"
 	imageCfg := &container.Config{
 		Cmd: []string{
@@ -487,13 +503,30 @@ func RunGetStateFileFromSnapshot(ctx context.Context, etcdHost *hosts.Host, prsM
 		imageCfg = configS3BackupImgCmd(ctx, imageCfg, es.BackupConfig)
 	}
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
-		},
 		NetworkMode:   container.NetworkMode("host"),
 		RestartPolicy: container.RestartPolicy{Name: "no"},
 	}
+
+	binds := []string{
+		fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
+	}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return "", err
+	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+
+		if hosts.IsDockerSELinuxEnabled(etcdHost) {
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+
+	hostCfg.Binds = binds
+
 	if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdStateFileContainerName, etcdHost.Address); err != nil {
 		return "", err
 	}
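
hosts.IsDockerSELinuxEnabled also appears throughout these hunks without its definition. Docker advertises SELinux support through the SecurityOptions field of its Info response, so a plausible sketch looks like the following; both the helper body and the trimmed Host type are assumptions, not code from this diff:

package hosts

import (
	"strings"

	"github.com/docker/docker/api/types"
)

// Host is reduced here to the single field this sketch needs; the real
// hosts.Host carries far more state.
type Host struct {
	DockerInfo types.Info
}

// IsDockerSELinuxEnabled (sketch) reports whether the Docker daemon on
// the host lists SELinux among its security options ("name=selinux").
func IsDockerSELinuxEnabled(h *Host) bool {
	for _, opt := range h.DockerInfo.SecurityOptions {
		if strings.Contains(opt, "selinux") {
			return true
		}
	}
	return false
}
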
@ -511,7 +544,7 @@ func RunGetStateFileFromSnapshot(ctx context.Context, etcdHost *hosts.Host, prsM
|
|||||||
return statefile, nil
|
return statefile, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func DownloadEtcdSnapshotFromS3(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, es v3.ETCDService) error {
|
func DownloadEtcdSnapshotFromS3(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, es v3.ETCDService, k8sVersion string) error {
|
||||||
s3Backend := es.BackupConfig.S3BackupConfig
|
s3Backend := es.BackupConfig.S3BackupConfig
|
||||||
if len(s3Backend.Endpoint) == 0 || len(s3Backend.BucketName) == 0 {
|
if len(s3Backend.Endpoint) == 0 || len(s3Backend.BucketName) == 0 {
|
||||||
return fmt.Errorf("failed to get snapshot [%s] from s3 on host [%s], invalid s3 configurations", name, etcdHost.Address)
|
return fmt.Errorf("failed to get snapshot [%s] from s3 on host [%s], invalid s3 configurations", name, etcdHost.Address)
|
||||||
@ -554,12 +587,30 @@ func DownloadEtcdSnapshotFromS3(ctx context.Context, etcdHost *hosts.Host, prsMa
|
|||||||
}
|
}
|
||||||
log.Infof(ctx, s3Logline)
|
log.Infof(ctx, s3Logline)
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{
|
||||||
Binds: []string{
|
|
||||||
fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
|
|
||||||
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))},
|
|
||||||
NetworkMode: container.NetworkMode("host"),
|
NetworkMode: container.NetworkMode("host"),
|
||||||
RestartPolicy: container.RestartPolicy{Name: "no"},
|
RestartPolicy: container.RestartPolicy{Name: "no"},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
binds := []string{
|
||||||
|
fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
|
||||||
|
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
binds = util.RemoveZFromBinds(binds)
|
||||||
|
|
||||||
|
if hosts.IsDockerSELinuxEnabled(etcdHost) {
|
||||||
|
hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
hostCfg.Binds = binds
|
||||||
|
|
||||||
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdDownloadBackupContainerName, etcdHost.Address); err != nil {
|
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdDownloadBackupContainerName, etcdHost.Address); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -581,7 +632,7 @@ func DownloadEtcdSnapshotFromS3(ctx context.Context, etcdHost *hosts.Host, prsMa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func RestoreEtcdSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry,
|
func RestoreEtcdSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry,
|
||||||
etcdRestoreImage, etcdBackupImage, snapshotName, initCluster string, es v3.ETCDService) error {
|
etcdRestoreImage, etcdBackupImage, snapshotName, initCluster string, es v3.ETCDService, k8sVersion string) error {
|
||||||
log.Infof(ctx, "[etcd] Restoring [%s] snapshot on etcd host [%s]", snapshotName, etcdHost.Address)
|
log.Infof(ctx, "[etcd] Restoring [%s] snapshot on etcd host [%s]", snapshotName, etcdHost.Address)
|
||||||
nodeName := pki.GetCrtNameForHost(etcdHost, pki.EtcdCertName)
|
nodeName := pki.GetCrtNameForHost(etcdHost, pki.EtcdCertName)
|
||||||
snapshotPath := fmt.Sprintf("%s%s", EtcdSnapshotPath, snapshotName)
|
snapshotPath := fmt.Sprintf("%s%s", EtcdSnapshotPath, snapshotName)
|
||||||
@ -610,12 +661,28 @@ func RestoreEtcdSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[s
|
|||||||
Image: etcdRestoreImage,
|
Image: etcdRestoreImage,
|
||||||
}
|
}
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{
|
||||||
Binds: []string{
|
|
||||||
"/opt/rke/:/opt/rke/:z",
|
|
||||||
fmt.Sprintf("%s:/var/lib/rancher/etcd:z", path.Join(etcdHost.PrefixPath, "/var/lib/etcd")),
|
|
||||||
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))},
|
|
||||||
NetworkMode: container.NetworkMode("host"),
|
NetworkMode: container.NetworkMode("host"),
|
||||||
}
|
}
|
||||||
|
binds := []string{
|
||||||
|
"/opt/rke/:/opt/rke/:z",
|
||||||
|
fmt.Sprintf("%s:/var/lib/rancher/etcd:z", path.Join(etcdHost.PrefixPath, "/var/lib/etcd")),
|
||||||
|
fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
binds = util.RemoveZFromBinds(binds)
|
||||||
|
|
||||||
|
if hosts.IsDockerSELinuxEnabled(etcdHost) {
|
||||||
|
hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
hostCfg.Binds = binds
|
||||||
|
|
||||||
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdRestoreContainerName, etcdHost.Address); err != nil {
|
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdRestoreContainerName, etcdHost.Address); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -640,10 +707,10 @@ func RestoreEtcdSnapshot(ctx context.Context, etcdHost *hosts.Host, prsMap map[s
|
|||||||
if err := docker.RemoveContainer(ctx, etcdHost.DClient, etcdHost.Address, EtcdRestoreContainerName); err != nil {
|
if err := docker.RemoveContainer(ctx, etcdHost.DClient, etcdHost.Address, EtcdRestoreContainerName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return RunEtcdSnapshotRemove(ctx, etcdHost, prsMap, etcdBackupImage, snapshotName, true, es)
|
return RunEtcdSnapshotRemove(ctx, etcdHost, prsMap, etcdBackupImage, snapshotName, true, es, k8sVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
func RunEtcdSnapshotRemove(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, cleanupRestore bool, es v3.ETCDService) error {
|
func RunEtcdSnapshotRemove(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string, cleanupRestore bool, es v3.ETCDService, k8sVersion string) error {
|
||||||
log.Infof(ctx, "[etcd] Removing snapshot [%s] from host [%s]", name, etcdHost.Address)
|
log.Infof(ctx, "[etcd] Removing snapshot [%s] from host [%s]", name, etcdHost.Address)
|
||||||
imageCfg := &container.Config{
|
imageCfg := &container.Config{
|
||||||
Image: etcdSnapshotImage,
|
Image: etcdSnapshotImage,
|
||||||
@ -684,11 +751,28 @@ func RunEtcdSnapshotRemove(ctx context.Context, etcdHost *hosts.Host, prsMap map
|
|||||||
}
|
}
|
||||||
|
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{
|
||||||
Binds: []string{
|
|
||||||
fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
|
|
||||||
},
|
|
||||||
RestartPolicy: container.RestartPolicy{Name: "no"},
|
RestartPolicy: container.RestartPolicy{Name: "no"},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
binds := []string{
|
||||||
|
fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
|
||||||
|
}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
binds = util.RemoveZFromBinds(binds)
|
||||||
|
|
||||||
|
if hosts.IsDockerSELinuxEnabled(etcdHost) {
|
||||||
|
hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
hostCfg.Binds = binds
|
||||||
|
|
||||||
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdSnapshotRemoveContainerName, etcdHost.Address); err != nil {
|
if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdSnapshotRemoveContainerName, etcdHost.Address); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -709,7 +793,7 @@ func RunEtcdSnapshotRemove(ctx context.Context, etcdHost *hosts.Host, prsMap map
|
|||||||
return docker.RemoveContainer(ctx, etcdHost.DClient, etcdHost.Address, EtcdSnapshotRemoveContainerName)
|
return docker.RemoveContainer(ctx, etcdHost.DClient, etcdHost.Address, EtcdSnapshotRemoveContainerName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetEtcdSnapshotChecksum(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, alpineImage, snapshotName string) (string, error) {
|
func GetEtcdSnapshotChecksum(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, alpineImage, snapshotName, k8sVersion string) (string, error) {
|
||||||
var checksum string
|
var checksum string
|
||||||
var err error
|
var err error
|
||||||
var stderr string
|
var stderr string
|
||||||
@ -723,10 +807,26 @@ func GetEtcdSnapshotChecksum(ctx context.Context, etcdHost *hosts.Host, prsMap m
|
|||||||
},
|
},
|
||||||
Image: alpineImage,
|
Image: alpineImage,
|
||||||
}
|
}
|
||||||
hostCfg := &container.HostConfig{
|
hostCfg := &container.HostConfig{}
|
||||||
Binds: []string{
|
|
||||||
"/opt/rke/:/opt/rke/:z",
|
binds := []string{
|
||||||
}}
|
"/opt/rke/:/opt/rke/:z",
|
||||||
|
}
|
||||||
|
|
||||||
|
matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if matchedRange {
|
||||||
|
binds = util.RemoveZFromBinds(binds)
|
||||||
|
|
||||||
|
if hosts.IsDockerSELinuxEnabled(etcdHost) {
|
||||||
|
hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
hostCfg.Binds = binds
|
||||||
|
|
||||||
if err := docker.DoRunContainer(ctx, etcdHost.DClient, imageCfg, hostCfg, EtcdChecksumContainerName, etcdHost.Address, ETCDRole, prsMap); err != nil {
|
if err := docker.DoRunContainer(ctx, etcdHost.DClient, imageCfg, hostCfg, EtcdChecksumContainerName, etcdHost.Address, ETCDRole, prsMap); err != nil {
|
||||||
return checksum, err
|
return checksum, err
|
||||||
@ -788,7 +888,7 @@ func configS3BackupImgCmd(ctx context.Context, imageCfg *container.Config, bc *v
|
|||||||
return imageCfg
|
return imageCfg
|
||||||
}
|
}
|
||||||
|
|
||||||
func StartBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage string, name string) error {
|
func StartBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage, name, k8sVersion string) error {
|
||||||
log.Infof(ctx, "[etcd] starting backup server on host [%s]", etcdHost.Address)
|
log.Infof(ctx, "[etcd] starting backup server on host [%s]", etcdHost.Address)
|
||||||
|
|
||||||
imageCfg := &container.Config{
|
imageCfg := &container.Config{
|
||||||
@@ -805,12 +905,27 @@ func StartBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[str
 	}
 
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))},
 		NetworkMode:   container.NetworkMode("host"),
 		RestartPolicy: container.RestartPolicy{Name: "no"},
 	}
+	binds := []string{
+		fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+		if hosts.IsDockerSELinuxEnabled(etcdHost) {
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+	hostCfg.Binds = binds
+
 	if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdServeBackupContainerName, etcdHost.Address); err != nil {
 		return err
 	}
@@ -836,7 +951,7 @@ func StartBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[str
 	return nil
 }
 
-func DownloadEtcdSnapshotFromBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage, name string, backupServer *hosts.Host) error {
+func DownloadEtcdSnapshotFromBackupServer(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, etcdSnapshotImage, name string, backupServer *hosts.Host, k8sVersion string) error {
 	log.Infof(ctx, "[etcd] Get snapshot [%s] on host [%s]", name, etcdHost.Address)
 	imageCfg := &container.Config{
 		Cmd: []string{
@@ -853,12 +968,30 @@ func DownloadEtcdSnapshotFromBackupServer(ctx context.Context, etcdHost *hosts.H
 	}
 
 	hostCfg := &container.HostConfig{
-		Binds: []string{
-			fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
-			fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))},
 		NetworkMode:   container.NetworkMode("host"),
 		RestartPolicy: container.RestartPolicy{Name: "on-failure"},
 	}
+
+	binds := []string{
+		fmt.Sprintf("%s:/backup:z", EtcdSnapshotPath),
+		fmt.Sprintf("%s:/etc/kubernetes:z", path.Join(etcdHost.PrefixPath, "/etc/kubernetes"))}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		binds = util.RemoveZFromBinds(binds)
+
+		if hosts.IsDockerSELinuxEnabled(etcdHost) {
+			hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+		}
+	}
+
+	hostCfg.Binds = binds
+
 	if err := docker.DoRemoveContainer(ctx, etcdHost.DClient, EtcdDownloadBackupContainerName, etcdHost.Address); err != nil {
 		return err
 	}
@@ -879,7 +1012,7 @@ func DownloadEtcdSnapshotFromBackupServer(ctx context.Context, etcdHost *hosts.H
 	return docker.RemoveContainer(ctx, etcdHost.DClient, etcdHost.Address, EtcdDownloadBackupContainerName)
 }
 
-func setEtcdPermissions(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, alpineImage string, process v3.Process) error {
+func setEtcdPermissions(ctx context.Context, etcdHost *hosts.Host, prsMap map[string]v3.PrivateRegistry, alpineImage string, process v3.Process, k8sVersion string) error {
 	var dataBind string
 
 	cmd := fmt.Sprintf("chmod 700 %s", EtcdDataDir)
@@ -901,6 +1034,20 @@ func setEtcdPermissions(ctx context.Context, etcdHost *hosts.Host, prsMap map[st
 	hostCfg := &container.HostConfig{
 		Binds: []string{dataBind},
 	}
+
+	matchedRange, err := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if err != nil {
+		return err
+	}
+
+	if matchedRange {
+		if hosts.IsDockerSELinuxEnabled(etcdHost) {
+			// We apply the label because we do not rewrite SELinux labels anymore on volume mounts (no :z)
+			logrus.Debugf("Applying security opt label [%s] for [%s] container on host [%s]", SELinuxLabel, EtcdPermFixContainerName, etcdHost.Address)
+			hostCfg.SecurityOpt = []string{SELinuxLabel}
+		}
+	}
+
 	if err := docker.DoRunOnetimeContainer(ctx, etcdHost.DClient, imageCfg, hostCfg, EtcdPermFixContainerName,
 		etcdHost.Address, ETCDRole, prsMap); err != nil {
 		return err
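Worth noting: unlike the snapshot hunks above, setEtcdPermissions never calls util.RemoveZFromBinds, which suggests its single dataBind mount is built without a :z option, so only the security opt is needed. It also assigns hostCfg.SecurityOpt outright rather than appending, which is safe here because the HostConfig was created fresh a few lines earlier.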
@@ -11,8 +11,8 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func runKubeAPI(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI) error {
-	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeAPIProcess, host)
+func runKubeAPI(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI, k8sVersion string) error {
+	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeAPIProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole, prsMap); err != nil {
 		return err
 	}
@@ -8,8 +8,8 @@ import (
 	v3 "github.com/rancher/rke/types"
 )
 
-func runKubeController(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, controllerProcess v3.Process, alpineImage string) error {
-	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(controllerProcess, host)
+func runKubeController(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, controllerProcess v3.Process, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(controllerProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole, prsMap); err != nil {
 		return err
 	}
@@ -9,8 +9,8 @@ import (
 	v3 "github.com/rancher/rke/types"
 )
 
-func runKubelet(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeletProcess v3.Process, certMap map[string]pki.CertificatePKI, alpineImage string) error {
-	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeletProcess, host)
+func runKubelet(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeletProcess v3.Process, certMap map[string]pki.CertificatePKI, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeletProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Address, WorkerRole, prsMap); err != nil {
 		return err
 	}
@@ -8,8 +8,8 @@ import (
 	v3 "github.com/rancher/rke/types"
 )
 
-func runKubeproxy(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeProxyProcess v3.Process, alpineImage string) error {
-	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeProxyProcess, host)
+func runKubeproxy(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeProxyProcess v3.Process, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeProxyProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
 		return err
 	}
@@ -13,8 +13,8 @@ const (
 	NginxProxyEnvName = "CP_HOSTS"
 )
 
-func runNginxProxy(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, proxyProcess v3.Process, alpineImage string) error {
-	imageCfg, hostCfg, _ := GetProcessConfig(proxyProcess, host)
+func runNginxProxy(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, proxyProcess v3.Process, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, _ := GetProcessConfig(proxyProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
 		return err
 	}
@@ -8,8 +8,8 @@ import (
 	v3 "github.com/rancher/rke/types"
 )
 
-func runScheduler(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, schedulerProcess v3.Process, alpineImage string) error {
-	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(schedulerProcess, host)
+func runScheduler(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, schedulerProcess v3.Process, alpineImage, k8sVersion string) error {
+	imageCfg, hostCfg, healthCheckURL := GetProcessConfig(schedulerProcess, host, k8sVersion)
 	if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.Address, ControlRole, prsMap); err != nil {
 		return err
 	}
@@ -53,16 +53,17 @@ const (
 
 	ContainerNameLabel = "io.rancher.rke.container.name"
 	MCSLabel           = "label=level:s0:c1000,c1001"
+	SELinuxLabel       = "label=type:rke_container_t"
 )
 
 type RestartFunc func(context.Context, *hosts.Host) error
 
-func runSidekick(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, sidecarProcess v3.Process) error {
+func runSidekick(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, sidecarProcess v3.Process, k8sVersion string) error {
 	isRunning, err := docker.IsContainerRunning(ctx, host.DClient, host.Address, SidekickContainerName, true)
 	if err != nil {
 		return err
 	}
-	imageCfg, hostCfg, _ := GetProcessConfig(sidecarProcess, host)
+	imageCfg, hostCfg, _ := GetProcessConfig(sidecarProcess, host, k8sVersion)
 	isUpgradable := false
 	if isRunning {
 		isUpgradable, err = docker.IsContainerUpgradable(ctx, host.DClient, imageCfg, hostCfg, SidekickContainerName, host.Address, SidekickServiceName)
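The new SELinuxLabel constant is an ordinary Docker SELinux security option: label=type:rke_container_t starts the container process under the SELinux type rke_container_t (expected to be provided on the host by Rancher's SELinux policy package) instead of relying on :z relabeling. A hedged sketch of what the constant amounts to, using the Docker API types these files already import:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Equivalent to `docker run --security-opt label=type:rke_container_t ...`.
	// If the rke_container_t type is not defined in the host's SELinux policy,
	// the container will likely fail to start on an enforcing host.
	hostCfg := &container.HostConfig{
		SecurityOpt: []string{"label=type:rke_container_t"},
	}
	fmt.Println(hostCfg.SecurityOpt)
}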
@@ -102,7 +103,7 @@ func removeSidekick(ctx context.Context, host *hosts.Host) error {
 	return docker.DoRemoveContainer(ctx, host.DClient, SidekickContainerName, host.Address)
 }
 
-func GetProcessConfig(process v3.Process, host *hosts.Host) (*container.Config, *container.HostConfig, string) {
+func GetProcessConfig(process v3.Process, host *hosts.Host, k8sVersion string) (*container.Config, *container.HostConfig, string) {
 	imageCfg := &container.Config{
 		Entrypoint: process.Command,
 		Cmd:        process.Args,
@@ -143,6 +144,15 @@ func GetProcessConfig(process v3.Process, host *hosts.Host) (*container.Config,
 			hostCfg.SecurityOpt = []string{MCSLabel}
 		}
 	}
+
+	// We apply the label because we do not rewrite SELinux labels anymore on volume mounts (no :z)
+	// Limited to Kubernetes 1.22 and higher
+	matchedRange, _ := util.SemVerMatchRange(k8sVersion, util.SemVerK8sVersion122OrHigher)
+	if matchedRange {
+		logrus.Debugf("Applying security opt label [%s] for etcd container on host [%s]", SELinuxLabel, host.Address)
+		hostCfg.SecurityOpt = append(hostCfg.SecurityOpt, SELinuxLabel)
+	}
 	return imageCfg, hostCfg, process.HealthCheck.URL
 }
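Because GetProcessConfig is shared by every core service, this hunk applies the label to all RKE-managed containers on 1.22-and-higher hosts, not only etcd; the debug message's mention of an "etcd container" appears to be a leftover from the etcd-specific hunks above. Note also that the error from util.SemVerMatchRange is discarded here, since GetProcessConfig has no error return to propagate it through.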
@@ -26,7 +26,7 @@ const (
 	unschedulableControlTaint = "node-role.kubernetes.io/controlplane=true:NoSchedule"
 )
 
-func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string) error {
+func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage, k8sVersion string) error {
 	log.Infof(ctx, "[%s] Building up Worker Plane..", WorkerRole)
 	var errgrp errgroup.Group
 
@@ -36,7 +36,7 @@ func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialer
 		var errList []error
 		for host := range hostsQueue {
 			runHost := host.(*hosts.Host)
-			err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage)
+			err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage, k8sVersion)
 			if err != nil {
 				errList = append(errList, err)
 			}
@@ -52,14 +52,14 @@ func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialer
 	return nil
 }
 
-func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *kubernetes.Clientset, mixedRolesHosts []*hosts.Host, workerOnlyHosts []*hosts.Host, inactiveHosts map[string]bool, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts map[string]bool, maxUnavailable int) (string, error) {
+func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *kubernetes.Clientset, mixedRolesHosts []*hosts.Host, workerOnlyHosts []*hosts.Host, inactiveHosts map[string]bool, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts map[string]bool, maxUnavailable int, k8sVersion string) (string, error) {
 	log.Infof(ctx, "[%s] Upgrading Worker Plane..", WorkerRole)
 	var errMsgMaxUnavailableNotFailed string
 	updateNewHostsList(kubeClient, append(mixedRolesHosts, workerOnlyHosts...), newHosts)
 	if len(mixedRolesHosts) > 0 {
 		log.Infof(ctx, "First checking and processing worker components for upgrades on nodes with etcd role one at a time")
 	}
-	multipleRolesHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, mixedRolesHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, 1, upgradeStrategy, newHosts, inactiveHosts)
+	multipleRolesHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, mixedRolesHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, 1, upgradeStrategy, newHosts, inactiveHosts, k8sVersion)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade hosts: %v with error %v", strings.Join(multipleRolesHostsFailedToUpgrade, ","), err)
 		return errMsgMaxUnavailableNotFailed, err
@@ -68,7 +68,7 @@ func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *ku
 	if len(workerOnlyHosts) > 0 {
 		log.Infof(ctx, "Now checking and upgrading worker components on nodes with only worker role %v at a time", maxUnavailable)
 	}
-	workerOnlyHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, workerOnlyHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, maxUnavailable, upgradeStrategy, newHosts, inactiveHosts)
+	workerOnlyHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, workerOnlyHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, maxUnavailable, upgradeStrategy, newHosts, inactiveHosts, k8sVersion)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade hosts: %v with error %v", strings.Join(workerOnlyHostsFailedToUpgrade, ","), err)
 		if len(workerOnlyHostsFailedToUpgrade) >= maxUnavailable {
@@ -93,7 +93,7 @@ func updateNewHostsList(kubeClient *kubernetes.Clientset, allHosts []*hosts.Host
 
 func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Clientset, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory,
 	prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string,
-	maxUnavailable int, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool) ([]string, error) {
+	maxUnavailable int, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, k8sVersion string) ([]string, error) {
 	var errgrp errgroup.Group
 	var drainHelper drain.Helper
 	var failedHosts []string
@@ -120,7 +120,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 			runHost := host.(*hosts.Host)
 			logrus.Infof("[workerplane] Processing host %v", runHost.HostnameOverride)
 			if newHosts[runHost.HostnameOverride] {
-				if err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage); err != nil {
+				if err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage, k8sVersion); err != nil {
 					errList = append(errList, err)
 					hostsFailedToUpgrade <- runHost.HostnameOverride
 					hostsFailed.Store(runHost.HostnameOverride, true)
@@ -154,7 +154,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 			if maxUnavailableHit || len(hostsFailedToUpgrade) >= maxUnavailable {
 				break
 			}
-			upgradable, err := isWorkerHostUpgradable(ctx, runHost, workerNodePlanMap[runHost.Address].Processes)
+			upgradable, err := isWorkerHostUpgradable(ctx, runHost, workerNodePlanMap[runHost.Address].Processes, k8sVersion)
 			if err != nil {
 				errList = append(errList, err)
 				hostsFailed.Store(runHost.HostnameOverride, true)
@@ -169,7 +169,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 				}
 				continue
 			}
-			if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain != nil && *upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage); err != nil {
+			if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain != nil && *upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, k8sVersion); err != nil {
 				errList = append(errList, err)
 				hostsFailed.Store(runHost.HostnameOverride, true)
 				hostsFailedToUpgrade <- runHost.HostnameOverride
@@ -192,13 +192,13 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 
 func upgradeWorkerHost(ctx context.Context, kubeClient *kubernetes.Clientset, runHost *hosts.Host, drainFlag bool, drainHelper drain.Helper,
 	localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool,
-	alpineImage string) error {
+	alpineImage, k8sVersion string) error {
 	// cordon and drain
 	if err := cordonAndDrainNode(kubeClient, runHost, drainFlag, drainHelper, WorkerRole); err != nil {
 		return err
 	}
 	logrus.Debugf("[workerplane] upgrading host %v", runHost.HostnameOverride)
-	if err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage); err != nil {
+	if err := doDeployWorkerPlaneHost(ctx, runHost, localConnDialerFactory, prsMap, workerNodePlanMap[runHost.Address].Processes, certMap, updateWorkersOnly, alpineImage, k8sVersion); err != nil {
 		return err
 	}
 	// consider upgrade done when kubeclient lists node as ready
@@ -209,7 +209,7 @@ func upgradeWorkerHost(ctx context.Context, kubeClient *kubernetes.Clientset, ru
 	return k8s.CordonUncordon(kubeClient, runHost.HostnameOverride, false)
 }
 
-func doDeployWorkerPlaneHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string) error {
+func doDeployWorkerPlaneHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage, k8sVersion string) error {
 	if updateWorkersOnly {
 		if !host.UpdateWorker {
 			return nil
@@ -225,7 +225,7 @@ func doDeployWorkerPlaneHost(ctx context.Context, host *hosts.Host, localConnDia
 			host.ToAddTaints = append(host.ToAddTaints, unschedulableControlTaint)
 		}
 	}
-	return doDeployWorkerPlane(ctx, host, localConnDialerFactory, prsMap, processMap, certMap, alpineImage)
+	return doDeployWorkerPlane(ctx, host, localConnDialerFactory, prsMap, processMap, certMap, alpineImage, k8sVersion)
 }
 
 func RemoveWorkerPlane(ctx context.Context, workerHosts []*hosts.Host, force bool) error {
@@ -299,28 +299,28 @@ func RestartWorkerPlane(ctx context.Context, workerHosts []*hosts.Host) error {
 
 func doDeployWorkerPlane(ctx context.Context, host *hosts.Host,
 	localConnDialerFactory hosts.DialerFactory,
-	prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, alpineImage string) error {
+	prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, alpineImage, k8sVersion string) error {
 	// run nginx proxy
 	if !host.IsControl {
-		if err := runNginxProxy(ctx, host, prsMap, processMap[NginxProxyContainerName], alpineImage); err != nil {
+		if err := runNginxProxy(ctx, host, prsMap, processMap[NginxProxyContainerName], alpineImage, k8sVersion); err != nil {
 			return err
 		}
 	}
 	// run sidekick
-	if err := runSidekick(ctx, host, prsMap, processMap[SidekickContainerName]); err != nil {
+	if err := runSidekick(ctx, host, prsMap, processMap[SidekickContainerName], k8sVersion); err != nil {
 		return err
 	}
 	// run kubelet
-	if err := runKubelet(ctx, host, localConnDialerFactory, prsMap, processMap[KubeletContainerName], certMap, alpineImage); err != nil {
+	if err := runKubelet(ctx, host, localConnDialerFactory, prsMap, processMap[KubeletContainerName], certMap, alpineImage, k8sVersion); err != nil {
 		return err
 	}
-	return runKubeproxy(ctx, host, localConnDialerFactory, prsMap, processMap[KubeproxyContainerName], alpineImage)
+	return runKubeproxy(ctx, host, localConnDialerFactory, prsMap, processMap[KubeproxyContainerName], alpineImage, k8sVersion)
 }
 
-func isWorkerHostUpgradable(ctx context.Context, host *hosts.Host, processMap map[string]v3.Process) (bool, error) {
+func isWorkerHostUpgradable(ctx context.Context, host *hosts.Host, processMap map[string]v3.Process, k8sVersion string) (bool, error) {
 	for _, service := range []string{NginxProxyContainerName, SidekickContainerName, KubeletContainerName, KubeproxyContainerName} {
 		process := processMap[service]
-		imageCfg, hostCfg, _ := GetProcessConfig(process, host)
+		imageCfg, hostCfg, _ := GetProcessConfig(process, host, k8sVersion)
 		upgradable, err := docker.IsContainerUpgradable(ctx, host.DClient, imageCfg, hostCfg, service, host.Address, WorkerRole)
 		if err != nil {
 			if client.IsErrNotFound(err) {
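The worker-plane hunks contain no behavioral change of their own; they only thread k8sVersion down the existing call chain so the version check can run where the container config is built. Roughly:

RunWorkerPlane / UpgradeWorkerPlaneForWorkerAndEtcdNodes
  -> processWorkerPlaneForUpgrade -> upgradeWorkerHost
    -> doDeployWorkerPlaneHost -> doDeployWorkerPlane
      -> runNginxProxy / runSidekick / runKubelet / runKubeproxy
        -> GetProcessConfig(process, host, k8sVersion)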
util/util.go (55 changed lines)
@@ -11,13 +11,15 @@ import (
 
 	"github.com/rancher/rke/metadata"
 
+	sv "github.com/blang/semver"
 	"github.com/coreos/go-semver/semver"
 	ref "github.com/docker/distribution/reference"
 	"github.com/sirupsen/logrus"
 )
 
 const (
 	WorkerThreads = 50
+
+	SemVerK8sVersion122OrHigher = ">=1.22.0-rancher0"
 )
 
 var ProxyEnvVars = [3]string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}
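The new constant's value, >=1.22.0-rancher0, is a semver range expression for the github.com/blang/semver package imported above. A small standalone sketch of how such a range behaves (the sample version strings are illustrative RKE-style versions, not taken from this commit):

package main

import (
	"fmt"

	sv "github.com/blang/semver"
)

func main() {
	rangeExpr, err := sv.ParseRange(">=1.22.0-rancher0")
	if err != nil {
		panic(err)
	}
	for _, raw := range []string{"1.21.9-rancher1-1", "1.22.4-rancher1-1"} {
		v, err := sv.Make(raw)
		if err != nil {
			panic(err)
		}
		// blang/semver ranges compare pre-release identifiers directly,
		// so 1.22.4-rancher1-1 sorts above the 1.22.0-rancher0 lower bound.
		fmt.Printf("%s in range: %v\n", raw, rangeExpr(v))
	}
}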
@@ -30,6 +32,57 @@ func StrToSemVer(version string) (*semver.Version, error) {
 	return v, nil
 }
 
+func SemVerMatchRange(k8sVersion, versionRange string) (bool, error) {
+	if len(k8sVersion) == 0 {
+		return false, fmt.Errorf("Cluster version has zero length")
+	}
+	toMatch, err := sv.Make(k8sVersion[1:])
+	if err != nil {
+		return false, fmt.Errorf("Cluster version [%s] can not be parsed as semver: %v", k8sVersion, err)
+	}
+	semVerRange, err := sv.ParseRange(versionRange)
+	if err != nil {
+		return false, fmt.Errorf("Failed to parse semver range [%s]: %v", versionRange, err)
+	}
+	if semVerRange(toMatch) {
+		logrus.Debugf("SemVerMatchRange: Cluster version [%s] matches range [%s]", k8sVersion, versionRange)
+		return true, nil
+	}
+	return false, nil
+}
+
+func RemoveZFromBinds(binds []string) []string {
+	var returnSlice []string
+	for _, element := range binds {
+		lastIndex := strings.LastIndex(element, ":")
+		bindOptions := element[lastIndex+1:]
+
+		if bindOptions != "" {
+			var cleanBindOptions []string
+
+			splitBindOptions := strings.Split(bindOptions, ",")
+			if len(splitBindOptions) >= 1 {
+				for _, bindOption := range splitBindOptions {
+					if strings.EqualFold(bindOption, "z") {
+						continue
+					}
+					cleanBindOptions = append(cleanBindOptions, bindOption)
+				}
+			}
+			var newString string
+			if len(cleanBindOptions) > 0 {
+				newString = fmt.Sprintf("%s%s", element[:lastIndex], fmt.Sprintf(":%s", strings.Join(cleanBindOptions, ",")))
+			} else {
+				newString = fmt.Sprintf("%s%s", element[:lastIndex], strings.Join(cleanBindOptions, ","))
+			}
+			returnSlice = append(returnSlice, newString)
+		}
+	}
+	return returnSlice
+}
+
 func GetObjectQueue(l interface{}) chan interface{} {
 	s := reflect.ValueOf(l)
 	c := make(chan interface{}, s.Len())
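Two reading notes on the new helpers. SemVerMatchRange slices off the first character (k8sVersion[1:]) before parsing, so it assumes RKE-style versions with a leading "v", such as "v1.22.4-rancher1-1"; passing a bare "1.22.4" would drop the leading digit and fail to parse. In RemoveZFromBinds, strings.Join on an empty cleanBindOptions slice yields "", so the else branch returns the bind with its option suffix stripped entirely rather than leaving a dangling colon.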
util/util_test.go (new file, 19 lines)
@@ -0,0 +1,19 @@
+package util
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestRemoveZFromBinds(t *testing.T) {
+	binds := []string{"/etc/kubernetes:/etc/kubernetes:z", "/var/log/kube-audit:/var/log/kube-audit:rw,z", "/var/lib/test:/var/lib/test:Z,ro", "/usr/local/lib/test:/usr/local/lib/test:ro,z,noexec", "/etc/normalz:/etc/normalz"}
+	expectedBinds := []string{"/etc/kubernetes:/etc/kubernetes", "/var/log/kube-audit:/var/log/kube-audit:rw", "/var/lib/test:/var/lib/test:ro", "/usr/local/lib/test:/usr/local/lib/test:ro,noexec", "/etc/normalz:/etc/normalz"}
+
+	removedBinds := RemoveZFromBinds(binds)
+	assert.ElementsMatch(t, expectedBinds, removedBinds)
+
+	emptyBinds, expectedEmptyBinds := []string{}, []string{}
+	removedEmptyBinds := RemoveZFromBinds(emptyBinds)
+	assert.ElementsMatch(t, expectedEmptyBinds, removedEmptyBinds)
+}
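The test cases also pin down the case-insensitive behavior: because the helper compares with strings.EqualFold, both the shared (:z) and private (:Z) relabel options are stripped, while unrelated options such as ro, rw, and noexec, and binds whose last path segment merely ends in "z" ("/etc/normalz:/etc/normalz"), pass through unchanged.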