1
0
mirror of https://github.com/rancher/rke.git synced 2025-09-02 07:24:20 +00:00

Configure MCS labels if selinux is enabled

This commit is contained in:
Sebastiaan van Steenis
2019-08-14 12:53:32 +02:00
committed by Alena Prokharchyk
parent 5d8d538ce0
commit ac16bd8b33
10 changed files with 50 additions and 23 deletions

View File

@@ -36,7 +36,6 @@ const (
NetworkConfigurationEnv = "RKE_NETWORK_CONFIGURATION"
EtcdPathPrefix = "/registry"
ContainerNameLabel = "io.rancher.rke.container.name"
CloudConfigSumEnv = "RKE_CLOUD_CONFIG_CHECKSUM"
CloudProviderNameEnv = "RKE_CLOUD_PROVIDER_NAME"
@@ -279,7 +278,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp
HealthCheck: healthCheck,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.KubeAPIContainerName,
services.ContainerNameLabel: services.KubeAPIContainerName,
},
}
}
@@ -370,7 +369,7 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string
HealthCheck: healthCheck,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.KubeControllerContainerName,
services.ContainerNameLabel: services.KubeControllerContainerName,
},
}
}
@@ -553,7 +552,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOp
HealthCheck: healthCheck,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.KubeletContainerName,
services.ContainerNameLabel: services.KubeletContainerName,
},
}
}
@@ -662,7 +661,7 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svc
Image: c.Services.Kubeproxy.Image,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.KubeproxyContainerName,
services.ContainerNameLabel: services.KubeproxyContainerName,
},
}
}
@@ -718,7 +717,7 @@ func (c *Cluster) BuildProxyProcess(host *hosts.Host, prefixPath string) v3.Proc
Image: c.SystemImages.NginxProxy,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.NginxProxyContainerName,
services.ContainerNameLabel: services.NginxProxyContainerName,
},
}
}
@@ -791,7 +790,7 @@ func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svc
HealthCheck: healthCheck,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.SchedulerContainerName,
services.ContainerNameLabel: services.SchedulerContainerName,
},
}
}
@@ -856,7 +855,7 @@ func (c *Cluster) BuildSidecarProcess(host *hosts.Host, prefixPath string) v3.Pr
HealthCheck: v3.HealthCheck{},
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.SidekickContainerName,
services.ContainerNameLabel: services.SidekickContainerName,
},
Command: Command,
}
@@ -990,7 +989,7 @@ func (c *Cluster) BuildEtcdProcess(host *hosts.Host, etcdHosts []*hosts.Host, pr
HealthCheck: healthCheck,
ImageRegistryAuthConfig: registryAuthConfig,
Labels: map[string]string{
ContainerNameLabel: services.EtcdContainerName,
services.ContainerNameLabel: services.EtcdContainerName,
},
}
}

View File

@@ -506,7 +506,8 @@ func IsContainerUpgradable(ctx context.Context, dClient *client.Client, imageCfg
!sliceEqualsIgnoreOrder(containerInspect.Config.Entrypoint, imageCfg.Entrypoint) ||
!sliceEqualsIgnoreOrder(containerInspect.Config.Cmd, imageCfg.Cmd) ||
!isContainerEnvChanged(containerInspect.Config.Env, imageCfg.Env, imageInspect.Config.Env) ||
!sliceEqualsIgnoreOrder(containerInspect.HostConfig.Binds, hostCfg.Binds) {
!sliceEqualsIgnoreOrder(containerInspect.HostConfig.Binds, hostCfg.Binds) ||
!sliceEqualsIgnoreOrder(containerInspect.HostConfig.SecurityOpt, hostCfg.SecurityOpt) {
logrus.Debugf("[%s] Container [%s] is eligible for upgrade on host [%s]", plane, containerName, hostname)
return true, nil
}
@@ -515,7 +516,11 @@ func IsContainerUpgradable(ctx context.Context, dClient *client.Client, imageCfg
}
// sliceEqualsIgnoreOrder reports whether left and right contain the same set
// of strings, ignoring order and duplicates. On mismatch it logs, at debug
// level, the elements present in right (the new value) but absent from left
// (the old value) to aid debugging of container-upgrade decisions.
//
// NOTE(review): the stale pre-change body line (a bare `return` of the set
// comparison, left over from a stripped diff marker) has been removed — it
// made the logging branch below unreachable.
func sliceEqualsIgnoreOrder(left, right []string) bool {
	if equal := sets.NewString(left...).Equal(sets.NewString(right...)); !equal {
		logrus.Debugf("slice is not equal, showing data in new value which is not in old value: %v", sets.NewString(right...).Difference(sets.NewString(left...)))
		return false
	}
	return true
}
func IsSupportedDockerVersion(info types.Info, K8sVersion string) (bool, error) {

View File

@@ -55,7 +55,7 @@ func RunEtcdPlane(
if err := setEtcdPermissions(ctx, host, prsMap, alpineImage, etcdProcess); err != nil {
return err
}
imageCfg, hostCfg, _ := GetProcessConfig(etcdProcess)
imageCfg, hostCfg, _ := GetProcessConfig(etcdProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, EtcdContainerName, host.Address, ETCDRole, prsMap); err != nil {
return err
}
@@ -84,7 +84,7 @@ func RunEtcdPlane(
clientkey := cert.EncodePrivateKeyPEM(certMap[pki.KubeNodeCertName].Key)
var healthy bool
for _, host := range etcdHosts {
_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName])
_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host)
if healthy = isEtcdHealthy(ctx, localConnDialerFactory, host, clientCert, clientkey, healthCheckURL); healthy {
break
}
@@ -226,7 +226,7 @@ func RemoveEtcdMember(ctx context.Context, etcdHost *hosts.Host, etcdHosts []*ho
}
func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, newHost *hosts.Host, localConnDialerFactory hosts.DialerFactory, cert, key []byte, prsMap map[string]v3.PrivateRegistry, etcdNodePlanMap map[string]v3.RKEConfigNodePlan, alpineImage string) error {
imageCfg, hostCfg, _ := GetProcessConfig(etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName])
imageCfg, hostCfg, _ := GetProcessConfig(etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName], newHost)
if err := setEtcdPermissions(ctx, newHost, prsMap, alpineImage, etcdNodePlanMap[newHost.Address].Processes[EtcdContainerName]); err != nil {
return err
@@ -241,7 +241,7 @@ func ReloadEtcdCluster(ctx context.Context, readyEtcdHosts []*hosts.Host, newHos
time.Sleep(EtcdInitWaitTime * time.Second)
var healthy bool
for _, host := range readyEtcdHosts {
_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName])
_, _, healthCheckURL := GetProcessConfig(etcdNodePlanMap[host.Address].Processes[EtcdContainerName], host)
if healthy = isEtcdHealthy(ctx, localConnDialerFactory, host, cert, key, healthCheckURL); healthy {
break
}

View File

@@ -11,7 +11,7 @@ import (
func runKubeAPI(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeAPIProcess v3.Process, alpineImage string, certMap map[string]pki.CertificatePKI) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeAPIProcess)
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeAPIProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}

View File

@@ -9,7 +9,7 @@ import (
)
func runKubeController(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, controllerProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(controllerProcess)
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(controllerProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}

View File

@@ -10,7 +10,7 @@ import (
)
func runKubelet(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeletProcess v3.Process, certMap map[string]pki.CertificatePKI, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeletProcess)
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeletProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}

View File

@@ -9,7 +9,7 @@ import (
)
func runKubeproxy(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, kubeProxyProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeProxyProcess)
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(kubeProxyProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}

View File

@@ -14,7 +14,7 @@ const (
)
func runNginxProxy(ctx context.Context, host *hosts.Host, prsMap map[string]v3.PrivateRegistry, proxyProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, _ := GetProcessConfig(proxyProcess)
imageCfg, hostCfg, _ := GetProcessConfig(proxyProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole, prsMap); err != nil {
return err
}

View File

@@ -9,7 +9,7 @@ import (
)
func runScheduler(ctx context.Context, host *hosts.Host, df hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, schedulerProcess v3.Process, alpineImage string) error {
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(schedulerProcess)
imageCfg, hostCfg, healthCheckURL := GetProcessConfig(schedulerProcess, host)
if err := docker.DoRunContainer(ctx, host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.Address, ControlRole, prsMap); err != nil {
return err
}

View File

@@ -47,6 +47,9 @@ const (
KubeproxyPort = 10256
WorkerThreads = util.WorkerThreads
ContainerNameLabel = "io.rancher.rke.container.name"
MCSLabel = "label=level:s0:c1000,c1001"
)
type RestartFunc func(context.Context, *hosts.Host) error
@@ -56,7 +59,7 @@ func runSidekick(ctx context.Context, host *hosts.Host, prsMap map[string]v3.Pri
if err != nil {
return err
}
imageCfg, hostCfg, _ := GetProcessConfig(sidecarProcess)
imageCfg, hostCfg, _ := GetProcessConfig(sidecarProcess, host)
isUpgradable := false
if isRunning {
isUpgradable, err = docker.IsContainerUpgradable(ctx, host.DClient, imageCfg, hostCfg, SidekickContainerName, host.Address, SidekickServiceName)
@@ -96,7 +99,7 @@ func removeSidekick(ctx context.Context, host *hosts.Host) error {
return docker.DoRemoveContainer(ctx, host.DClient, SidekickContainerName, host.Address)
}
func GetProcessConfig(process v3.Process) (*container.Config, *container.HostConfig, string) {
func GetProcessConfig(process v3.Process, host *hosts.Host) (*container.Config, *container.HostConfig, string) {
imageCfg := &container.Config{
Entrypoint: process.Command,
Cmd: process.Args,
@@ -119,6 +122,26 @@ func GetProcessConfig(process v3.Process) (*container.Config, *container.HostCon
if len(process.RestartPolicy) > 0 {
hostCfg.RestartPolicy = container.RestartPolicy{Name: process.RestartPolicy}
}
for _, securityOpt := range host.DockerInfo.SecurityOptions {
// If Docker is configured with selinux-enabled:true, we need to specify MCS label to allow files from service-sidekick to be shared between containers
if securityOpt == "selinux" {
logrus.Debugf("Found selinux in DockerInfo.SecurityOptions on host [%s]", host.Address)
// Check for containers having the sidekick container
for _, volumeFrom := range hostCfg.VolumesFrom {
if volumeFrom == SidekickContainerName {
logrus.Debugf("Found [%s] in VolumesFrom on host [%s], applying MCSLabel [%s]", SidekickContainerName, host.Address, MCSLabel)
hostCfg.SecurityOpt = []string{MCSLabel}
}
}
// Check for sidekick container itself
if value, ok := imageCfg.Labels[ContainerNameLabel]; ok {
if value == SidekickContainerName {
logrus.Debugf("Found [%s=%s] in Labels on host [%s], applying MCSLabel [%s]", ContainerNameLabel, SidekickContainerName, host.Address, MCSLabel)
hostCfg.SecurityOpt = []string{MCSLabel}
}
}
}
}
return imageCfg, hostCfg, process.HealthCheck.URL
}