Merge pull request #54160 from mtaufen/runtime-config-to-flags

Automatic merge from submit-queue (batch tested with PRs 54160, 54016). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Move runtime-related flags from KubeletConfiguration to KubeletFlags

Following the discussion in https://github.com/kubernetes/kubernetes/pull/53833#issuecomment-336317287, this moves runtime-related flags out of KubeletConfiguration and into KubeletFlags (on the embedded ContainerRuntimeOptions).

Broader issue: https://github.com/kubernetes/features/issues/281

```release-note
NONE
```
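As a rough illustration of the pattern this PR applies (not the kubelet's actual code), the sketch below keeps runtime-related settings on a flag-only options struct registered through pflag instead of on the versioned KubeletConfiguration. The flag names and the docker default mirror the diff; everything else is simplified.

```go
// Simplified sketch of the flag-only options pattern; the real struct lives in
// pkg/kubelet/config and carries many more Docker-specific fields.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// ContainerRuntimeOptions holds runtime settings that stay command-line flags
// rather than becoming part of the dynamic, versioned KubeletConfiguration.
type ContainerRuntimeOptions struct {
	ContainerRuntime string // e.g. "docker" or "rkt"
	RuntimeCgroups   string // cgroup the runtime is expected to be isolated in
}

// AddFlags registers the flags under the same names users already pass to the kubelet.
func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime,
		"The container runtime to use. Possible values: 'docker', 'rkt'.")
	fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups,
		"Optional absolute name of cgroups to create and run the runtime in.")
}

func main() {
	// Default mirrors NewContainerRuntimeOptions in the diff: docker is the default runtime.
	opts := &ContainerRuntimeOptions{ContainerRuntime: "docker"}
	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ExitOnError)
	opts.AddFlags(fs)
	_ = fs.Parse([]string{"--runtime-cgroups=/system.slice/runtime"})
	fmt.Printf("runtime=%s cgroups=%s\n", opts.ContainerRuntime, opts.RuntimeCgroups)
}
```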
Kubernetes Submit Queue 2017-10-31 01:23:15 -07:00 committed by GitHub
commit 94935721d5
12 changed files with 53 additions and 45 deletions

@ -21,6 +21,7 @@ go_library(
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library",
"//pkg/kubelet/apis/kubeletconfig/validation:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/util/taints:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@ -22,6 +22,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/config"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
@ -47,6 +48,7 @@ func NewContainerRuntimeOptions() *config.ContainerRuntimeOptions {
}
return &config.ContainerRuntimeOptions{
ContainerRuntime: kubetypes.DockerContainerRuntime,
DockerEndpoint: dockerEndpoint,
DockershimRootDirectory: "/var/lib/dockershim",
DockerDisableSharedPID: true,

@ -411,7 +411,6 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat
fs.StringVar(&c.CgroupRoot, "cgroup-root", c.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&c.CPUManagerPolicy, "cpu-manager-policy", c.CPUManagerPolicy, "<Warning: Alpha feature> CPU Manager policy to use. Possible values: 'none', 'static'. Default: 'none'")
fs.DurationVar(&c.CPUManagerReconcilePeriod.Duration, "cpu-manager-reconcile-period", c.CPUManagerReconcilePeriod.Duration, "<Warning: Alpha feature> CPU Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to `NodeStatusUpdateFrequency`")
fs.StringVar(&c.ContainerRuntime, "container-runtime", c.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'.")
fs.DurationVar(&c.RuntimeRequestTimeout.Duration, "runtime-request-timeout", c.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later.")
fs.StringVar(&c.LockFilePath, "lock-file", c.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
fs.BoolVar(&c.ExitOnLockContention, "exit-on-lock-contention", c.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
@ -435,7 +434,6 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat
fs.Int32Var(&c.KubeAPIBurst, "kube-api-burst", c.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&c.SerializeImagePulls, "serialize-image-pulls", c.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details.")
fs.StringVar(&c.RuntimeCgroups, "runtime-cgroups", c.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
fs.StringVar(&c.EvictionHard, "eviction-hard", c.EvictionHard, "A set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a pod eviction.")
fs.StringVar(&c.EvictionSoft, "eviction-soft", c.EvictionSoft, "A set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction.")
fs.StringVar(&c.EvictionSoftGracePeriod, "eviction-soft-grace-period", c.EvictionSoftGracePeriod, "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.")

@ -702,6 +702,8 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.
k, err := builder(kubeCfg,
kubeDeps,
&kubeFlags.ContainerRuntimeOptions,
kubeFlags.ContainerRuntime,
kubeFlags.RuntimeCgroups,
kubeFlags.HostnameOverride,
kubeFlags.NodeIP,
kubeFlags.ProviderID,
@ -769,6 +771,8 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele
func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *kubelet.Dependencies,
crOptions *config.ContainerRuntimeOptions,
containerRuntime string,
runtimeCgroups string,
hostnameOverride string,
nodeIP string,
providerID string,
@ -796,6 +800,8 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
k, err = kubelet.NewMainKubelet(kubeCfg,
kubeDeps,
crOptions,
containerRuntime,
runtimeCgroups,
hostnameOverride,
nodeIP,
providerID,
@ -920,7 +926,7 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf
}
ds, err := dockershim.NewDockerService(dockerClient, r.PodSandboxImage, streamingConfig, &pluginSettings,
c.RuntimeCgroups, c.CgroupDriver, r.DockershimRootDirectory, r.DockerDisableSharedPID)
f.RuntimeCgroups, c.CgroupDriver, r.DockershimRootDirectory, r.DockerDisableSharedPID)
if err != nil {
return err
}

@ -181,9 +181,6 @@ type KubeletConfiguration struct {
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional
CgroupDriver string
// Cgroups that container runtime is expected to be isolated in.
// +optional
RuntimeCgroups string
// SystemCgroups is absolute name of cgroups in which to place
// all non-kernel processes that are not already in a container. Empty
// for no container. Rolling back the flag requires a reboot.
@ -193,8 +190,6 @@ type KubeletConfiguration struct {
// If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
// +optional
CgroupRoot string
// containerRuntime is the container runtime to use.
ContainerRuntime string
// CPUManagerPolicy is the name of the policy to use.
CPUManagerPolicy string
// CPU Manager reconciliation period.

@ -84,9 +84,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.VolumeStatsAggPeriod == zeroDuration {
obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute}
}
if obj.ContainerRuntime == "" {
obj.ContainerRuntime = kubetypes.DockerContainerRuntime
}
if obj.RuntimeRequestTimeout == zeroDuration {
obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute}
}

@ -169,8 +169,6 @@ type KubeletConfiguration struct {
VolumePluginDir string `json:"volumePluginDir"`
// kubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
KubeletCgroups string `json:"kubeletCgroups"`
// runtimeCgroups are cgroups that container runtime is expected to be isolated in.
RuntimeCgroups string `json:"runtimeCgroups"`
// systemCgroups is absolute name of cgroups in which to place
// all non-kernel processes that are not already in a container. Empty
// for no container. Rolling back the flag requires a reboot.
@ -186,8 +184,6 @@ type KubeletConfiguration struct {
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional
CgroupDriver string `json:"cgroupDriver,omitempty"`
// containerRuntime is the container runtime to use.
ContainerRuntime string `json:"containerRuntime"`
// CPUManagerPolicy is the name of the policy to use.
CPUManagerPolicy string `json:"cpuManagerPolicy"`
// CPU Manager reconciliation period.

@ -211,14 +211,12 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
out.VolumePluginDir = in.VolumePluginDir
out.KubeletCgroups = in.KubeletCgroups
out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
if err := v1.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
return err
}
out.CgroupDriver = in.CgroupDriver
out.ContainerRuntime = in.ContainerRuntime
out.CPUManagerPolicy = in.CPUManagerPolicy
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
@ -355,10 +353,8 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura
return err
}
out.CgroupDriver = in.CgroupDriver
out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
out.ContainerRuntime = in.ContainerRuntime
out.CPUManagerPolicy = in.CPUManagerPolicy
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout

@ -22,6 +22,14 @@ import (
)
type ContainerRuntimeOptions struct {
// General options.
// ContainerRuntime is the container runtime to use.
ContainerRuntime string
// RuntimeCgroups that container runtime is expected to be isolated in.
RuntimeCgroups string
// Docker-specific options.
// DockershimRootDirectory is the path to the dockershim root directory. Defaults to
@ -72,6 +80,10 @@ type ContainerRuntimeOptions struct {
}
func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
// General settings.
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'.")
fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
// Docker-specific settings.
fs.BoolVar(&s.ExperimentalDockershim, "experimental-dockershim", s.ExperimentalDockershim, "Enable dockershim only mode. In this mode, kubelet will only start dockershim without any other functionalities. This flag only serves test purpose, please do not use it unless you are conscious of what you are doing. [default=false]")
fs.MarkHidden("experimental-dockershim")

@ -197,6 +197,8 @@ type Bootstrap interface {
type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *Dependencies,
crOptions *config.ContainerRuntimeOptions,
containerRuntime string,
runtimeCgroups string,
hostnameOverride string,
nodeIP string,
providerID string,
@ -318,6 +320,8 @@ func getRuntimeAndImageServices(remoteRuntimeEndpoint string, remoteImageEndpoin
func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *Dependencies,
crOptions *config.ContainerRuntimeOptions,
containerRuntime string,
runtimeCgroups string,
hostnameOverride string,
nodeIP string,
providerID string,
@ -504,20 +508,21 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeRef: nodeRef,
nodeLabels: kubeCfg.NodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
nodeIP: net.ParseIP(nodeIP),
clock: clock.RealClock{},
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
resolverConfig: kubeCfg.ResolverConfig,
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
containerRuntimeName: containerRuntime,
nodeIP: net.ParseIP(nodeIP),
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
@ -539,7 +544,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
glog.Infof("Experimental host user namespace defaulting is enabled.")
}
hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, crOptions.NetworkPluginName)
hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), containerRuntime, crOptions.NetworkPluginName)
if err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
@ -593,7 +598,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
pluginSettings.LegacyRuntimeHost = nl
// rktnetes cannot be run with CRI.
if kubeCfg.ContainerRuntime != kubetypes.RktContainerRuntime {
if containerRuntime != kubetypes.RktContainerRuntime {
// kubelet defers to the runtime shim to setup networking. Setting
// this to nil will prevent it from trying to invoke the plugin.
// It's easier to always probe and initialize plugins till cri
@ -602,12 +607,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
// if left at nil, that means it is unneeded
var legacyLogProvider kuberuntime.LegacyLogProvider
switch kubeCfg.ContainerRuntime {
switch containerRuntime {
case kubetypes.DockerContainerRuntime:
// Create and start the CRI shim running as a grpc server.
streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
ds, err := dockershim.NewDockerService(kubeDeps.DockerClient, crOptions.PodSandboxImage, streamingConfig,
&pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory,
&pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory,
crOptions.DockerDisableSharedPID)
if err != nil {
return nil, err
@ -642,7 +647,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
// No-op.
break
default:
return nil, fmt.Errorf("unsupported CRI runtime: %q", kubeCfg.ContainerRuntime)
return nil, fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
}
runtimeService, imageService, err := getRuntimeAndImageServices(remoteRuntimeEndpoint, remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout)
if err != nil {
@ -681,7 +686,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
// cri-o relies on cadvisor as a temporary workaround. The code should
// be removed. Related issue:
// https://github.com/kubernetes/kubernetes/issues/51798
if (kubeCfg.ContainerRuntime == kubetypes.DockerContainerRuntime &&
if (containerRuntime == kubetypes.DockerContainerRuntime &&
goruntime.GOOS == "linux") || remoteRuntimeEndpoint == "/var/run/crio.sock" {
klet.StatsProvider = stats.NewCadvisorStatsProvider(
klet.cadvisor,
@ -879,11 +884,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
opt(klet)
}
klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime)
klet.appArmorValidator = apparmor.NewValidator(containerRuntime)
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
if kubeCfg.ContainerRuntime == kubetypes.DockerContainerRuntime {
if containerRuntime == kubetypes.DockerContainerRuntime {
if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClient); err != nil {
return nil, err
}
@ -1028,6 +1033,9 @@ type Kubelet struct {
// Reference to this node.
nodeRef *v1.ObjectReference
// The name of the container runtime
containerRuntimeName string
// Container runtime.
containerRuntime kubecontainer.Runtime
@ -2126,7 +2134,7 @@ func (kl *Kubelet) updateRuntimeUp() {
}
// rkt uses the legacy, non-CRI integration. Don't check the runtime
// conditions for it.
if kl.kubeletConfiguration.ContainerRuntime != kubetypes.RktContainerRuntime {
if kl.containerRuntimeName != kubetypes.RktContainerRuntime {
if s == nil {
glog.Errorf("Container runtime status is nil")
return
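On the consumer side, the hunks above show NewMainKubelet receiving the runtime name and runtime cgroup as explicit arguments and caching the name on the Kubelet struct (containerRuntimeName) instead of reading kubeCfg.ContainerRuntime. Below is a stripped-down sketch of that calling pattern, with illustrative stand-in types rather than the kubelet's real ones.

```go
// Illustrative sketch only: the real NewMainKubelet takes dozens of parameters.
// This shows just the new explicit runtime arguments and how callers branch on them.
package main

import "fmt"

const (
	dockerRuntime = "docker" // stand-in for kubetypes.DockerContainerRuntime
	remoteRuntime = "remote" // stand-in for a remote CRI runtime
)

// kubelet is a stand-in for the real Kubelet struct; it caches the runtime name
// passed in from KubeletFlags rather than consulting the configuration object.
type kubelet struct {
	containerRuntimeName string
	runtimeCgroups       string
}

// newMainKubelet mirrors the new signature shape: the runtime name and cgroup
// arrive as plain arguments sourced from flags, not from KubeletConfiguration.
func newMainKubelet(containerRuntime, runtimeCgroups string) (*kubelet, error) {
	switch containerRuntime {
	case dockerRuntime, remoteRuntime:
		return &kubelet{
			containerRuntimeName: containerRuntime,
			runtimeCgroups:       runtimeCgroups,
		}, nil
	default:
		return nil, fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
	}
}

func main() {
	k, err := newMainKubelet(dockerRuntime, "/system.slice/runtime")
	if err != nil {
		panic(err)
	}
	fmt.Println(k.containerRuntimeName, k.runtimeCgroups)
}
```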

@ -140,9 +140,7 @@ func GetHollowKubeletConfig(
c.ImageGCLowThresholdPercent = 80
c.VolumeStatsAggPeriod.Duration = time.Minute
c.CgroupRoot = ""
c.ContainerRuntime = kubetypes.DockerContainerRuntime
c.CPUCFSQuota = true
c.RuntimeCgroups = ""
c.EnableControllerAttachDetach = false
c.EnableDebuggingHandlers = true
c.EnableServer = true

@ -342,7 +342,6 @@ func recordSystemCgroupProcesses() {
}
cgroups := map[string]string{
"kubelet": cfg.KubeletCgroups,
"runtime": cfg.RuntimeCgroups,
"misc": cfg.SystemCgroups,
}
for name, cgroup := range cgroups {