Replace `--resource-container` and `--system-container` with `--kubelet-cgroups` and `--system-cgroups` respectively. Update `--runtime-container` to `--runtime-cgroups`. Clean up most of the kubelet code that consumes these flags to match the new flag names.

Signed-off-by: Vishnu kannan <vishnuk@google.com>

This commit is contained in:
parent 51e4ccf106
commit 575812787d
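For `--resource-container` and `--system-container`, the diff keeps the old spellings working by binding both the old and the new flag to the same field and marking the old name deprecated (`--runtime-container` is simply renamed). A minimal standalone sketch of that pflag pattern, using a hypothetical flag set and variable rather than the kubelet's actual options struct:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var kubeletCgroups string
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)

	// Old name still parses and writes into the same variable...
	fs.StringVar(&kubeletCgroups, "resource-container", "", "Optional absolute name of the resource-only container to create and run the Kubelet in.")
	fs.MarkDeprecated("resource-container", "Use --kubelet-cgroups instead. Will be removed in a future version.")
	// ...while the new name is the documented way to set it.
	fs.StringVar(&kubeletCgroups, "kubelet-cgroups", "", "Optional absolute name of cgroups to create and run the Kubelet in.")

	fs.Parse([]string{"--resource-container=/kubelet"}) // still accepted, prints a deprecation warning
	fmt.Println(kubeletCgroups)                         // "/kubelet"
}
```

With this pattern, existing invocations keep working while `--kubelet-cgroups` becomes the documented spelling going forward.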
@@ -111,15 +111,15 @@ func NewKubeletServer() *KubeletServer {
  RegisterSchedulable: true,
  RegistryBurst: 10,
  RegistryPullQPS: 5.0,
- ResourceContainer: "",
+ KubeletCgroups: "",
  RktPath: "",
  RktStage1Image: "",
  RootDirectory: defaultRootDir,
- RuntimeContainer: "",
+ RuntimeCgroups: "",
  SerializeImagePulls: true,
  StreamingConnectionIdleTimeout: unversioned.Duration{4 * time.Hour},
  SyncFrequency: unversioned.Duration{1 * time.Minute},
- SystemContainer: "",
+ SystemCgroups: "",
  ReconcileCIDR: true,
  KubeAPIQPS: 5.0,
  KubeAPIBurst: 10,
@@ -191,13 +191,20 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
  fs.StringVar(&s.VolumePluginDir, "volume-plugin-dir", s.VolumePluginDir, "<Warning: Alpha feature> The full path of the directory in which to search for additional third party volume plugins")
  fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
  fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
- fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Optional absolute name of the resource-only container to create and run the Kubelet in.")
+ fs.StringVar(&s.KubeletCgroups, "resource-container", s.KubeletCgroups, "Optional absolute name of the resource-only container to create and run the Kubelet in.")
+ fs.MarkDeprecated("resource-container", "Use --kubelet-cgroups instead. Will be removed in a future version.")
+ fs.StringVar(&s.KubeletCgroups, "kubelet-cgroups", s.KubeletCgroups, "Optional absolute name of cgroups to create and run the Kubelet in.")
+
+ fs.StringVar(&s.SystemCgroups, "system-container", s.SystemCgroups, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
+ fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")
+ fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
+
  fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
  fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
  fs.StringVar(&s.LockFilePath, "lock-file", s.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
  fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'")
  fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used")
- fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
  fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
  fs.BoolVar(&s.HairpinMode, "configure-hairpin-mode", s.HairpinMode, "If true, kubelet will set the hairpin mode flag on container interfaces. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service.")
  fs.IntVar(&s.MaxPods, "max-pods", s.MaxPods, "Number of Pods that can run on this Kubelet.")
@@ -222,5 +229,5 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
  fs.DurationVar(&s.OutOfDiskTransitionFrequency.Duration, "outofdisk-transition-frequency", s.OutOfDiskTransitionFrequency.Duration, "Duration for which the kubelet has to wait before transitioning out of out-of-disk node condition status. Default: 5m0s")
  fs.StringVar(&s.NodeIP, "node-ip", s.NodeIP, "IP address of the node. If set, kubelet will use this IP address for the node")
  fs.BoolVar(&s.EnableCustomMetrics, "enable-custom-metrics", s.EnableCustomMetrics, "Support for gathering custom metrics.")
- fs.StringVar(&s.RuntimeContainer, "runtime-container", s.RuntimeContainer, "Optional absolute name of cgroups to create and run the runtime in.")
+ fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
  }
@@ -194,7 +194,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
  CPUCFSQuota: s.CPUCFSQuota,
  DiskSpacePolicy: diskSpacePolicy,
  DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
- RuntimeContainer: s.RuntimeContainer,
+ RuntimeCgroups: s.RuntimeCgroups,
  DockerExecHandler: dockerExecHandler,
  EnableCustomMetrics: s.EnableCustomMetrics,
  EnableDebuggingHandlers: s.EnableDebuggingHandlers,
@@ -236,7 +236,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
  RegistryPullQPS: s.RegistryPullQPS,
  ResolverConfig: s.ResolverConfig,
  Reservation: *reservation,
- ResourceContainer: s.ResourceContainer,
+ KubeletCgroups: s.KubeletCgroups,
  RktPath: s.RktPath,
  RktStage1Image: s.RktStage1Image,
  RootDirectory: s.RootDirectory,
@@ -245,7 +245,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
  StandaloneMode: (len(s.APIServerList) == 0),
  StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout.Duration,
  SyncFrequency: s.SyncFrequency.Duration,
- SystemContainer: s.SystemContainer,
+ SystemCgroups: s.SystemCgroups,
  TLSOptions: tlsOptions,
  Writer: writer,
  VolumePlugins: ProbeVolumePlugins(s.VolumePluginDir),
@@ -306,15 +306,15 @@ func Run(s *options.KubeletServer, kcfg *KubeletConfig) error {
  }

  if kcfg.ContainerManager == nil {
- if kcfg.SystemContainer != "" && kcfg.CgroupRoot == "" {
+ if kcfg.SystemCgroups != "" && kcfg.CgroupRoot == "" {
  return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
  }

  kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, kcfg.CAdvisorInterface, cm.NodeConfig{
- RuntimeContainerName: kcfg.RuntimeContainer,
- SystemContainerName: kcfg.SystemContainer,
- KubeletContainerName: kcfg.ResourceContainer,
- ContainerRuntime: kcfg.ContainerRuntime,
+ RuntimeCgroupsName: kcfg.RuntimeCgroups,
+ SystemCgroupsName: kcfg.SystemCgroups,
+ KubeletCgroupsName: kcfg.KubeletCgroups,
+ ContainerRuntime: kcfg.ContainerRuntime,
  })
  if err != nil {
  return err
@@ -510,7 +510,7 @@ func SimpleKubelet(client *clientset.Clientset,
  CPUCFSQuota: true,
  DiskSpacePolicy: diskSpacePolicy,
  DockerClient: dockerClient,
- RuntimeContainer: "",
+ RuntimeCgroups: "",
  DockerExecHandler: &dockertools.NativeExecHandler{},
  EnableCustomMetrics: false,
  EnableDebuggingHandlers: true,
@@ -539,11 +539,11 @@ func SimpleKubelet(client *clientset.Clientset,
  RegistryBurst: 10,
  RegistryPullQPS: 5.0,
  ResolverConfig: kubetypes.ResolvConfDefault,
- ResourceContainer: "/kubelet",
+ KubeletCgroups: "/kubelet",
  RootDirectory: rootDir,
  SerializeImagePulls: true,
  SyncFrequency: syncFrequency,
- SystemContainer: "",
+ SystemCgroups: "",
  TLSOptions: tlsOptions,
  VolumePlugins: volumePlugins,
  Writer: &io.StdWriter{},
@@ -686,7 +686,7 @@ type KubeletConfig struct {
  CPUCFSQuota bool
  DiskSpacePolicy kubelet.DiskSpacePolicy
  DockerClient dockertools.DockerInterface
- RuntimeContainer string
+ RuntimeCgroups string
  DockerExecHandler dockertools.ExecHandler
  EnableCustomMetrics bool
  EnableDebuggingHandlers bool
@@ -733,7 +733,7 @@ type KubeletConfig struct {
  RegistryPullQPS float64
  Reservation kubetypes.Reservation
  ResolverConfig string
- ResourceContainer string
+ KubeletCgroups string
  RktPath string
  RktStage1Image string
  RootDirectory string
@@ -742,7 +742,7 @@ type KubeletConfig struct {
  StandaloneMode bool
  StreamingConnectionIdleTimeout time.Duration
  SyncFrequency time.Duration
- SystemContainer string
+ SystemCgroups string
  TLSOptions *server.TLSOptions
  Writer io.Writer
  VolumePlugins []volume.VolumePlugin
@@ -180,7 +180,7 @@ func (s *KubeletExecutorServer) runKubelet(

  return decorated, pc, nil
  }
- kcfg.RuntimeContainer = "" // don't move the docker daemon into a cgroup
+ kcfg.RuntimeCgroups = "" // don't move the docker daemon into a cgroup
  kcfg.Hostname = kcfg.HostnameOverride
  kcfg.KubeClient = apiclient
@@ -201,7 +201,7 @@ func (s *KubeletExecutorServer) runKubelet(
  kcfg.NodeName = kcfg.HostnameOverride
  kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
  kcfg.StandaloneMode = false
- kcfg.SystemContainer = "" // don't take control over other system processes.
+ kcfg.SystemCgroups = "" // don't take control over other system processes.
  if kcfg.Cloud != nil {
  // fail early and hard because having the cloud provider loaded would go unnoticed,
  // but break bigger cluster because accessing the state.json from every slave kills the master.
@@ -217,10 +217,10 @@ func (s *KubeletExecutorServer) runKubelet(

  kcfg.CAdvisorInterface = cAdvisorInterface
  kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface, cm.NodeConfig{
- RuntimeContainerName: kcfg.RuntimeContainer,
- SystemContainerName: kcfg.SystemContainer,
- KubeletContainerName: kcfg.ResourceContainer,
- ContainerRuntime: kcfg.ContainerRuntime,
+ RuntimeCgroupsName: kcfg.RuntimeCgroups,
+ SystemCgroupsName: kcfg.SystemCgroups,
+ KubeletCgroupsName: kcfg.KubeletCgroups,
+ ContainerRuntime: kcfg.ContainerRuntime,
  })
  if err != nil {
  return err
@@ -171,6 +171,7 @@ kubectl-path
  kubelet-address
  kubelet-cadvisor-port
  kubelet-certificate-authority
+ kubelet-cgroups
  kubelet-client-certificate
  kubelet-client-key
  kubelet-docker-endpoint
@@ -311,7 +312,7 @@ root-ca-file
  root-dir
  run-proxy
  runtime-config
- runtime-container
+ runtime-cgroups
  save-config
  scheduler-config
  scheduler-name
@@ -348,6 +349,7 @@ storage-versions
  streaming-connection-idle-timeout
  suicide-timeout
  sync-frequency
+ system-cgroups
  system-container
  system-reserved
  target-port
@@ -52,9 +52,9 @@ type KubeProxyConfiguration struct {
  // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
  // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
  PortRange string `json:"portRange"`
- // resourceContainer is the bsolute name of the resource-only container to create and run
+ // resourceContainer is the absolute name of the resource-only container to create and run
  // the Kube-proxy in (Default: /kube-proxy).
- ResourceContainer string `json:"resourceContainer"`
+ ResourceContainer string `json:"kubeletCgroups"`
  // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
  // Must be greater than 0. Only applicable for proxyMode=userspace.
  UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"`
@@ -223,9 +223,14 @@ type KubeletConfiguration struct {
  CloudProvider string `json:"cloudProvider,omitempty"`
  // cloudConfigFile is the path to the cloud provider configuration file.
  CloudConfigFile string `json:"cloudConfigFile,omitempty"`
- // resourceContainer is the absolute name of the resource-only container
- // to create and run the Kubelet in.
- ResourceContainer string `json:"resourceContainer,omitempty"`
+ // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
+ KubeletCgroups string `json:"kubeletCgroups,omitempty"`
+ // Cgroups that container runtime is expected to be isolated in.
+ RuntimeCgroups string `json:"runtimeCgroups,omitempty"`
+ // SystemCgroups is absolute name of cgroups in which to place
+ // all non-kernel processes that are not already in a container. Empty
+ // for no container. Rolling back the flag requires a reboot.
+ SystemCgroups string `json:"systemContainer,omitempty"`
  // cgroupRoot is the root cgroup to use for pods. This is handled by the
  // container runtime on a best effort basis.
  CgroupRoot string `json:"cgroupRoot,omitempty"`
@@ -241,10 +246,6 @@ type KubeletConfiguration struct {
  // rktStage1Image is the image to use as stage1. Local paths and
  // http/https URLs are supported.
  RktStage1Image string `json:"rktStage1Image,omitempty"`
- // systemContainer is the resource-only container in which to place
- // all non-kernel processes that are not already in a container. Empty
- // for no container. Rolling back the flag requires a reboot.
- SystemContainer string `json:"systemContainer"`
  // configureCBR0 enables the kublet to configure cbr0 based on
  // Node.Spec.PodCIDR.
  ConfigureCBR0 bool `json:"configureCbr0"`
@@ -304,8 +305,6 @@ type KubeletConfiguration struct {
  NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"`
  // enable gathering custom metrics.
  EnableCustomMetrics bool `json:"enableCustomMetrics"`
- // The cgroup that container runtime is expected to be isolated in.
- RuntimeContainer string `json:"runtimeContainer,omitempty"`
  }

  type KubeSchedulerConfiguration struct {
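One detail worth noting in the hunks above: `KubeletCgroups` and `RuntimeCgroups` get new JSON names, while `SystemCgroups` keeps the old `systemContainer` wire name. A small self-contained illustration of how the renamed fields serialize, using a local stand-in struct with the same tags rather than the real componentconfig.KubeletConfiguration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// kubeletConfiguration mirrors only the renamed fields and their JSON tags
// from this diff; it is an illustration, not the real API type.
type kubeletConfiguration struct {
	KubeletCgroups string `json:"kubeletCgroups,omitempty"`
	RuntimeCgroups string `json:"runtimeCgroups,omitempty"`
	// The diff keeps the old wire name for SystemCgroups.
	SystemCgroups string `json:"systemContainer,omitempty"`
}

func main() {
	out, _ := json.Marshal(kubeletConfiguration{
		KubeletCgroups: "/kubelet",
		RuntimeCgroups: "/docker-daemon",
		SystemCgroups:  "/system",
	})
	fmt.Println(string(out))
	// {"kubeletCgroups":"/kubelet","runtimeCgroups":"/docker-daemon","systemContainer":"/system"}
}
```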
File diff suppressed because it is too large
@@ -27,17 +27,17 @@ type ContainerManager interface {
  // - Creates the system container where all non-containerized processes run.
  Start() error

- // Returns resources allocated to system containers in the machine.
- // These containers include the system and Kubernetes services.
- SystemContainersLimit() api.ResourceList
+ // Returns resources allocated to system cgroups in the machine.
+ // These cgroups include the system and Kubernetes services.
+ SystemCgroupsLimit() api.ResourceList

  // Returns a NodeConfig that is being used by the container manager.
  GetNodeConfig() NodeConfig
  }

  type NodeConfig struct {
- RuntimeContainerName string
- SystemContainerName string
- KubeletContainerName string
- ContainerRuntime string
+ RuntimeCgroupsName string
+ SystemCgroupsName string
+ KubeletCgroupsName string
+ ContainerRuntime string
  }
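The NodeConfig field renames above ripple out to every caller shown elsewhere in the diff: Run() in the kubelet server and the Mesos executor both build a cm.NodeConfig from the kubelet configuration. A rough, self-contained sketch of that mapping, using local stand-in structs (not the real KubeletConfig and cm.NodeConfig types); the example values mirror the ones used in TestBuildSummary:

```go
package main

import "fmt"

// Local mirrors of the renamed fields, for illustration only.
type kubeletConfig struct {
	KubeletCgroups   string // was ResourceContainer
	RuntimeCgroups   string // was RuntimeContainer
	SystemCgroups    string // was SystemContainer
	ContainerRuntime string
}

type nodeConfig struct {
	RuntimeCgroupsName string // was RuntimeContainerName
	SystemCgroupsName  string // was SystemContainerName
	KubeletCgroupsName string // was KubeletContainerName
	ContainerRuntime   string
}

// toNodeConfig shows the shape of the mapping the diff performs when the
// container manager is constructed: each *Cgroups field feeds the matching
// *CgroupsName field.
func toNodeConfig(kcfg kubeletConfig) nodeConfig {
	return nodeConfig{
		RuntimeCgroupsName: kcfg.RuntimeCgroups,
		SystemCgroupsName:  kcfg.SystemCgroups,
		KubeletCgroupsName: kcfg.KubeletCgroups,
		ContainerRuntime:   kcfg.ContainerRuntime,
	}
}

func main() {
	nc := toNodeConfig(kubeletConfig{
		KubeletCgroups:   "/kubelet",
		RuntimeCgroups:   "/docker-daemon",
		SystemCgroups:    "/system",
		ContainerRuntime: "docker",
	})
	fmt.Printf("%+v\n", nc)
}
```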
@@ -66,7 +66,7 @@ type systemContainer struct {
  manager *fs.Manager
  }

- func newSystemContainer(containerName string) *systemContainer {
+ func newSystemCgroups(containerName string) *systemContainer {
  return &systemContainer{
  name: containerName,
  manager: createManager(containerName),
@@ -193,8 +193,8 @@ func (cm *containerManagerImpl) setupNode() error {

  systemContainers := []*systemContainer{}
  if cm.ContainerRuntime == "docker" {
- if cm.RuntimeContainerName != "" {
- cont := newSystemContainer(cm.RuntimeContainerName)
+ if cm.RuntimeCgroupsName != "" {
+ cont := newSystemCgroups(cm.RuntimeCgroupsName)
  info, err := cm.cadvisorInterface.MachineInfo()
  var capacity = api.ResourceList{}
  if err != nil {
@@ -203,16 +203,16 @@ func (cm *containerManagerImpl) setupNode() error {
  }
  memoryLimit := (int64(capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100))
  if memoryLimit < MinDockerMemoryLimit {
- glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeContainerName, MinDockerMemoryLimit)
+ glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
  memoryLimit = MinDockerMemoryLimit
  }

- glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeContainerName, memoryLimit)
+ glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)

  dockerContainer := &fs.Manager{
  Cgroups: &configs.Cgroup{
  Parent: "/",
- Name: cm.RuntimeContainerName,
+ Name: cm.RuntimeCgroupsName,
  Resources: &configs.Resources{
  Memory: memoryLimit,
  MemorySwap: -1,
@@ -229,16 +229,16 @@ func (cm *containerManagerImpl) setupNode() error {
  if err != nil {
  glog.Error(err)
  } else {
- cm.RuntimeContainerName = cont
+ cm.RuntimeCgroupsName = cont
  }
  }
  }

- if cm.SystemContainerName != "" {
- if cm.SystemContainerName == "/" {
+ if cm.SystemCgroupsName != "" {
+ if cm.SystemCgroupsName == "/" {
  return fmt.Errorf("system container cannot be root (\"/\")")
  }
- cont := newSystemContainer(cm.SystemContainerName)
+ cont := newSystemCgroups(cm.SystemCgroupsName)
  rootContainer := &fs.Manager{
  Cgroups: &configs.Cgroup{
  Parent: "/",
@@ -246,17 +246,17 @@ func (cm *containerManagerImpl) setupNode() error {
  },
  }
  cont.ensureStateFunc = func(manager *fs.Manager) error {
- return ensureSystemContainer(rootContainer, manager)
+ return ensureSystemCgroups(rootContainer, manager)
  }
  systemContainers = append(systemContainers, cont)
  }

- if cm.KubeletContainerName != "" {
- cont := newSystemContainer(cm.KubeletContainerName)
+ if cm.KubeletCgroupsName != "" {
+ cont := newSystemCgroups(cm.KubeletCgroupsName)
  manager := fs.Manager{
  Cgroups: &configs.Cgroup{
  Parent: "/",
- Name: cm.KubeletContainerName,
+ Name: cm.KubeletCgroupsName,
  Resources: &configs.Resources{
  AllowAllDevices: true,
  },
@@ -271,7 +271,7 @@ func (cm *containerManagerImpl) setupNode() error {
  if err != nil {
  glog.Error("failed to find cgroups of kubelet - %v", err)
  } else {
- cm.KubeletContainerName = cont
+ cm.KubeletCgroupsName = cont
  }
  }
@@ -328,7 +328,7 @@ func (cm *containerManagerImpl) Start() error {
  return nil
  }

- func (cm *containerManagerImpl) SystemContainersLimit() api.ResourceList {
+ func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList {
  cpuLimit := int64(0)

  // Sum up resources of all external containers.
@@ -435,7 +435,7 @@ func getContainer(pid int) (string, error) {
  // The reason of leaving kernel threads at root cgroup is that we don't want to tie the
  // execution of these threads with to-be defined /system quota and create priority inversions.
  //
- func ensureSystemContainer(rootContainer *fs.Manager, manager *fs.Manager) error {
+ func ensureSystemCgroups(rootContainer *fs.Manager, manager *fs.Manager) error {
  // Move non-kernel PIDs to the system container.
  attemptsRemaining := 10
  var errs []error
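The context comment above explains why kernel threads stay in the root cgroup. As background only, a common user-space heuristic for recognizing kernel threads is an empty `/proc/<pid>/cmdline`; the sketch below shows that heuristic in isolation and is not the kubelet's actual ensureSystemCgroups implementation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// isKernelThread reports whether pid looks like a kernel thread using the
// common /proc heuristic: kernel threads expose an empty cmdline.
func isKernelThread(pid int) bool {
	data, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "cmdline"))
	return err != nil || len(data) == 0
}

func main() {
	entries, err := os.ReadDir("/proc")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		pid, err := strconv.Atoi(e.Name())
		if err != nil {
			continue // not a PID directory
		}
		if isKernelThread(pid) {
			continue // leave kernel threads in the root cgroup
		}
		fmt.Println("candidate for the system cgroup:", pid)
	}
}
```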
@@ -30,7 +30,7 @@ func (cm *containerManagerStub) Start() error {
  return nil
  }

- func (cm *containerManagerStub) SystemContainersLimit() api.ResourceList {
+ func (cm *containerManagerStub) SystemCgroupsLimit() api.ResourceList {
  return api.ResourceList{}
  }
@@ -35,7 +35,7 @@ func (unsupportedContainerManager) Start() error {
  return fmt.Errorf("Container Manager is unsupported in this build")
  }

- func (unsupportedContainerManager) SystemContainersLimit() api.ResourceList {
+ func (unsupportedContainerManager) SystemCgroupsLimit() api.ResourceList {
  return api.ResourceList{}
  }
@@ -119,9 +119,9 @@ func (sb *summaryBuilder) build() (*Summary, error) {
  }

  systemContainers := map[string]string{
- SystemContainerKubelet: sb.nodeConfig.KubeletContainerName,
- SystemContainerRuntime: sb.nodeConfig.RuntimeContainerName,
- SystemContainerMisc: sb.nodeConfig.SystemContainerName,
+ SystemContainerKubelet: sb.nodeConfig.KubeletCgroupsName,
+ SystemContainerRuntime: sb.nodeConfig.RuntimeCgroupsName,
+ SystemContainerMisc: sb.nodeConfig.SystemCgroupsName,
  }
  for sys, name := range systemContainers {
  if info, ok := sb.infos[name]; ok {
@@ -48,9 +48,9 @@ func TestBuildSummary(t *testing.T) {
  node := api.Node{}
  node.Name = "FooNode"
  nodeConfig := cm.NodeConfig{
- RuntimeContainerName: "/docker-daemon",
- SystemContainerName: "/system",
- KubeletContainerName: "/kubelet",
+ RuntimeCgroupsName: "/docker-daemon",
+ SystemCgroupsName: "/system",
+ KubeletCgroupsName: "/kubelet",
  }
  const (
  namespace0 = "test0"