Merge pull request #36767 from vishh/rename-cgroups-flags

Automatic merge from submit-queue

[kubelet] rename --cgroups-per-qos to --experimental-cgroups-per-qos

This reflects the true nature of the "cgroups per qos" feature.

```release-note
 * Rename `--cgroups-per-qos` to `--experimental-cgroups-per-qos` in Kubelet
```
This commit is contained in:
Kubernetes Submit Queue 2016-11-14 17:35:19 -08:00 committed by GitHub
commit 3245e8b355
21 changed files with 38 additions and 38 deletions

View File

@ -194,7 +194,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.") fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")
fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").") fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
fs.BoolVar(&s.CgroupsPerQOS, "cgroups-per-qos", s.CgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.") fs.BoolVar(&s.ExperimentalCgroupsPerQOS, "experimental-cgroups-per-qos", s.ExperimentalCgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
fs.StringVar(&s.CgroupDriver, "cgroup-driver", s.CgroupDriver, "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'") fs.StringVar(&s.CgroupDriver, "cgroup-driver", s.CgroupDriver, "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.") fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.") fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")

View File

@ -442,7 +442,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
SystemCgroupsName: s.SystemCgroups, SystemCgroupsName: s.SystemCgroups,
KubeletCgroupsName: s.KubeletCgroups, KubeletCgroupsName: s.KubeletCgroups,
ContainerRuntime: s.ContainerRuntime, ContainerRuntime: s.ContainerRuntime,
CgroupsPerQOS: s.CgroupsPerQOS, CgroupsPerQOS: s.ExperimentalCgroupsPerQOS,
CgroupRoot: s.CgroupRoot, CgroupRoot: s.CgroupRoot,
CgroupDriver: s.CgroupDriver, CgroupDriver: s.CgroupDriver,
ProtectKernelDefaults: s.ProtectKernelDefaults, ProtectKernelDefaults: s.ProtectKernelDefaults,

View File

@ -59,7 +59,6 @@ cert-dir
certificate-authority certificate-authority
cgroup-driver cgroup-driver
cgroup-root cgroup-root
cgroups-per-qos
chaos-chance chaos-chance
clean-start clean-start
cleanup cleanup
@ -189,6 +188,7 @@ executor-suicide-timeout
exit-on-lock-contention exit-on-lock-contention
experimental-allowed-unsafe-sysctls experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig experimental-bootstrap-kubeconfig
experimental-cgroups-per-qos
experimental-keystone-url experimental-keystone-url
experimental-keystone-ca-file experimental-keystone-ca-file
experimental-mounter-path experimental-mounter-path

View File

@ -1401,7 +1401,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
yyq153[55] = x.CloudProvider != "" yyq153[55] = x.CloudProvider != ""
yyq153[56] = x.CloudConfigFile != "" yyq153[56] = x.CloudConfigFile != ""
yyq153[57] = x.KubeletCgroups != "" yyq153[57] = x.KubeletCgroups != ""
yyq153[58] = x.CgroupsPerQOS != false yyq153[58] = x.ExperimentalCgroupsPerQOS != false
yyq153[59] = x.CgroupDriver != "" yyq153[59] = x.CgroupDriver != ""
yyq153[60] = x.RuntimeCgroups != "" yyq153[60] = x.RuntimeCgroups != ""
yyq153[61] = x.SystemCgroups != "" yyq153[61] = x.SystemCgroups != ""
@ -2647,7 +2647,7 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
_ = yym345 _ = yym345
if false { if false {
} else { } else {
r.EncodeBool(bool(x.CgroupsPerQOS)) r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
} }
} else { } else {
r.EncodeBool(false) r.EncodeBool(false)
@ -2655,13 +2655,13 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else { } else {
if yyq153[58] { if yyq153[58] {
z.EncSendContainerState(codecSelfer_containerMapKey1234) z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("cgroupsPerQOS")) r.EncodeString(codecSelferC_UTF81234, string("experimentalCgroupsPerQOS"))
z.EncSendContainerState(codecSelfer_containerMapValue1234) z.EncSendContainerState(codecSelfer_containerMapValue1234)
yym346 := z.EncBinary() yym346 := z.EncBinary()
_ = yym346 _ = yym346
if false { if false {
} else { } else {
r.EncodeBool(bool(x.CgroupsPerQOS)) r.EncodeBool(bool(x.ExperimentalCgroupsPerQOS))
} }
} }
} }
@ -4378,11 +4378,11 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode
} else { } else {
x.KubeletCgroups = string(r.DecodeString()) x.KubeletCgroups = string(r.DecodeString())
} }
case "cgroupsPerQOS": case "experimentalCgroupsPerQOS":
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.CgroupsPerQOS = false x.ExperimentalCgroupsPerQOS = false
} else { } else {
x.CgroupsPerQOS = bool(r.DecodeBool()) x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
} }
case "cgroupDriver": case "cgroupDriver":
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
@ -5807,9 +5807,9 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco
} }
z.DecSendContainerState(codecSelfer_containerArrayElem1234) z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.CgroupsPerQOS = false x.ExperimentalCgroupsPerQOS = false
} else { } else {
x.CgroupsPerQOS = bool(r.DecodeBool()) x.ExperimentalCgroupsPerQOS = bool(r.DecodeBool())
} }
yyj649++ yyj649++
if yyhl649 { if yyhl649 {

View File

@ -294,7 +294,7 @@ type KubeletConfiguration struct {
// And all Burstable and BestEffort pods are brought up under their // And all Burstable and BestEffort pods are brought up under their
// specific top level QoS cgroup. // specific top level QoS cgroup.
// +optional // +optional
CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"` ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional // +optional
CgroupDriver string `json:"cgroupDriver,omitempty"` CgroupDriver string `json:"cgroupDriver,omitempty"`

View File

@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.CertDirectory == "" { if obj.CertDirectory == "" {
obj.CertDirectory = "/var/run/kubernetes" obj.CertDirectory = "/var/run/kubernetes"
} }
if obj.CgroupsPerQOS == nil { if obj.ExperimentalCgroupsPerQOS == nil {
obj.CgroupsPerQOS = boolVar(false) obj.ExperimentalCgroupsPerQOS = boolVar(false)
} }
if obj.ContainerRuntime == "" { if obj.ContainerRuntime == "" {
obj.ContainerRuntime = "docker" obj.ContainerRuntime = "docker"
@ -391,9 +391,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
temp := int32(defaultIPTablesDropBit) temp := int32(defaultIPTablesDropBit)
obj.IPTablesDropBit = &temp obj.IPTablesDropBit = &temp
} }
if obj.CgroupsPerQOS == nil { if obj.ExperimentalCgroupsPerQOS == nil {
temp := false temp := false
obj.CgroupsPerQOS = &temp obj.ExperimentalCgroupsPerQOS = &temp
} }
if obj.CgroupDriver == "" { if obj.CgroupDriver == "" {
obj.CgroupDriver = "cgroupfs" obj.CgroupDriver = "cgroupfs"
@ -401,8 +401,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
// NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional. // NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
// if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the // if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
// container runtime default and not default to the root cgroup. // container runtime default and not default to the root cgroup.
if obj.CgroupsPerQOS != nil { if obj.ExperimentalCgroupsPerQOS != nil {
if *obj.CgroupsPerQOS { if *obj.ExperimentalCgroupsPerQOS {
if obj.CgroupRoot == "" { if obj.CgroupRoot == "" {
obj.CgroupRoot = "/" obj.CgroupRoot = "/"
} }

View File

@ -355,7 +355,7 @@ type KubeletConfiguration struct {
// And all Burstable and BestEffort pods are brought up under their // And all Burstable and BestEffort pods are brought up under their
// specific top level QoS cgroup. // specific top level QoS cgroup.
// +optional // +optional
CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"` ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional // +optional
CgroupDriver string `json:"cgroupDriver,omitempty"` CgroupDriver string `json:"cgroupDriver,omitempty"`

View File

@ -330,7 +330,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.RuntimeCgroups = in.RuntimeCgroups out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot out.CgroupRoot = in.CgroupRoot
if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
return err return err
} }
out.CgroupDriver = in.CgroupDriver out.CgroupDriver = in.CgroupDriver
@ -496,7 +496,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.CloudProvider = in.CloudProvider out.CloudProvider = in.CloudProvider
out.CloudConfigFile = in.CloudConfigFile out.CloudConfigFile = in.CloudConfigFile
out.KubeletCgroups = in.KubeletCgroups out.KubeletCgroups = in.KubeletCgroups
if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
return err return err
} }
out.CgroupDriver = in.CgroupDriver out.CgroupDriver = in.CgroupDriver

View File

@ -302,12 +302,12 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.RuntimeCgroups = in.RuntimeCgroups out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot out.CgroupRoot = in.CgroupRoot
if in.CgroupsPerQOS != nil { if in.ExperimentalCgroupsPerQOS != nil {
in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
*out = new(bool) *out = new(bool)
**out = **in **out = **in
} else { } else {
out.CgroupsPerQOS = nil out.ExperimentalCgroupsPerQOS = nil
} }
out.CgroupDriver = in.CgroupDriver out.CgroupDriver = in.CgroupDriver
out.ContainerRuntime = in.ContainerRuntime out.ContainerRuntime = in.ContainerRuntime

View File

@ -308,7 +308,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.CloudProvider = in.CloudProvider out.CloudProvider = in.CloudProvider
out.CloudConfigFile = in.CloudConfigFile out.CloudConfigFile = in.CloudConfigFile
out.KubeletCgroups = in.KubeletCgroups out.KubeletCgroups = in.KubeletCgroups
out.CgroupsPerQOS = in.CgroupsPerQOS out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS
out.CgroupDriver = in.CgroupDriver out.CgroupDriver = in.CgroupDriver
out.RuntimeCgroups = in.RuntimeCgroups out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups out.SystemCgroups = in.SystemCgroups

View File

@ -2600,7 +2600,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
Format: "", Format: "",
}, },
}, },
"cgroupsPerQOS": { "experimentalCgroupsPerQOS": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.", Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
Type: []string{"boolean"}, Type: []string{"boolean"},
@ -14428,7 +14428,7 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
Format: "", Format: "",
}, },
}, },
"cgroupsPerQOS": { "experimentalCgroupsPerQOS": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.", Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
Type: []string{"boolean"}, Type: []string{"boolean"},

View File

@ -435,7 +435,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
os: kubeDeps.OSInterface, os: kubeDeps.OSInterface,
oomWatcher: oomWatcher, oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS, cgroupsPerQOS: kubeCfg.ExperimentalCgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot, cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter, mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer, writer: kubeDeps.Writer,

View File

@ -138,7 +138,7 @@ func GetHollowKubeletConfig(
c.EnableCustomMetrics = false c.EnableCustomMetrics = false
c.EnableDebuggingHandlers = true c.EnableDebuggingHandlers = true
c.EnableServer = true c.EnableServer = true
c.CgroupsPerQOS = false c.ExperimentalCgroupsPerQOS = false
// hairpin-veth is used to allow hairpin packets. Note that this deviates from // hairpin-veth is used to allow hairpin packets. Note that this deviates from
// what the "real" kubelet currently does, because there's no way to // what the "real" kubelet currently does, because there's no way to
// set promiscuous mode on docker0. // set promiscuous mode on docker0.

View File

@ -224,7 +224,7 @@ func RegisterNodeFlags() {
// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration. // TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)") flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.") flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.") flag.BoolVar(&TestContext.CgroupsPerQOS, "experimental-cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'") flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.") flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.") flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"' GINKGO_FLAGS='--skip="\[Flaky\]"'
SETUP_NODE=false SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true' TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
PARALLELISM=1 PARALLELISM=1

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"' GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
SETUP_NODE=false SETUP_NODE=false
TEST_ARGS=--cgroups-per-qos=true TEST_ARGS=--experimental-cgroups-per-qos=true
TIMEOUT=1h TIMEOUT=1h

View File

@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true CLEANUP=true
GINKGO_FLAGS='--focus="\[Flaky\]"' GINKGO_FLAGS='--focus="\[Flaky\]"'
SETUP_NODE=false SETUP_NODE=false
TEST_ARGS=--cgroups-per-qos=true TEST_ARGS=--experimental-cgroups-per-qos=true

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
CLEANUP=true CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2' GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
SETUP_NODE=false SETUP_NODE=false
TEST_ARGS=--cgroups-per-qos=true TEST_ARGS=--experimental-cgroups-per-qos=true

View File

@ -5,6 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"' GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
SETUP_NODE=false SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --cgroups-per-qos=true' TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
PARALLELISM=1 PARALLELISM=1
TIMEOUT=3h TIMEOUT=3h

View File

@ -18,4 +18,4 @@ CLEANUP=true
# If true, current user will be added to the docker group on test node # If true, current user will be added to the docker group on test node
SETUP_NODE=false SETUP_NODE=false
# If true QoS Cgroup Hierarchy is created and tests specific to the cgroup hierarchy run # If true QoS Cgroup Hierarchy is created and tests specific to the cgroup hierarchy run
TEST_ARGS=--cgroups-per-qos=true TEST_ARGS=--experimental-cgroups-per-qos=true

View File

@ -223,7 +223,7 @@ func (e *E2EServices) startKubelet() (*server, error) {
} }
if framework.TestContext.CgroupsPerQOS { if framework.TestContext.CgroupsPerQOS {
cmdArgs = append(cmdArgs, cmdArgs = append(cmdArgs,
"--cgroups-per-qos", "true", "--experimental-cgroups-per-qos", "true",
"--cgroup-root", "/", "--cgroup-root", "/",
) )
} }