Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Merge pull request #39972 from derekwaynecarr/pod-cgroups-default
Automatic merge from submit-queue (batch tested with PRs 40289, 40877, 40879, 39972, 40942)

Rename experimental-cgroups-per-qos flag

**What this PR does / why we need it**:
1. Rename `experimental-cgroups-per-qos` to `cgroups-per-qos`.
2. Update hack/local-up-cluster to match `CGROUP_DRIVER` with the docker runtime when docker is used.

**Special notes for your reviewer**: We plan to roll this feature out in the upcoming release. Previous node e2e runs were already running with this feature on by default, and we will default it on for all e2es next week.

**Release note**:
```release-note
Rename --experimental-cgroups-per-qos to --cgroups-per-qos
```
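At the flag layer the rename is purely mechanical: the kubelet registers the same boolean under the new name. A minimal, self-contained sketch of the pflag wiring (the standalone `main` and the `kubeletFlags` struct are illustrative stand-ins, not the kubelet's actual startup path):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// kubeletFlags is a stand-in for the KubeletServer options struct.
type kubeletFlags struct {
	CgroupsPerQOS bool
	CgroupRoot    string
}

func main() {
	var f kubeletFlags
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
	// Same registration pattern as the AddFlags hunk below, minus the
	// now-removed "experimental-" prefix.
	fs.BoolVar(&f.CgroupsPerQOS, "cgroups-per-qos", f.CgroupsPerQOS,
		"Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
	fs.StringVar(&f.CgroupRoot, "cgroup-root", f.CgroupRoot,
		"Optional root cgroup to use for pods.")
	_ = fs.Parse([]string{"--cgroups-per-qos=true", "--cgroup-root=/"})
	fmt.Printf("cgroups-per-qos=%v cgroup-root=%q\n", f.CgroupsPerQOS, f.CgroupRoot)
}
```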
This commit is contained in:
commit a777a8e3ba

Makefile (2 changed lines):
```diff
@@ -233,7 +233,7 @@ define TEST_E2E_NODE_HELP_INFO
 # Example:
 # make test-e2e-node FOCUS=Kubelet SKIP=container
 # make test-e2e-node REMOTE=true DELETE_INSTANCES=true
-# make test-e2e-node TEST_ARGS="--experimental-cgroups-per-qos=true"
+# make test-e2e-node TEST_ARGS="--cgroups-per-qos=true"
 # Build and run tests.
 endef
 .PHONY: test-e2e-node
```
```diff
@@ -196,7 +196,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.MarkDeprecated("system-container", "Use --system-cgroups instead. Will be removed in a future version.")
 	fs.StringVar(&s.SystemCgroups, "system-cgroups", s.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
-	fs.BoolVar(&s.ExperimentalCgroupsPerQOS, "experimental-cgroups-per-qos", s.ExperimentalCgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
+	fs.BoolVar(&s.CgroupsPerQOS, "cgroups-per-qos", s.CgroupsPerQOS, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	fs.StringVar(&s.CgroupDriver, "cgroup-driver", s.CgroupDriver, "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
 	fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
 	fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
```
```diff
@@ -460,7 +460,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
 			SystemCgroupsName:     s.SystemCgroups,
 			KubeletCgroupsName:    s.KubeletCgroups,
 			ContainerRuntime:      s.ContainerRuntime,
-			CgroupsPerQOS:         s.ExperimentalCgroupsPerQOS,
+			CgroupsPerQOS:         s.CgroupsPerQOS,
 			CgroupRoot:            s.CgroupRoot,
 			CgroupDriver:          s.CgroupDriver,
 			ProtectKernelDefaults: s.ProtectKernelDefaults,
```
```diff
@@ -34,7 +34,7 @@ NET_PLUGIN=${NET_PLUGIN:-""}
 NET_PLUGIN_DIR=${NET_PLUGIN_DIR:-""}
 SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
 # if enabled, must set CGROUP_ROOT
-EXPERIMENTAL_CGROUPS_PER_QOS=${EXPERIMENTAL_CGROUPS_PER_QOS:-false}
+CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-false}
 # this is not defaulted to preserve backward compatibility.
 # if EXPERIMENTAL_CGROUPS_PER_QOS is enabled, recommend setting to /
 CGROUP_ROOT=${CGROUP_ROOT:-""}
```
```diff
@@ -189,6 +189,17 @@ CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
 ROOT_CA_FILE=$CERT_DIR/apiserver.crt
 EXPERIMENTAL_CRI=${EXPERIMENTAL_CRI:-"false"}
+
+# name of the cgroup driver, i.e. cgroupfs or systemd
+if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
+  # default cgroup driver to match what is reported by docker to simplify local development
+  if [[ -z ${CGROUP_DRIVER} ]]; then
+    # match driver with docker runtime reported value (they must match)
+    CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
+    echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
+  fi
+fi
+
 # Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
 mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
```
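The same driver-detection logic, sketched in Go for readers who would rather embed it than shell out from a script. The parsing assumptions (a `docker` binary on PATH whose `docker info` output contains a `Cgroup Driver:` line) mirror the grep/cut pipeline above:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// dockerCgroupDriver runs `docker info` and extracts the "Cgroup Driver:"
// line, the same value the script above feeds to --cgroup-driver.
func dockerCgroupDriver() (string, error) {
	out, err := exec.Command("docker", "info").Output()
	if err != nil {
		return "", fmt.Errorf("running docker info: %v", err)
	}
	sc := bufio.NewScanner(bytes.NewReader(out))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, "Cgroup Driver:") {
			return strings.TrimSpace(strings.TrimPrefix(line, "Cgroup Driver:")), nil
		}
	}
	return "", fmt.Errorf("docker info did not report a cgroup driver")
}

func main() {
	driver, err := dockerCgroupDriver()
	if err != nil {
		fmt.Println("detection failed:", err)
		return
	}
	fmt.Println("Kubelet cgroup driver defaulted to use:", driver)
}
```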
```diff
@@ -542,7 +553,7 @@ function start_kubelet {
       --feature-gates="${FEATURE_GATES}" \
       --cpu-cfs-quota=${CPU_CFS_QUOTA} \
       --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" \
-      --experimental-cgroups-per-qos=${EXPERIMENTAL_CGROUPS_PER_QOS} \
+      --cgroups-per-qos=${CGROUPS_PER_QOS} \
       --cgroup-driver=${CGROUP_DRIVER} \
       --cgroup-root=${CGROUP_ROOT} \
       --keep-terminated-pod-volumes=true \
```
```diff
@@ -63,6 +63,7 @@ cert-dir
 certificate-authority
 cgroup-driver
 cgroup-root
+cgroups-per-qos
 chaos-chance
 clean-start
 cleanup
@@ -197,7 +198,6 @@ executor-suicide-timeout
 exit-on-lock-contention
 experimental-allowed-unsafe-sysctls
 experimental-bootstrap-kubeconfig
-experimental-cgroups-per-qos
 experimental-keystone-url
 experimental-keystone-ca-file
 experimental-mounter-path
```
```diff
@@ -298,7 +298,7 @@ type KubeletConfiguration struct {
 	// And all Burstable and BestEffort pods are brought up under their
 	// specific top level QoS cgroup.
 	// +optional
-	ExperimentalCgroupsPerQOS bool
+	CgroupsPerQOS bool
 	// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 	// +optional
 	CgroupDriver string
@@ -311,7 +311,7 @@ type KubeletConfiguration struct {
 	// +optional
 	SystemCgroups string
 	// CgroupRoot is the root cgroup to use for pods.
-	// If ExperimentalCgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
+	// If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
 	// +optional
 	CgroupRoot string
 	// containerRuntime is the container runtime to use.
```
```diff
@@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	if obj.CertDirectory == "" {
 		obj.CertDirectory = "/var/run/kubernetes"
 	}
-	if obj.ExperimentalCgroupsPerQOS == nil {
-		obj.ExperimentalCgroupsPerQOS = boolVar(false)
+	if obj.CgroupsPerQOS == nil {
+		obj.CgroupsPerQOS = boolVar(false)
 	}
 	if obj.ContainerRuntime == "" {
 		obj.ContainerRuntime = "docker"
@@ -394,9 +394,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 		temp := int32(defaultIPTablesDropBit)
 		obj.IPTablesDropBit = &temp
 	}
-	if obj.ExperimentalCgroupsPerQOS == nil {
+	if obj.CgroupsPerQOS == nil {
 		temp := false
-		obj.ExperimentalCgroupsPerQOS = &temp
+		obj.CgroupsPerQOS = &temp
 	}
 	if obj.CgroupDriver == "" {
 		obj.CgroupDriver = "cgroupfs"
@@ -404,8 +404,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	// NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
 	// if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
 	// container runtime default and not default to the root cgroup.
-	if obj.ExperimentalCgroupsPerQOS != nil {
-		if *obj.ExperimentalCgroupsPerQOS {
+	if obj.CgroupsPerQOS != nil {
+		if *obj.CgroupsPerQOS {
 			if obj.CgroupRoot == "" {
 				obj.CgroupRoot = "/"
 			}
```
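The versioned config keeps `CgroupsPerQOS` as a `*bool` so defaulting can tell "unset" apart from an explicit `false`: only a nil pointer receives the default, and `cgroup-root` is forced to `/` only when the hierarchy is enabled. A compressed sketch of that pattern (standalone stand-in types, not the generated code):

```go
package main

import "fmt"

// kubeletConfiguration stands in for the versioned v1alpha1 type; a nil
// CgroupsPerQOS means "not set by the user", distinct from explicit false.
type kubeletConfiguration struct {
	CgroupsPerQOS *bool
	CgroupRoot    string
}

func setDefaults(obj *kubeletConfiguration) {
	if obj.CgroupsPerQOS == nil { // unset: fall back to the default (off)
		temp := false
		obj.CgroupsPerQOS = &temp
	}
	// Backwards compatibility: default cgroup-root to "/" only when the
	// QoS hierarchy is enabled; otherwise leave "" for the runtime default.
	if *obj.CgroupsPerQOS && obj.CgroupRoot == "" {
		obj.CgroupRoot = "/"
	}
}

func main() {
	enabled := true
	on := &kubeletConfiguration{CgroupsPerQOS: &enabled}
	off := &kubeletConfiguration{}
	setDefaults(on)
	setDefaults(off)
	fmt.Println(*on.CgroupsPerQOS, on.CgroupRoot)   // true /
	fmt.Println(*off.CgroupsPerQOS, off.CgroupRoot) // false ""
}
```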
```diff
@@ -359,7 +359,7 @@ type KubeletConfiguration struct {
 	// And all Burstable and BestEffort pods are brought up under their
 	// specific top level QoS cgroup.
 	// +optional
-	ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
+	CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
 	// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
 	// +optional
 	CgroupDriver string `json:"cgroupDriver,omitempty"`
```
```diff
@@ -400,7 +400,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfiguration
 	out.RuntimeCgroups = in.RuntimeCgroups
 	out.SystemCgroups = in.SystemCgroups
 	out.CgroupRoot = in.CgroupRoot
-	if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
+	if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
 		return err
 	}
 	out.CgroupDriver = in.CgroupDriver
@@ -569,7 +569,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfiguration
 	out.CloudProvider = in.CloudProvider
 	out.CloudConfigFile = in.CloudConfigFile
 	out.KubeletCgroups = in.KubeletCgroups
-	if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
+	if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
 		return err
 	}
 	out.CgroupDriver = in.CgroupDriver
```
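These generated converters bridge the versioned `*bool` field and the internal `bool` field. Simplified stand-ins for what `Convert_Pointer_bool_To_bool` and `Convert_bool_To_Pointer_bool` do, as I understand them (the real helpers also take a `conversion.Scope` argument, omitted here):

```go
package main

import "fmt"

// convertPointerBoolToBool mirrors api.Convert_Pointer_bool_To_bool:
// a nil versioned pointer converts to the internal zero value.
func convertPointerBoolToBool(in **bool, out *bool) error {
	if *in != nil {
		*out = **in
	} else {
		*out = false
	}
	return nil
}

// convertBoolToPointerBool mirrors api.Convert_bool_To_Pointer_bool:
// the internal value is copied and its address stored.
func convertBoolToPointerBool(in *bool, out **bool) error {
	v := *in
	*out = &v
	return nil
}

func main() {
	var internal bool
	enabled := true
	versioned := &enabled
	_ = convertPointerBoolToBool(&versioned, &internal)
	fmt.Println(internal) // true

	var roundTripped *bool
	_ = convertBoolToPointerBool(&internal, &roundTripped)
	fmt.Println(*roundTripped) // true
}
```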
```diff
@@ -229,8 +229,8 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *conversion.Cloner) error {
 		*out = new(int32)
 		**out = **in
 	}
-	if in.ExperimentalCgroupsPerQOS != nil {
-		in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
+	if in.CgroupsPerQOS != nil {
+		in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
 		*out = new(bool)
 		**out = **in
 	}
```
```diff
@@ -10168,7 +10168,7 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.OpenAPIDefinition
 						Format: "",
 					},
 				},
-				"experimentalCgroupsPerQOS": {
+				"cgroupsPerQOS": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup.",
 						Type:        []string{"boolean"},
```
```diff
@@ -209,7 +209,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface
 	if nodeConfig.CgroupsPerQOS {
 		// this does default to / when enabled, but this tests against regressions.
 		if nodeConfig.CgroupRoot == "" {
-			return nil, fmt.Errorf("invalid configuration: experimental-cgroups-per-qos was specified and cgroup-root was not specified. To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root")
+			return nil, fmt.Errorf("invalid configuration: cgroups-per-qos was specified and cgroup-root was not specified. To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root")
 		}

 		// we need to check that the cgroup root actually exists for each subsystem
```
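The guard above reduces to a small precondition: enabling the QoS hierarchy without an explicit cgroup root is rejected at startup. A standalone restatement (the helper name is hypothetical; the message matches the hunk):

```go
package cm

import "fmt"

// validateCgroupConfig restates the NewContainerManager guard above;
// the helper name is hypothetical, not part of the real package.
func validateCgroupConfig(cgroupsPerQOS bool, cgroupRoot string) error {
	if cgroupsPerQOS && cgroupRoot == "" {
		return fmt.Errorf("invalid configuration: cgroups-per-qos was specified and cgroup-root was not specified. To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root")
	}
	return nil
}
```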
```diff
@@ -453,7 +453,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps
 		nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
 		os:                        kubeDeps.OSInterface,
 		oomWatcher:                oomWatcher,
-		cgroupsPerQOS:             kubeCfg.ExperimentalCgroupsPerQOS,
+		cgroupsPerQOS:             kubeCfg.CgroupsPerQOS,
 		cgroupRoot:                kubeCfg.CgroupRoot,
 		mounter:                   kubeDeps.Mounter,
 		writer:                    kubeDeps.Writer,
```
```diff
@@ -142,7 +142,7 @@ func GetHollowKubeletConfig(
 	c.EnableCustomMetrics = false
 	c.EnableDebuggingHandlers = true
 	c.EnableServer = true
-	c.ExperimentalCgroupsPerQOS = false
+	c.CgroupsPerQOS = false
 	// hairpin-veth is used to allow hairpin packets. Note that this deviates from
 	// what the "real" kubelet currently does, because there's no way to
 	// set promiscuous mode on docker0.
```
```diff
@@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 	Describe("QOS containers", func() {
 		Context("On enabling QOS cgroup hierarchy", func() {
 			It("Top level QoS containers should have been created", func() {
-				if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
+				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 					return
 				}
 				cgroupsToVerify := []cm.CgroupName{cm.CgroupName(v1.PodQOSBurstable), cm.CgroupName(v1.PodQOSBestEffort)}
@@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 	Describe("Pod containers", func() {
 		Context("On scheduling a Guaranteed Pod", func() {
 			It("Pod containers should have been created under the cgroup-root", func() {
-				if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
+				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 					return
 				}
 				var (
@@ -202,7 +202,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 		})
 		Context("On scheduling a BestEffort Pod", func() {
 			It("Pod containers should have been created under the BestEffort cgroup", func() {
-				if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
+				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 					return
 				}
 				var (
@@ -246,7 +246,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
 		})
 		Context("On scheduling a Burstable Pod", func() {
 			It("Pod containers should have been created under the Burstable cgroup", func() {
-				if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
+				if !framework.TestContext.KubeletConfig.CgroupsPerQOS {
 					return
 				}
 				var (
```
```diff
@@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]"'
 TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=false --cgroup-root=/'
 PARALLELISM=1
@@ -3,4 +3,4 @@ GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
 GCE_ZONE=us-central1-f
 GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=false --cgroup-root=/'
@@ -4,5 +4,5 @@ GCE_ZONE=us-central1-f
 GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
 TIMEOUT=1h
@@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--focus="\[Flaky\]"'
 TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
@@ -4,5 +4,5 @@ GCE_ZONE=us-central1-f
 GCE_PROJECT=k8s-jkns-pr-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
@@ -5,6 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
 CLEANUP=true
 GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
 TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
 PARALLELISM=1
 TIMEOUT=3h
```
```diff
@@ -20,6 +20,6 @@ CLEANUP=true
 # KUBELET_ARGS are the arguments passed to kubelet. The args will override corresponding default kubelet
 # setting in the test framework and --kubelet-flags in TEST_ARGS.
 # If true QoS Cgroup Hierarchy is created and tests specific to the cgroup hierarchy run
-KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
+KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
 # TEST_ARGS are args passed to node e2e test.
 TEST_ARGS=''
```