From 3bad47e8ede2219970e9e9e023dcc4cd88e03674 Mon Sep 17 00:00:00 2001
From: Harshal Patil
Date: Tue, 9 Jul 2024 14:49:21 -0400
Subject: [PATCH] Set only compressible resources on system slice

Add two enforce-node-allocatable options, 'system-reserved-compressible' and
'kube-reserved-compressible', which enforce only compressible resource limits
(CPU shares) on the system-reserved and kube-reserved cgroups instead of the
full set of limits (memory, pids, hugepages).

Signed-off-by: Harshal Patil
---
 cmd/kubelet/app/options/options.go             |  2 +-
 cmd/kubelet/app/server.go                      |  1 +
 .../apis/config/validation/validation.go       | 33 +++++++--
 .../apis/config/validation/validation_test.go  | 36 ++++++++-
 .../cm/node_container_manager_linux.go         | 73 ++++++++++++++-----
 .../cm/node_container_manager_linux_test.go    | 64 +++++++++++++++-
 pkg/kubelet/types/constants.go                 | 10 ++-
 7 files changed, 184 insertions(+), 35 deletions(-)

diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go
index 7102e6bade8..423c607c059 100644
--- a/cmd/kubelet/app/options/options.go
+++ b/cmd/kubelet/app/options/options.go
@@ -498,7 +498,7 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig
     // Node Allocatable Flags
     fs.Var(cliflag.NewMapStringString(&c.SystemReserved), "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for non-kubernetes components. Currently only cpu, memory, pid and local ephemeral storage for root file system are supported. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more detail. [default=none]")
     fs.Var(cliflag.NewMapStringString(&c.KubeReserved), "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for kubernetes system components. Currently only cpu, memory, pid and local ephemeral storage for root file system are supported. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more detail. [default=none]")
-    fs.StringSliceVar(&c.EnforceNodeAllocatable, "enforce-node-allocatable", c.EnforceNodeAllocatable, "A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are 'none', 'pods', 'system-reserved', and 'kube-reserved'. If the latter two options are specified, '--system-reserved-cgroup' and '--kube-reserved-cgroup' must also be set, respectively. If 'none' is specified, no additional options should be set. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details.")
+    fs.StringSliceVar(&c.EnforceNodeAllocatable, "enforce-node-allocatable", c.EnforceNodeAllocatable, "A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are 'none', 'pods', 'system-reserved', 'system-reserved-compressible', 'kube-reserved' and 'kube-reserved-compressible'. If any of the latter four options are specified, '--system-reserved-cgroup' and '--kube-reserved-cgroup' must also be set, respectively. If 'none' is specified, no additional options should be set. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details.")
     fs.StringVar(&c.SystemReservedCgroup, "system-reserved-cgroup", c.SystemReservedCgroup, "Absolute name of the top level cgroup that is used to manage non-kubernetes components for which compute resources were reserved via '--system-reserved' flag. Ex. '/system-reserved'. [default='']")
     fs.StringVar(&c.KubeReservedCgroup, "kube-reserved-cgroup", c.KubeReservedCgroup, "Absolute name of the top level cgroup that is used to manage kubernetes components for which compute resources were reserved via '--kube-reserved' flag. Ex. '/kube-reserved'. [default='']")
     logsapi.AddFlags(&c.Logging, fs)
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 5e2300ddbb8..3a9b5bd75ea 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -807,6 +807,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
     if err != nil {
         return fmt.Errorf("--system-reserved value failed to parse: %w", err)
     }
+
     var hardEvictionThresholds []evictionapi.Threshold
     // If the user requested to ignore eviction thresholds, then do not set valid values for hardEvictionThresholds here.
     if !s.ExperimentalNodeAllocatableIgnoreEvictionThreshold {
diff --git a/pkg/kubelet/apis/config/validation/validation.go b/pkg/kubelet/apis/config/validation/validation.go
index 2fd6b14d374..afc4735daac 100644
--- a/pkg/kubelet/apis/config/validation/validation.go
+++ b/pkg/kubelet/apis/config/validation/validation.go
@@ -203,6 +203,26 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur
         allErrors = append(allErrors, fmt.Errorf("invalid configuration: memorySwap.swapBehavior cannot be set when NodeSwap feature flag is disabled"))
     }
 
+    // Check for mutually exclusive keys before the main validation loop
+    reservedKeys := map[string]bool{
+        kubetypes.SystemReservedEnforcementKey:             false,
+        kubetypes.SystemReservedCompressibleEnforcementKey: false,
+        kubetypes.KubeReservedEnforcementKey:               false,
+        kubetypes.KubeReservedCompressibleEnforcementKey:   false,
+    }
+
+    for _, val := range kc.EnforceNodeAllocatable {
+        reservedKeys[val] = true
+    }
+
+    if reservedKeys[kubetypes.SystemReservedCompressibleEnforcementKey] && reservedKeys[kubetypes.SystemReservedEnforcementKey] {
+        allErrors = append(allErrors, fmt.Errorf("invalid configuration: both %q and %q cannot be specified together in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.SystemReservedEnforcementKey, kubetypes.SystemReservedCompressibleEnforcementKey))
+    }
+
+    if reservedKeys[kubetypes.KubeReservedCompressibleEnforcementKey] && reservedKeys[kubetypes.KubeReservedEnforcementKey] {
+        allErrors = append(allErrors, fmt.Errorf("invalid configuration: both %q and %q cannot be specified together in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.KubeReservedEnforcementKey, kubetypes.KubeReservedCompressibleEnforcementKey))
+    }
+
     uniqueEnforcements := sets.Set[string]{}
     for _, val := range kc.EnforceNodeAllocatable {
         if uniqueEnforcements.Has(val) {
@@ -213,13 +233,13 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur
 
         switch val {
         case kubetypes.NodeAllocatableEnforcementKey:
-        case kubetypes.SystemReservedEnforcementKey:
+        case kubetypes.SystemReservedEnforcementKey, kubetypes.SystemReservedCompressibleEnforcementKey:
             if kc.SystemReservedCgroup == "" {
-                allErrors = append(allErrors, fmt.Errorf("invalid configuration: systemReservedCgroup (--system-reserved-cgroup) must be specified when %q contained in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.SystemReservedEnforcementKey))
+                allErrors = append(allErrors, fmt.Errorf("invalid configuration: systemReservedCgroup (--system-reserved-cgroup) must be specified when %q or %q included in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.SystemReservedEnforcementKey, kubetypes.SystemReservedCompressibleEnforcementKey))
             }
-        case kubetypes.KubeReservedEnforcementKey:
+        case kubetypes.KubeReservedEnforcementKey, kubetypes.KubeReservedCompressibleEnforcementKey:
             if kc.KubeReservedCgroup == "" {
-                allErrors = append(allErrors, fmt.Errorf("invalid configuration: kubeReservedCgroup (--kube-reserved-cgroup) must be specified when %q contained in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.KubeReservedEnforcementKey))
+                allErrors = append(allErrors, fmt.Errorf("invalid configuration: kubeReservedCgroup (--kube-reserved-cgroup) must be specified when %q or %q included in enforceNodeAllocatable (--enforce-node-allocatable)", kubetypes.KubeReservedEnforcementKey, kubetypes.KubeReservedCompressibleEnforcementKey))
             }
         case kubetypes.NodeAllocatableNoneKey:
             if len(kc.EnforceNodeAllocatable) > 1 {
@@ -228,8 +248,9 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur
             // skip all further checks when this is explicitly "none"
             continue
         default:
-            allErrors = append(allErrors, fmt.Errorf("invalid configuration: option %q specified for enforceNodeAllocatable (--enforce-node-allocatable). Valid options are %q, %q, %q, or %q",
-                val, kubetypes.NodeAllocatableEnforcementKey, kubetypes.SystemReservedEnforcementKey, kubetypes.KubeReservedEnforcementKey, kubetypes.NodeAllocatableNoneKey))
+            allErrors = append(allErrors, fmt.Errorf("invalid configuration: option %q specified for enforceNodeAllocatable (--enforce-node-allocatable). Valid options are %q, %q, %q, %q, %q or %q",
+                val, kubetypes.NodeAllocatableEnforcementKey, kubetypes.SystemReservedEnforcementKey, kubetypes.SystemReservedCompressibleEnforcementKey,
+                kubetypes.KubeReservedEnforcementKey, kubetypes.KubeReservedCompressibleEnforcementKey, kubetypes.NodeAllocatableNoneKey))
             continue
         }
 
diff --git a/pkg/kubelet/apis/config/validation/validation_test.go b/pkg/kubelet/apis/config/validation/validation_test.go
index 22df9e3ab90..70518cb0ed3 100644
--- a/pkg/kubelet/apis/config/validation/validation_test.go
+++ b/pkg/kubelet/apis/config/validation/validation_test.go
@@ -383,7 +383,30 @@ func TestValidateKubeletConfiguration(t *testing.T) {
             conf.SystemReservedCgroup = ""
             return conf
         },
-        errMsg: "invalid configuration: systemReservedCgroup (--system-reserved-cgroup) must be specified when \"system-reserved\" contained in enforceNodeAllocatable (--enforce-node-allocatable)",
+        errMsg: "invalid configuration: systemReservedCgroup (--system-reserved-cgroup) must be specified when \"system-reserved\" or \"system-reserved-compressible\" included in enforceNodeAllocatable (--enforce-node-allocatable)",
+    }, {
+        name: "specify SystemReservedCompressibleEnforcementKey without specifying SystemReservedCgroup",
+        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
+            conf.EnforceNodeAllocatable = []string{kubetypes.SystemReservedCompressibleEnforcementKey}
+            conf.SystemReservedCgroup = ""
+            return conf
+        },
+        errMsg: "invalid configuration: systemReservedCgroup (--system-reserved-cgroup) must be specified when \"system-reserved\" or \"system-reserved-compressible\" included in enforceNodeAllocatable (--enforce-node-allocatable)",
+    }, {
+        name: "specify SystemReservedCompressibleEnforcementKey with SystemReservedEnforcementKey",
+        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
+            conf.EnforceNodeAllocatable = []string{kubetypes.SystemReservedCompressibleEnforcementKey, kubetypes.SystemReservedEnforcementKey}
+            return conf
+        },
+        errMsg: "invalid configuration: both \"system-reserved\" and \"system-reserved-compressible\" cannot be specified together in enforceNodeAllocatable (--enforce-node-allocatable)",
+    }, {
+        name: "specify KubeReservedCompressibleEnforcementKey without specifying KubeReservedCgroup",
+        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
+            conf.EnforceNodeAllocatable = []string{kubetypes.KubeReservedCompressibleEnforcementKey}
+            conf.KubeReservedCgroup = ""
+            return conf
+        },
+        errMsg: "invalid configuration: kubeReservedCgroup (--kube-reserved-cgroup) must be specified when \"kube-reserved\" or \"kube-reserved-compressible\" included in enforceNodeAllocatable (--enforce-node-allocatable)",
     }, {
         name: "specify KubeReservedEnforcementKey without specifying KubeReservedCgroup",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
@@ -391,7 +414,14 @@ func TestValidateKubeletConfiguration(t *testing.T) {
             conf.KubeReservedCgroup = ""
             return conf
         },
-        errMsg: "invalid configuration: kubeReservedCgroup (--kube-reserved-cgroup) must be specified when \"kube-reserved\" contained in enforceNodeAllocatable (--enforce-node-allocatable)",
+        errMsg: "invalid configuration: kubeReservedCgroup (--kube-reserved-cgroup) must be specified when \"kube-reserved\" or \"kube-reserved-compressible\" included in enforceNodeAllocatable (--enforce-node-allocatable)",
+    }, {
+        name: "specify KubeReservedCompressibleEnforcementKey with KubeReservedEnforcementKey",
+        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
+            conf.EnforceNodeAllocatable = []string{kubetypes.KubeReservedCompressibleEnforcementKey, kubetypes.KubeReservedEnforcementKey}
+            return conf
+        },
+        errMsg: "invalid configuration: both \"kube-reserved\" and \"kube-reserved-compressible\" cannot be specified together in enforceNodeAllocatable (--enforce-node-allocatable)",
     }, {
         name: "specify NodeAllocatableNoneKey with additional enforcements",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
@@ -412,7 +442,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
             conf.EnforceNodeAllocatable = []string{"invalid-enforce-node-allocatable"}
             return conf
         },
-        errMsg: "invalid configuration: option \"invalid-enforce-node-allocatable\" specified for enforceNodeAllocatable (--enforce-node-allocatable). Valid options are \"pods\", \"system-reserved\", \"kube-reserved\", or \"none\"",
+        errMsg: "invalid configuration: option \"invalid-enforce-node-allocatable\" specified for enforceNodeAllocatable (--enforce-node-allocatable). Valid options are \"pods\", \"system-reserved\", \"system-reserved-compressible\", \"kube-reserved\", \"kube-reserved-compressible\" or \"none\"",
     }, {
         name: "invalid HairpinMode",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go
index a069b1942fc..b2ad28290b8 100644
--- a/pkg/kubelet/cm/node_container_manager_linux.go
+++ b/pkg/kubelet/cm/node_container_manager_linux.go
@@ -53,7 +53,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
     cgroupConfig := &CgroupConfig{
         Name: cm.cgroupRoot,
         // The default limits for cpu shares can be very low which can lead to CPU starvation for pods.
-        ResourceParameters: getCgroupConfig(nodeAllocatable),
+        ResourceParameters: getCgroupConfig(nodeAllocatable, false),
     }
     if cm.cgroupManager.Exists(cgroupConfig.Name) {
         return nil
@@ -81,7 +81,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 
     cgroupConfig := &CgroupConfig{
         Name:               cm.cgroupRoot,
-        ResourceParameters: getCgroupConfig(nodeAllocatable),
+        ResourceParameters: getCgroupConfig(nodeAllocatable, false),
     }
 
     // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail.
@@ -110,7 +110,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
     // Now apply kube reserved and system reserved limits if required.
     if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
         klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
-        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
+        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved, false); err != nil {
             message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
             cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
             return errors.New(message)
@@ -119,19 +119,40 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
     }
     if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
         klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
-        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
+        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved, false); err != nil {
             message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
             cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
             return errors.New(message)
         }
         cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
     }
+
+    if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedCompressibleEnforcementKey) {
+        klog.V(2).InfoS("Enforcing system reserved compressible on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
+        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved, true); err != nil {
+            message := fmt.Sprintf("Failed to enforce System Reserved Compressible Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
+            cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+            return errors.New(message)
+        }
+        cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
+    }
+
+    if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedCompressibleEnforcementKey) {
+        klog.V(2).InfoS("Enforcing kube reserved compressible on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
+        if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved, true); err != nil {
+            message := fmt.Sprintf("Failed to enforce Kube Reserved Compressible Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
+            cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+            return errors.New(message)
+        }
+        cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
+    }
     return nil
 }
 
 // enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface.
-func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList) error {
-    rp := getCgroupConfig(rl)
+func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList, compressibleResources bool) error {
+    rp := getCgroupConfig(rl, compressibleResources)
+
     if rp == nil {
         return fmt.Errorf("%q cgroup is not configured properly", cName)
     }
@@ -162,27 +183,39 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
 }
 
 // getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface.
-func getCgroupConfig(rl v1.ResourceList) *ResourceConfig {
+func getCgroupConfig(rl v1.ResourceList, compressibleResourcesOnly bool) *ResourceConfig {
     // TODO(vishh): Set CPU Quota if necessary.
     if rl == nil {
         return nil
     }
     var rc ResourceConfig
-    if q, exists := rl[v1.ResourceMemory]; exists {
-        // Memory is defined in bytes.
-        val := q.Value()
-        rc.Memory = &val
+
+    setCompressibleResources := func() {
+        if q, exists := rl[v1.ResourceCPU]; exists {
+            // CPU is defined in milli-cores.
+            val := MilliCPUToShares(q.MilliValue())
+            rc.CPUShares = &val
+        }
     }
-    if q, exists := rl[v1.ResourceCPU]; exists {
-        // CPU is defined in milli-cores.
-        val := MilliCPUToShares(q.MilliValue())
-        rc.CPUShares = &val
+
+    // Only return compressible resources
+    if compressibleResourcesOnly {
+        setCompressibleResources()
+    } else {
+        if q, exists := rl[v1.ResourceMemory]; exists {
+            // Memory is defined in bytes.
+            val := q.Value()
+            rc.Memory = &val
+        }
+
+        setCompressibleResources()
+
+        if q, exists := rl[pidlimit.PIDs]; exists {
+            val := q.Value()
+            rc.PidsLimit = &val
+        }
+        rc.HugePageLimit = HugePageLimits(rl)
+    }
-    if q, exists := rl[pidlimit.PIDs]; exists {
-        val := q.Value()
-        rc.PidsLimit = &val
-    }
-    rc.HugePageLimit = HugePageLimits(rl)
     return &rc
 }
diff --git a/pkg/kubelet/cm/node_container_manager_linux_test.go b/pkg/kubelet/cm/node_container_manager_linux_test.go
index d4fa6c12ad4..ba060045310 100644
--- a/pkg/kubelet/cm/node_container_manager_linux_test.go
+++ b/pkg/kubelet/cm/node_container_manager_linux_test.go
@@ -23,7 +23,7 @@ import (
     "testing"
 
     "github.com/stretchr/testify/assert"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 )
@@ -401,3 +401,65 @@ func getEphemeralStorageResourceList(storage string) v1.ResourceList {
     }
     return res
 }
+
+func TestGetCgroupConfig(t *testing.T) {
+    cases := []struct {
+        name                  string
+        resourceList          v1.ResourceList
+        compressibleResources bool
+        checks                func(*ResourceConfig, *testing.T)
+    }{
+        {
+            name:                  "Nil resource list",
+            resourceList:          nil,
+            compressibleResources: false,
+            checks: func(actual *ResourceConfig, t *testing.T) {
+                assert.Nil(t, actual)
+            },
+        },
+        {
+            name: "Compressible resources only",
+            resourceList: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("100m"),
+                v1.ResourceMemory: resource.MustParse("200Mi"),
+            },
+            compressibleResources: true,
+            checks: func(actual *ResourceConfig, t *testing.T) {
+                assert.NotNil(t, actual.CPUShares)
+                assert.Nil(t, actual.Memory)
+                assert.Nil(t, actual.PidsLimit)
+                assert.Nil(t, actual.HugePageLimit)
+            },
+        },
+        {
+            name: "Memory only",
+            resourceList: v1.ResourceList{
+                v1.ResourceMemory: resource.MustParse("200Mi"),
+            },
+            compressibleResources: false,
+            checks: func(actual *ResourceConfig, t *testing.T) {
+                assert.NotNil(t, actual.Memory)
+                assert.Nil(t, actual.CPUShares)
+            },
+        },
+        {
+            name: "Memory and CPU without compressible resources",
+            resourceList: v1.ResourceList{
+                v1.ResourceCPU:    resource.MustParse("100m"),
+                v1.ResourceMemory: resource.MustParse("200Mi"),
+            },
+            compressibleResources: false,
+            checks: func(actual *ResourceConfig, t *testing.T) {
+                assert.NotNil(t, actual.Memory)
+                assert.NotNil(t, actual.CPUShares)
+            },
+        },
+    }
+
+    for _, tc := range cases {
+        t.Run(tc.name, func(t *testing.T) {
+            actual := getCgroupConfig(tc.resourceList, tc.compressibleResources)
+            tc.checks(actual, t)
+        })
+    }
+}
diff --git a/pkg/kubelet/types/constants.go b/pkg/kubelet/types/constants.go
index 9f343542d47..56cba9c43b8 100644
--- a/pkg/kubelet/types/constants.go
+++ b/pkg/kubelet/types/constants.go
@@ -23,10 +23,12 @@ const (
 
 // User visible keys for managing node allocatable enforcement on the node.
 const (
-    NodeAllocatableEnforcementKey = "pods"
-    SystemReservedEnforcementKey  = "system-reserved"
-    KubeReservedEnforcementKey    = "kube-reserved"
-    NodeAllocatableNoneKey        = "none"
+    NodeAllocatableEnforcementKey            = "pods"
+    SystemReservedEnforcementKey             = "system-reserved"
+    SystemReservedCompressibleEnforcementKey = "system-reserved-compressible"
+    KubeReservedEnforcementKey               = "kube-reserved"
+    KubeReservedCompressibleEnforcementKey   = "kube-reserved-compressible"
+    NodeAllocatableNoneKey                   = "none"
 )
 
 // SwapBehavior types
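
Usage sketch (illustrative only; the cgroup names and reservation values below are
assumptions, not part of this patch): with these changes a node can be configured so
that only compressible limits (CPU shares) are enforced on the reserved slices, while
memory, pids and hugepage limits are left unset there. For example, the kubelet could
be started with:

    kubelet \
      --enforce-node-allocatable=pods,system-reserved-compressible,kube-reserved-compressible \
      --system-reserved-cgroup=/system.slice \
      --kube-reserved-cgroup=/kubelet.slice \
      --system-reserved=cpu=500m,memory=1Gi \
      --kube-reserved=cpu=500m,memory=1Gi

With the new *-compressible keys, enforceNodeAllocatableCgroups calls getCgroupConfig
with compressibleResourcesOnly=true, so only CPUShares is written to the corresponding
reserved cgroup.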