Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00
Merge pull request #52708 from NickrenREN/kubereserved-localephemeral
Automatic merge from submit-queue (batch tested with PRs 44596, 52708, 53163, 53167, 52692). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Fix --kube-reserved storage key name and add UTs for node allocatable reservation

**Which issue this PR fixes**: part of #52463

**Release note**:

```release-note
NONE
```

/assign @jingxu97
This commit is contained in: commit 8ba5ff9a0b
@@ -379,7 +379,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfiguration) {
 
 	// Node Allocatable Flags
 	fs.Var(&c.SystemReserved, "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
-	fs.Var(&c.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi, storage=1Gi) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
+	fs.Var(&c.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi, ephemeral-storage=1Gi) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local ephemeral storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]")
 	fs.StringSliceVar(&c.EnforceNodeAllocatable, "enforce-node-allocatable", c.EnforceNodeAllocatable, "A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptible options are 'pods', 'system-reserved' & 'kube-reserved'. If the latter two options are specified, '--system-reserved-cgroup' & '--kube-reserved-cgroup' must also be set respectively. See https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md for more details.")
 	fs.StringVar(&c.SystemReservedCgroup, "system-reserved-cgroup", c.SystemReservedCgroup, "Absolute name of the top level cgroup that is used to manage non-kubernetes components for which compute resources were reserved via '--system-reserved' flag. Ex. '/system-reserved'. [default='']")
 	fs.StringVar(&c.KubeReservedCgroup, "kube-reserved-cgroup", c.KubeReservedCgroup, "Absolute name of the top level cgroup that is used to manage kubernetes components for which compute resources were reserved via '--kube-reserved' flag. Ex. '/kube-reserved'. [default='']")
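A note on mechanics: `fs.Var` accepts any type implementing pflag's `Value` interface, which is how the `ResourceName=ResourceQuantity` syntax above gets parsed into `c.KubeReserved`. Below is a minimal sketch of such a flag type; `reservedMap` is an illustrative stand-in for the real `ConfigurationMap`, not kubelet code:

```go
package main

import (
	"fmt"
	"sort"
	"strings"

	"github.com/spf13/pflag"
)

// reservedMap is a hypothetical stand-in for componentconfig.ConfigurationMap:
// a map[string]string that parses "cpu=200m,memory=500Mi,ephemeral-storage=1Gi".
type reservedMap map[string]string

func (m reservedMap) String() string {
	pairs := make([]string, 0, len(m))
	for k, v := range m {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs) // stable output for help text
	return strings.Join(pairs, ",")
}

func (m reservedMap) Set(value string) error {
	for _, pair := range strings.Split(value, ",") {
		kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
		if len(kv) != 2 {
			return fmt.Errorf("invalid pair %q, expected key=value", pair)
		}
		m[kv[0]] = kv[1]
	}
	return nil
}

func (m reservedMap) Type() string { return "mapStringString" }

func main() {
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
	kubeReserved := reservedMap{}
	fs.Var(kubeReserved, "kube-reserved", "resources reserved for kubernetes system components")
	_ = fs.Parse([]string{"--kube-reserved=cpu=200m,memory=500Mi,ephemeral-storage=1Gi"})
	fmt.Println(kubeReserved) // map[cpu:200m ephemeral-storage:1Gi memory:500Mi]
}
```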
@@ -30,19 +30,19 @@ func TestValueOfAllocatableResources(t *testing.T) {
 		name           string
 	}{
 		{
-			kubeReserved:   "cpu=200m,memory=-150G",
+			kubeReserved:   "cpu=200m,memory=-150G,ephemeral-storage=10Gi",
 			systemReserved: "cpu=200m,memory=15Ki",
 			errorExpected:  true,
 			name:           "negative quantity value",
 		},
 		{
-			kubeReserved:   "cpu=200m,memory=150Gi",
+			kubeReserved:   "cpu=200m,memory=150Gi,ephemeral-storage=10Gi",
 			systemReserved: "cpu=200m,memory=15Ky",
 			errorExpected:  true,
 			name:           "invalid quantity unit",
 		},
 		{
-			kubeReserved:   "cpu=200m,memory=15G",
+			kubeReserved:   "cpu=200m,memory=15G,ephemeral-storage=10Gi",
 			systemReserved: "cpu=200m,memory=15Ki",
 			errorExpected:  false,
 			name:           "Valid resource quantity",
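These cases pivot on how quantities parse: `-150G` parses but is negative, and `15Ky` is not a valid suffix. A hedged sketch of the validation the test implies, using apimachinery's resource package (`validateReservation` is an illustrative name, not the kubelet's actual parser):

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// validateReservation mirrors what the test cases expect: every entry must
// parse as a resource quantity, and negative values are rejected.
func validateReservation(spec string) error {
	for _, pair := range strings.Split(spec, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return fmt.Errorf("invalid pair %q", pair)
		}
		q, err := resource.ParseQuantity(kv[1])
		if err != nil {
			return fmt.Errorf("%s: %v", kv[0], err) // e.g. "15Ky" has an invalid suffix
		}
		if q.Sign() < 0 {
			return fmt.Errorf("%s: quantity must be non-negative, got %s", kv[0], q.String())
		}
	}
	return nil
}

func main() {
	for _, spec := range []string{
		"cpu=200m,memory=-150G,ephemeral-storage=10Gi", // negative quantity value -> error
		"cpu=200m,memory=15Ky",                         // invalid quantity unit -> error
		"cpu=200m,memory=15G,ephemeral-storage=10Gi",   // valid -> nil
	} {
		fmt.Println(spec, "=>", validateReservation(spec))
	}
}
```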
@@ -353,7 +353,7 @@ type KubeletConfiguration struct {
 	SystemReserved ConfigurationMap
 	// A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs
 	// that describe resources reserved for kubernetes system components.
-	// Currently cpu, memory and local storage for root file system are supported. [default=none]
+	// Currently cpu, memory and local ephemeral storage for root file system are supported. [default=none]
 	// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
 	KubeReserved ConfigurationMap
 	// This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons.
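As a usage sketch, populating this field would look roughly like the following (the stand-in type keeps the example self-contained; `ConfigurationMap` is the plain `map[string]string` declared in this package):

```go
package main

import "fmt"

// ConfigurationMap mirrors the componentconfig type: a map[string]string
// keyed by resource name.
type ConfigurationMap map[string]string

func main() {
	// KubeReserved as it would be populated after this PR, with the
	// ephemeral-storage key replacing the old storage key.
	kubeReserved := ConfigurationMap{
		"cpu":               "200m",
		"memory":            "500Mi",
		"ephemeral-storage": "1Gi",
	}
	fmt.Println(kubeReserved)
}
```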
@@ -29,7 +29,7 @@ import (
 
 func TestNodeAllocatableReservationForScheduling(t *testing.T) {
 	memoryEvictionThreshold := resource.MustParse("100Mi")
-	testCases := []struct {
+	cpuMemCases := []struct {
 		kubeReserved   v1.ResourceList
 		systemReserved v1.ResourceList
 		expected       v1.ResourceList
@@ -87,7 +87,7 @@ func TestNodeAllocatableReservationForScheduling(t *testing.T) {
 			expected: getResourceList("", "150Mi"),
 		},
 	}
-	for idx, tc := range testCases {
+	for idx, tc := range cpuMemCases {
 		nc := NodeConfig{
 			NodeAllocatableConfig: NodeAllocatableConfig{
 				KubeReserved: tc.kubeReserved,
@@ -111,24 +111,64 @@ func TestNodeAllocatableReservationForScheduling(t *testing.T) {
 			assert.Equal(t, expected.MilliValue(), v.MilliValue(), "test case %d failed for resource %q", idx+1, k)
 		}
 	}
-}
 
-func TestNodeAllocatableWithNilHardThreshold(t *testing.T) {
-	nc := NodeConfig{
-		NodeAllocatableConfig: NodeAllocatableConfig{
-			KubeReserved:   getResourceList("100m", "100Mi"),
-			SystemReserved: getResourceList("50m", "50Mi"),
-		},
-	}
-	cm := &containerManagerImpl{
-		NodeConfig: nc,
-		capacity:   getResourceList("10", "10Gi"),
-	}
-	expected := getResourceList("150m", "150Mi")
-	for k, v := range cm.GetNodeAllocatableReservation() {
-		expected, exists := expected[k]
-		assert.True(t, exists)
-		assert.Equal(t, expected.MilliValue(), v.MilliValue(), "failed for resource %q", k)
-	}
-}
+	ephemeralStorageEvictionThreshold := resource.MustParse("100Mi")
+	ephemeralStorageTestCases := []struct {
+		kubeReserved  v1.ResourceList
+		expected      v1.ResourceList
+		capacity      v1.ResourceList
+		hardThreshold evictionapi.ThresholdValue
+	}{
+		{
+			kubeReserved: getEphemeralStorageResourceList("100Mi"),
+			capacity:     getEphemeralStorageResourceList("10Gi"),
+			expected:     getEphemeralStorageResourceList("100Mi"),
+		},
+		{
+			kubeReserved: getEphemeralStorageResourceList("100Mi"),
+			hardThreshold: evictionapi.ThresholdValue{
+				Quantity: &ephemeralStorageEvictionThreshold,
+			},
+			capacity: getEphemeralStorageResourceList("10Gi"),
+			expected: getEphemeralStorageResourceList("200Mi"),
+		},
+		{
+			kubeReserved: getEphemeralStorageResourceList("150Mi"),
+			capacity:     getEphemeralStorageResourceList("10Gi"),
+			hardThreshold: evictionapi.ThresholdValue{
+				Percentage: 0.05,
+			},
+			expected: getEphemeralStorageResourceList("694157320"),
+		},
+
+		{
+			kubeReserved: v1.ResourceList{},
+			capacity:     getEphemeralStorageResourceList("10Gi"),
+			expected:     getEphemeralStorageResourceList(""),
+		},
+	}
+	for idx, tc := range ephemeralStorageTestCases {
+		nc := NodeConfig{
+			NodeAllocatableConfig: NodeAllocatableConfig{
+				KubeReserved: tc.kubeReserved,
+				HardEvictionThresholds: []evictionapi.Threshold{
+					{
+						Signal:   evictionapi.SignalNodeFsAvailable,
+						Operator: evictionapi.OpLessThan,
+						Value:    tc.hardThreshold,
+					},
+				},
+			},
+		}
+		cm := &containerManagerImpl{
+			NodeConfig: nc,
+			capacity:   tc.capacity,
+		}
+		for k, v := range cm.GetNodeAllocatableReservation() {
+			expected, exists := tc.expected[k]
+			assert.True(t, exists, "test case %d expected resource %q", idx+1, k)
+			assert.Equal(t, expected.MilliValue(), v.MilliValue(), "test case %d failed for resource %q", idx+1, k)
+		}
+	}
+}
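The `694157320` expectation deserves a note: the ephemeral-storage reservation is kube-reserved plus the hard eviction threshold, and since the threshold's `Percentage` field appears to be a float32, 5% of 10Gi comes out 8 bytes above an exact 536870912. A quick sketch verifying the arithmetic (not the kubelet's actual code path):

```go
package main

import "fmt"

func main() {
	capacity := int64(10 * 1024 * 1024 * 1024) // 10Gi = 10737418240 bytes
	kubeReserved := int64(150 * 1024 * 1024)   // 150Mi = 157286400 bytes

	// evictionapi.ThresholdValue declares Percentage as float32; the nearest
	// float32 to 0.05 is slightly above it, so 5% of 10Gi overshoots by 8 bytes.
	var percentage float32 = 0.05
	eviction := int64(float64(capacity) * float64(percentage))

	fmt.Println(eviction)                // 536870920, not 536870912
	fmt.Println(kubeReserved + eviction) // 694157320, the value the test expects
}
```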
@@ -307,34 +347,30 @@ func TestNodeAllocatableInputValidation(t *testing.T) {
 		}
 	}
 
-	storageEvictionThreshold := resource.MustParse("100Mi")
-	storageTestCases := []struct {
+	ephemeralStorageEvictionThreshold := resource.MustParse("100Mi")
+	ephemeralStorageTestCases := []struct {
 		kubeReserved         v1.ResourceList
-		systemReserved       v1.ResourceList
 		capacity             v1.ResourceList
 		hardThreshold        evictionapi.ThresholdValue
 		invalidConfiguration bool
 	}{
 		{
-			kubeReserved:   getEphemeralStorageResourceList("100Mi"),
-			systemReserved: getEphemeralStorageResourceList("50Mi"),
-			capacity:       getEphemeralStorageResourceList("500Mi"),
+			kubeReserved: getEphemeralStorageResourceList("100Mi"),
+			capacity:     getEphemeralStorageResourceList("500Mi"),
 		},
 		{
-			kubeReserved:   getEphemeralStorageResourceList("10Gi"),
-			systemReserved: getEphemeralStorageResourceList("10Gi"),
+			kubeReserved: getEphemeralStorageResourceList("20Gi"),
 			hardThreshold: evictionapi.ThresholdValue{
-				Quantity: &storageEvictionThreshold,
+				Quantity: &ephemeralStorageEvictionThreshold,
 			},
 			capacity:             getEphemeralStorageResourceList("20Gi"),
 			invalidConfiguration: true,
 		},
 	}
-	for _, tc := range storageTestCases {
+	for _, tc := range ephemeralStorageTestCases {
 		nc := NodeConfig{
 			NodeAllocatableConfig: NodeAllocatableConfig{
-				KubeReserved:   tc.kubeReserved,
-				SystemReserved: tc.systemReserved,
+				KubeReserved: tc.kubeReserved,
 				HardEvictionThresholds: []evictionapi.Threshold{
 					{
 						Signal: evictionapi.SignalNodeFsAvailable,
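The two cases above encode the rule under test: ephemeral storage reserved via kube-reserved plus the nodefs hard eviction threshold must fit within the node's capacity, so 100Mi against 500Mi passes while 20Gi plus a 100Mi threshold against a 20Gi capacity is flagged invalid. A hedged sketch of that check (`validateAllocatable` is an illustrative name, not the kubelet's actual validator):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// validateAllocatable is a hypothetical reduction of the rule the test
// exercises: reservation + hard eviction threshold must not exceed capacity.
func validateAllocatable(kubeReserved, hardEviction, capacity resource.Quantity) error {
	reserved := kubeReserved.DeepCopy()
	reserved.Add(hardEviction)
	if reserved.Cmp(capacity) > 0 {
		return fmt.Errorf("reservations (%s) exceed node capacity (%s)", reserved.String(), capacity.String())
	}
	return nil
}

func main() {
	// Valid: 100Mi reserved within 500Mi capacity, no eviction threshold.
	fmt.Println(validateAllocatable(resource.MustParse("100Mi"), resource.Quantity{}, resource.MustParse("500Mi")))
	// Invalid: 20Gi reserved + 100Mi threshold against 20Gi capacity.
	fmt.Println(validateAllocatable(resource.MustParse("20Gi"), resource.MustParse("100Mi"), resource.MustParse("20Gi")))
}
```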