Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-06 10:43:56 +00:00
Merge pull request #48057 from NickrenREN/fix-validateNodeAllocatable
Automatic merge from submit-queue (batch tested with PRs 50758, 48057).

Fix node allocatable resource validation: GetNodeAllocatableReservation returns all of the reserved resource values, and the allocatable resource is capacity - reservation.

**Release note**:

```release-note
NONE
```
This commit is contained in commit d2cf96d6ef.
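Before the diff, a minimal standalone sketch of the rule this change enforces may be useful. It is an illustration only (the validateReservation helper, the map keys, and the quantities are made up for the example), not the kubelet implementation, which works on v1.ResourceList and the container manager's cached capacity:

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// validateReservation flags every resource whose total reservation exceeds the
// node capacity, i.e. where allocatable = capacity - reservation would be negative.
func validateReservation(capacity, reservation map[string]resource.Quantity) error {
    var problems []string
    for name, reserved := range reservation {
        capQty := capacity[name]
        if reserved.Cmp(capQty) > 0 { // reservation larger than capacity
            problems = append(problems, fmt.Sprintf(
                "resource %q: reserved %v exceeds capacity %v", name, reserved.String(), capQty.String()))
        }
    }
    if len(problems) > 0 {
        return fmt.Errorf("invalid node allocatable configuration: %v", problems)
    }
    return nil
}

func main() {
    capacity := map[string]resource.Quantity{"memory": resource.MustParse("4Gi")}
    reservation := map[string]resource.Quantity{"memory": resource.MustParse("6Gi")} // over-reserved on purpose
    fmt.Println(validateReservation(capacity, reservation))
}
```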
@@ -43,6 +43,7 @@ go_library(
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:linux_amd64": [
+            "//pkg/api:go_default_library",
             "//pkg/api/v1/helper/qos:go_default_library",
             "//pkg/api/v1/resource:go_default_library",
             "//pkg/kubelet/cm/util:go_default_library",
@@ -490,14 +490,17 @@ func (cm *containerManagerImpl) Start(node *v1.Node, activePods ActivePodsFunc)
     // cache the node Info including resource capacity and
     // allocatable of the node
     cm.nodeInfo = node
-    // Setup the node
-    if err := cm.setupNode(activePods); err != nil {
-        return err
-    }
+
     // Ensure that node allocatable configuration is valid.
     if err := cm.validateNodeAllocatable(); err != nil {
         return err
     }
+
+    // Setup the node
+    if err := cm.setupNode(activePods); err != nil {
+        return err
+    }
+
     // Don't run a background thread if there are no ensureStateFuncs.
     hasEnsureStateFuncs := false
     for _, cont := range cm.systemContainers {
@@ -28,6 +28,7 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/kubelet/events"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 )
@@ -228,14 +229,26 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res
 // validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity.
 // Returns error if the configuration is invalid, nil otherwise.
 func (cm *containerManagerImpl) validateNodeAllocatable() error {
-    na := cm.GetNodeAllocatableReservation()
-    zeroValue := resource.MustParse("0")
     var errors []string
-    for key, val := range na {
-        if val.Cmp(zeroValue) <= 0 {
-            errors = append(errors, fmt.Sprintf("Resource %q has an allocatable of %v", key, val))
+    nar := cm.GetNodeAllocatableReservation()
+    for k, v := range nar {
+        capacityClone, err := api.Scheme.DeepCopy(cm.capacity[k])
+        if err != nil {
+            errors = append(errors, fmt.Sprintf("DeepCopy capacity error"))
+        }
+        value, ok := capacityClone.(resource.Quantity)
+        if !ok {
+            return fmt.Errorf(
+                "failed to cast object %#v to Quantity",
+                capacityClone)
+        }
+        value.Sub(v)
+
+        if value.Sign() < 0 {
+            errors = append(errors, fmt.Sprintf("Resource %q has an allocatable of %v, capacity of %v", k, v, value))
         }
     }
+
     if len(errors) > 0 {
         return fmt.Errorf("Invalid Node Allocatable configuration. %s", strings.Join(errors, " "))
     }
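One detail worth calling out in the new validateNodeAllocatable: Quantity.Sub modifies its receiver in place, which is presumably why the capacity entry is deep-copied (via api.Scheme.DeepCopy) before the reservation is subtracted from it, leaving the cached cm.capacity map untouched. A tiny illustration with made-up values:

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    capacity := resource.MustParse("20Gi")
    // Sub mutates its receiver, so subtract on a copy and keep `capacity` intact.
    allocatable := capacity.DeepCopy()
    allocatable.Sub(resource.MustParse("20580Mi"))
    fmt.Println(allocatable.Sign()) // -1: the reservation exceeds capacity
    fmt.Println(capacity.String())  // still "20Gi"
}
```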
@@ -221,7 +221,7 @@ func TestNodeAllocatableForEnforcement(t *testing.T) {
 func TestNodeAllocatableInputValidation(t *testing.T) {
     memoryEvictionThreshold := resource.MustParse("100Mi")
     highMemoryEvictionThreshold := resource.MustParse("2Gi")
-    testCases := []struct {
+    cpuMemTestCases := []struct {
         kubeReserved   v1.ResourceList
         systemReserved v1.ResourceList
         capacity       v1.ResourceList
@@ -279,7 +279,7 @@ func TestNodeAllocatableInputValidation(t *testing.T) {
             invalidConfiguration: true,
         },
     }
-    for _, tc := range testCases {
+    for _, tc := range cpuMemTestCases {
         nc := NodeConfig{
             NodeAllocatableConfig: NodeAllocatableConfig{
                 KubeReserved:   tc.kubeReserved,
@@ -297,9 +297,74 @@ func TestNodeAllocatableInputValidation(t *testing.T) {
             NodeConfig: nc,
             capacity:   tc.capacity,
         }
-        if err := cm.validateNodeAllocatable(); err != nil && !tc.invalidConfiguration {
+        err := cm.validateNodeAllocatable()
+        if err == nil && tc.invalidConfiguration {
+            t.Logf("Expected invalid node allocatable configuration")
+            t.FailNow()
+        } else if err != nil && !tc.invalidConfiguration {
+            t.Logf("Expected valid node allocatable configuration: %v", err)
+            t.FailNow()
+        }
+    }
+
+    storageEvictionThreshold := resource.MustParse("100Mi")
+    storageTestCases := []struct {
+        kubeReserved         v1.ResourceList
+        systemReserved       v1.ResourceList
+        capacity             v1.ResourceList
+        hardThreshold        evictionapi.ThresholdValue
+        invalidConfiguration bool
+    }{
+        {
+            kubeReserved:   getScratchResourceList("100Mi"),
+            systemReserved: getScratchResourceList("50Mi"),
+            capacity:       getScratchResourceList("500Mi"),
+        },
+        {
+            kubeReserved:   getScratchResourceList("10Gi"),
+            systemReserved: getScratchResourceList("10Gi"),
+            hardThreshold: evictionapi.ThresholdValue{
+                Quantity: &storageEvictionThreshold,
+            },
+            capacity:             getScratchResourceList("20Gi"),
+            invalidConfiguration: true,
+        },
+    }
+    for _, tc := range storageTestCases {
+        nc := NodeConfig{
+            NodeAllocatableConfig: NodeAllocatableConfig{
+                KubeReserved:   tc.kubeReserved,
+                SystemReserved: tc.systemReserved,
+                HardEvictionThresholds: []evictionapi.Threshold{
+                    {
+                        Signal:   evictionapi.SignalNodeFsAvailable,
+                        Operator: evictionapi.OpLessThan,
+                        Value:    tc.hardThreshold,
+                    },
+                },
+            },
+        }
+        cm := &containerManagerImpl{
+            NodeConfig: nc,
+            capacity:   tc.capacity,
+        }
+        err := cm.validateNodeAllocatable()
+        if err == nil && tc.invalidConfiguration {
+            t.Logf("Expected invalid node allocatable configuration")
+            t.FailNow()
+        } else if err != nil && !tc.invalidConfiguration {
             t.Logf("Expected valid node allocatable configuration: %v", err)
             t.FailNow()
         }
     }
 }
+
+// getScratchResourceList returns a ResourceList with the
+// specified scratch storage resource values
+func getScratchResourceList(storage string) v1.ResourceList {
+    res := v1.ResourceList{}
+    if storage != "" {
+        res[v1.ResourceStorageScratch] = resource.MustParse(storage)
+    }
+    return res
+}
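As a quick sanity check on the new storage test cases, assuming GetNodeAllocatableReservation sums kube-reserved, system-reserved, and the hard eviction thresholds: the first case reserves 100Mi + 50Mi = 150Mi of scratch storage against 500Mi of capacity and should pass, while the second reserves 10Gi + 10Gi + 100Mi = 20580Mi against 20Gi (20480Mi) of capacity, leaving a negative allocatable, which is why it is marked invalidConfiguration: true.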