Fix node allocatable resource validation

GetNodeAllocatableReservation returns all of the reserved resources, and we need to compare that reservation with the node's capacity.
NickrenREN 2017-06-26 19:41:59 +08:00
parent 276bfb8cf1
commit eadb7ca8c0
4 changed files with 94 additions and 12 deletions
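
For context, the fix described in the commit message boils down to per-resource Quantity arithmetic: subtract the total reservation from the node's capacity and treat a negative remainder as an invalid configuration. Below is a minimal standalone sketch of that idea — not kubelet code; the function name and sample values are invented, and it only assumes the k8s.io/api and k8s.io/apimachinery packages already used in the diff.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// validateReservation is a hypothetical stand-in for the kubelet check:
// for every reserved resource, capacity minus reservation must not be negative.
func validateReservation(capacity, reservation v1.ResourceList) error {
	for name, reserved := range reservation {
		remaining := capacity[name].DeepCopy() // capacity for this resource (zero if absent)
		remaining.Sub(reserved)                // remaining = capacity - reservation
		if remaining.Sign() < 0 {
			c := capacity[name]
			return fmt.Errorf("resource %q: reservation %s exceeds capacity %s", name, reserved.String(), c.String())
		}
	}
	return nil
}

func main() {
	capacity := v1.ResourceList{v1.ResourceMemory: resource.MustParse("4Gi")}
	// Invented numbers: kube-reserved, system-reserved and eviction thresholds already summed.
	reservation := v1.ResourceList{v1.ResourceMemory: resource.MustParse("4500Mi")}
	fmt.Println(validateReservation(capacity, reservation)) // reservation 4500Mi exceeds capacity 4Gi
}

The commit itself copies cm.capacity[k] through api.Scheme.DeepCopy before mutating it, for the same reason the sketch calls DeepCopy: Sub modifies the quantity in place.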


@@ -43,6 +43,7 @@ go_library(
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux_amd64": [
            "//pkg/api:go_default_library",
            "//pkg/api/v1/helper/qos:go_default_library",
            "//pkg/api/v1/resource:go_default_library",
            "//pkg/kubelet/cm/util:go_default_library",


@@ -490,14 +490,17 @@ func (cm *containerManagerImpl) Start(node *v1.Node, activePods ActivePodsFunc)
	// cache the node Info including resource capacity and
	// allocatable of the node
	cm.nodeInfo = node
	// Setup the node
	if err := cm.setupNode(activePods); err != nil {
		return err
	}
	// Ensure that node allocatable configuration is valid.
	if err := cm.validateNodeAllocatable(); err != nil {
		return err
	}
	// Setup the node
	if err := cm.setupNode(activePods); err != nil {
		return err
	}
	// Don't run a background thread if there are no ensureStateFuncs.
	hasEnsureStateFuncs := false
	for _, cont := range cm.systemContainers {


@@ -28,6 +28,7 @@ import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/kubelet/events"
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)
@@ -228,14 +229,26 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res
// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity.
// Returns error if the configuration is invalid, nil otherwise.
func (cm *containerManagerImpl) validateNodeAllocatable() error {
	na := cm.GetNodeAllocatableReservation()
	zeroValue := resource.MustParse("0")
	var errors []string
	for key, val := range na {
		if val.Cmp(zeroValue) <= 0 {
			errors = append(errors, fmt.Sprintf("Resource %q has an allocatable of %v", key, val))
	nar := cm.GetNodeAllocatableReservation()
	for k, v := range nar {
		capacityClone, err := api.Scheme.DeepCopy(cm.capacity[k])
		if err != nil {
			errors = append(errors, fmt.Sprintf("failed to DeepCopy capacity of resource %q: %v", k, err))
		}
		value, ok := capacityClone.(resource.Quantity)
		if !ok {
			return fmt.Errorf(
				"failed to cast object %#v to Quantity",
				capacityClone)
		}
		value.Sub(v)
		if value.Sign() < 0 {
			errors = append(errors, fmt.Sprintf("Resource %q has a reservation of %v but a capacity of only %v", k, v, cm.capacity[k]))
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("Invalid Node Allocatable configuration. %s", strings.Join(errors, " "))
	}

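To make the new comparison concrete, here is a standalone worked example (not part of the commit) using the numbers from the invalid scratch-storage test case added further below: 10Gi kube-reserved plus 10Gi system-reserved plus a 100Mi hard eviction threshold, against 20Gi of capacity. Only the apimachinery resource package is assumed.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Total reservation: kube-reserved + system-reserved + hard eviction threshold.
	reserved := resource.MustParse("10Gi")
	reserved.Add(resource.MustParse("10Gi"))
	reserved.Add(resource.MustParse("100Mi"))

	remaining := resource.MustParse("20Gi") // node scratch capacity
	remaining.Sub(reserved)                 // remaining = capacity - reservation

	fmt.Println(remaining.String(), remaining.Sign() < 0) // -100Mi true -> invalid configuration
}

The removed check only flagged reservations that were themselves zero or negative, so a configuration like this one used to pass validation even though it leaves no allocatable capacity.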

@@ -221,7 +221,7 @@ func TestNodeAllocatableForEnforcement(t *testing.T) {
func TestNodeAllocatableInputValidation(t *testing.T) {
	memoryEvictionThreshold := resource.MustParse("100Mi")
	highMemoryEvictionThreshold := resource.MustParse("2Gi")
	testCases := []struct {
	cpuMemTestCases := []struct {
		kubeReserved         v1.ResourceList
		systemReserved       v1.ResourceList
		capacity             v1.ResourceList
@@ -279,7 +279,7 @@ func TestNodeAllocatableInputValidation(t *testing.T) {
			invalidConfiguration: true,
		},
	}
	for _, tc := range testCases {
	for _, tc := range cpuMemTestCases {
		nc := NodeConfig{
			NodeAllocatableConfig: NodeAllocatableConfig{
				KubeReserved:   tc.kubeReserved,
@@ -297,9 +297,74 @@ func TestNodeAllocatableInputValidation(t *testing.T) {
			NodeConfig: nc,
			capacity:   tc.capacity,
		}
		if err := cm.validateNodeAllocatable(); err != nil && !tc.invalidConfiguration {
		err := cm.validateNodeAllocatable()
		if err == nil && tc.invalidConfiguration {
			t.Logf("Expected invalid node allocatable configuration")
			t.FailNow()
		} else if err != nil && !tc.invalidConfiguration {
			t.Logf("Expected valid node allocatable configuration: %v", err)
			t.FailNow()
		}
	}
	storageEvictionThreshold := resource.MustParse("100Mi")
	storageTestCases := []struct {
		kubeReserved         v1.ResourceList
		systemReserved       v1.ResourceList
		capacity             v1.ResourceList
		hardThreshold        evictionapi.ThresholdValue
		invalidConfiguration bool
	}{
		{
			kubeReserved:   getScratchResourceList("100Mi"),
			systemReserved: getScratchResourceList("50Mi"),
			capacity:       getScratchResourceList("500Mi"),
		},
		{
			kubeReserved:   getScratchResourceList("10Gi"),
			systemReserved: getScratchResourceList("10Gi"),
			hardThreshold: evictionapi.ThresholdValue{
				Quantity: &storageEvictionThreshold,
			},
			capacity:             getScratchResourceList("20Gi"),
			invalidConfiguration: true,
		},
	}
	for _, tc := range storageTestCases {
		nc := NodeConfig{
			NodeAllocatableConfig: NodeAllocatableConfig{
				KubeReserved:   tc.kubeReserved,
				SystemReserved: tc.systemReserved,
				HardEvictionThresholds: []evictionapi.Threshold{
					{
						Signal:   evictionapi.SignalNodeFsAvailable,
						Operator: evictionapi.OpLessThan,
						Value:    tc.hardThreshold,
					},
				},
			},
		}
		cm := &containerManagerImpl{
			NodeConfig: nc,
			capacity:   tc.capacity,
		}
		err := cm.validateNodeAllocatable()
		if err == nil && tc.invalidConfiguration {
			t.Logf("Expected invalid node allocatable configuration")
			t.FailNow()
		} else if err != nil && !tc.invalidConfiguration {
			t.Logf("Expected valid node allocatable configuration: %v", err)
			t.FailNow()
		}
	}
}
// getScratchResourceList returns a ResourceList with the
// specified scratch storage resource values
func getScratchResourceList(storage string) v1.ResourceList {
	res := v1.ResourceList{}
	if storage != "" {
		res[v1.ResourceStorageScratch] = resource.MustParse(storage)
	}
	return res
}
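
A quick usage sketch of the helper above (standalone, not part of the commit; v1.ResourceStorageScratch is the scratch-storage resource name in the API as of this commit):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Copy of the test helper so the sketch compiles on its own.
func getScratchResourceList(storage string) v1.ResourceList {
	res := v1.ResourceList{}
	if storage != "" {
		res[v1.ResourceStorageScratch] = resource.MustParse(storage)
	}
	return res
}

func main() {
	rl := getScratchResourceList("10Gi")
	q := rl[v1.ResourceStorageScratch]
	fmt.Println(len(rl), q.String())             // 1 10Gi: a single scratch-storage entry
	fmt.Println(len(getScratchResourceList(""))) // 0: an empty string reserves nothing
}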