Merge pull request #84074 from zouyee/proirity

LeastRequestedPriority/MostRequestedPriority/BalancedResourceAllocation as Score plugins
Kubernetes Prow Robot 2019-10-19 17:21:37 -07:00 committed by GitHub
commit e1685b5b59
11 changed files with 1221 additions and 72 deletions


@ -96,7 +96,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"TestLabelsPresence",
),
wantPrioritizers: sets.NewString(
"LeastRequestedPriority",
"ServiceSpreadingPriority",
"TestServiceAntiAffinity",
"TestLabelPreference",
@ -109,6 +108,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesLeastAllocated", Weight: 1},
},
},
},
@ -141,8 +143,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"TestServiceAntiAffinity",
"TestLabelPreference",
@ -157,6 +157,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
},
},
},
@ -198,8 +202,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"TestServiceAntiAffinity",
"TestLabelPreference",
@ -216,7 +218,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "VolumeZone"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
},
},
@ -264,8 +268,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
),
@ -282,7 +284,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
@ -335,11 +339,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
@ -354,7 +355,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -417,11 +421,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
@ -436,7 +437,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -512,11 +516,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
@ -531,7 +532,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -608,11 +612,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
@ -628,7 +629,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -710,11 +714,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
@ -730,7 +731,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -824,11 +828,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
"RequestedToCapacityRatioPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -845,7 +846,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -940,11 +944,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
"RequestedToCapacityRatioPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -962,7 +963,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -1057,11 +1061,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
"RequestedToCapacityRatioPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -1079,7 +1080,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -1178,11 +1182,8 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
),
wantPrioritizers: sets.NewString(
"EqualPriority",
"LeastRequestedPriority",
"BalancedResourceAllocation",
"SelectorSpreadPriority",
"InterPodAffinityPriority",
"MostRequestedPriority",
"RequestedToCapacityRatioPriority",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -1200,7 +1201,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "TaintToleration", Weight: 2},
@ -1241,10 +1245,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"InterPodAffinity": "MatchInterPodAffinity",
}
scoreToPriorityMap := map[string]string{
"ImageLocality": "ImageLocalityPriority",
"NodeAffinity": "NodeAffinityPriority",
"NodePreferAvoidPods": "NodePreferAvoidPodsPriority",
"TaintToleration": "TaintTolerationPriority",
"ImageLocality": "ImageLocalityPriority",
"NodeAffinity": "NodeAffinityPriority",
"NodePreferAvoidPods": "NodePreferAvoidPodsPriority",
"TaintToleration": "TaintTolerationPriority",
"NodeResourcesLeastAllocated": "LeastRequestedPriority",
"NodeResourcesBalancedAllocation": "BalancedResourceAllocation",
"NodeResourcesMostAllocated": "MostRequestedPriority",
}
for v, tc := range schedulerFiles {


@ -53,15 +53,18 @@ type RegistryArgs struct {
// runs custom plugins, can pass a different Registry when initializing the scheduler.
func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
return framework.Registry{
imagelocality.Name: imagelocality.New,
tainttoleration.Name: tainttoleration.New,
noderesources.Name: noderesources.New,
nodename.Name: nodename.New,
nodeports.Name: nodeports.New,
nodepreferavoidpods.Name: nodepreferavoidpods.New,
nodeaffinity.Name: nodeaffinity.New,
podtopologyspread.Name: podtopologyspread.New,
nodeunschedulable.Name: nodeunschedulable.New,
imagelocality.Name: imagelocality.New,
tainttoleration.Name: tainttoleration.New,
noderesources.Name: noderesources.New,
nodename.Name: nodename.New,
nodeports.Name: nodeports.New,
nodepreferavoidpods.Name: nodepreferavoidpods.New,
nodeaffinity.Name: nodeaffinity.New,
podtopologyspread.Name: podtopologyspread.New,
nodeunschedulable.Name: nodeunschedulable.New,
noderesources.BalancedAllocationName: noderesources.NewBalancedAllocation,
noderesources.MostAllocatedName: noderesources.NewMostAllocated,
noderesources.LeastAllocatedName: noderesources.NewLeastAllocated,
volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil
},
@ -190,6 +193,24 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
return
})
registry.RegisterPriority(priorities.MostRequestedPriority,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Score = appendToPluginSet(plugins.Score, noderesources.MostAllocatedName, &args.Weight)
return
})
registry.RegisterPriority(priorities.BalancedResourceAllocation,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Score = appendToPluginSet(plugins.Score, noderesources.BalancedAllocationName, &args.Weight)
return
})
registry.RegisterPriority(priorities.LeastRequestedPriority,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Score = appendToPluginSet(plugins.Score, noderesources.LeastAllocatedName, &args.Weight)
return
})
return registry
}
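For orientation: each RegisterPriority call above maps a legacy policy priority name to a small producer function that emits the equivalent score plugin and carries the configured weight through. A simplified, self-contained sketch of that pattern follows; the plugin and producer types here are stand-ins for illustration, not the real kubescheduler config API.

package main

import "fmt"

// plugin is a stand-in for a scheduler plugin entry (name plus weight).
type plugin struct {
	Name   string
	Weight int32
}

// producer mirrors the shape of the config-producer functions registered above:
// given a weight from the policy, it returns the score plugins to enable.
type producer func(weight int32) []plugin

func main() {
	registry := map[string]producer{
		"LeastRequestedPriority": func(w int32) []plugin {
			return []plugin{{Name: "NodeResourcesLeastAllocated", Weight: w}}
		},
		"MostRequestedPriority": func(w int32) []plugin {
			return []plugin{{Name: "NodeResourcesMostAllocated", Weight: w}}
		},
		"BalancedResourceAllocation": func(w int32) []plugin {
			return []plugin{{Name: "NodeResourcesBalancedAllocation", Weight: w}}
		},
	}
	// A policy that enables LeastRequestedPriority with weight 2 now yields the
	// NodeResourcesLeastAllocated score plugin with the same weight.
	fmt.Println(registry["LeastRequestedPriority"](2)) // [{NodeResourcesLeastAllocated 2}]
}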


@ -2,15 +2,24 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["node_resources.go"],
srcs = [
"balanced_allocation.go",
"least_allocated.go",
"most_allocated.go",
"node_resources.go",
"test_util.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
@ -31,7 +40,12 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["node_resources_test.go"],
srcs = [
"balanced_allocation_test.go",
"least_allocated_test.go",
"most_allocated_test.go",
"node_resources_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
@ -42,6 +56,7 @@ go_test(
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",


@ -0,0 +1,65 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
type BalancedAllocation struct {
handle framework.FrameworkHandle
}
var _ = framework.ScorePlugin(&BalancedAllocation{})
// BalancedAllocationName is the name of the plugin used in the plugin registry and configurations.
const BalancedAllocationName = "NodeResourcesBalancedAllocation"
// Name returns name of the plugin. It is used in logs, etc.
func (ba *BalancedAllocation) Name() string {
return BalancedAllocationName
}
// Score invoked at the score extension point.
func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, exist := ba.handle.NodeInfoSnapshot().NodeInfoMap[nodeName]
if !exist {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("node %q does not exist in NodeInfoSnapshot", nodeName))
}
// BalancedResourceAllocationMap does not use priority metadata, hence we pass nil here
s, err := priorities.BalancedResourceAllocationMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
}
// ScoreExtensions of the Score plugin.
func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions {
return nil
}
// NewBalancedAllocation initializes a new plugin and returns it.
func NewBalancedAllocation(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &BalancedAllocation{handle: h}, nil
}
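The score itself is computed by priorities.BalancedResourceAllocationMap, which this PR does not change. A rough, self-contained sketch of the two-resource case (the final test case below additionally folds attached-volume counts into the balance):

package main

import (
	"fmt"
	"math"
)

// balancedScore sketches the two-resource balanced-allocation score: the closer
// the CPU and memory utilization fractions are to each other, the higher the
// score. Requests that exceed capacity score 0 in the real priority.
func balancedScore(cpuReq, cpuCap, memReq, memCap float64) int64 {
	if cpuReq > cpuCap || memReq > memCap {
		return 0
	}
	diff := math.Abs(cpuReq/cpuCap - memReq/memCap)
	return int64((1 - diff) * 100) // scores are on the framework's 0-100 scale
}

func main() {
	// Mirrors the "differently sized machines" case in the test below: a 3000m CPU /
	// 5000 memory request scores 75 on a 4000m/10000 node and 100 on a 6000m/10000 node.
	fmt.Println(balancedScore(3000, 4000, 5000, 10000)) // 75
	fmt.Println(balancedScore(3000, 6000, 5000, 10000)) // 100
}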


@ -0,0 +1,407 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// getExistingVolumeCountForNode returns the number of volume slots still available on the node, given the pods already placed there and a maximum volume count.
func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
volumeCount := 0
for _, pod := range pods {
volumeCount += len(pod.Spec.Volumes)
}
if maxVolumes-volumeCount > 0 {
return maxVolumes - volumeCount
}
return 0
}
func TestNodeResourcesBalancedAllocation(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced node resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
podwithVol1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
},
},
},
NodeName: "machine4",
}
podwithVol2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
},
},
},
NodeName: "machine4",
}
podwithVol3 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
},
},
},
NodeName: "machine4",
}
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
cpuAndMemory3 := v1.PodSpec{
NodeName: "machine3",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// Node1 scores (remaining resources) on 0-100 scale
// CPU Fraction: 0 / 4000 = 0%
// Memory Fraction: 0 / 10000 = 0%
// Node1 Score: (1 - 0) * 100 = 100
// Node2 scores (remaining resources) on 0-100 scale
// CPU Fraction: 0 / 4000 = 0%
// Memory Fraction: 0 / 10000 = 0%
// Node2 Score: (1 - 0) * 100 = 100
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 3000 / 4000 = 75%
// Memory Fraction: 5000 / 10000 = 50%
// Node1 Score: (1 - (0.75-0.5)) * 100 = 75
// Node2 scores on 0-100 scale
// CPU Fraction: 3000 / 6000 = 50%
// Memory Fraction: 5000 / 10000 = 50%
// Node2 Score: (1 - (0.5-0.5)) * 100 = 100
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 75}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 0 / 4000 = 0%
// Memory Fraction: 0 / 10000 = 0%
// Node1 Score: (1 - 0) * 100 = 100
// Node2 scores on 0-100 scale
// CPU Fraction: 0 / 4000 = 0%
// Memory Fraction: 0 / 10000 = 0%
// Node2 Score: (1 - 0) * 100 = 100
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 0 / 20000 = 0%
// Node1 Score: (1 - (0.6-0)) * 100 = 40
// Node2 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 5000 / 20000 = 25%
// Node2 Score: (1 - (0.6-0.25)) * 100 = 65
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 65}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 5000 / 20000 = 25%
// Node1 Score: (1 - (0.6-0.25)) * 100 = 65
// Node2 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 10000 / 20000 = 50%
// Node2 Score: (1 - (0.6-0.5)) * 100 = 90
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 90}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 5000 / 20000 = 25%
// Node1 Score: (1 - (0.6-0.25)) * 100 = 65
// Node2 scores on 0-100 scale
// CPU Fraction: 6000 / 10000 = 60%
// Memory Fraction: 10000 / 50000 = 20%
// Node2 Score: (1 - (0.6-0.2)) * 100 = 60
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 60}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Node1 scores on 0-100 scale
// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
// Memory Fraction: 0 / 10000 = 0%
// Node1 Score: 0
// Node2 scores on 0-100 scale
// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
// Memory Fraction: 5000 / 10000 = 50%
// Node2 Score: 0
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Machine4 will be chosen here because it already has an existing volume, which brings the
// variance of volume count, CPU usage, and memory usage closer together.
pod: &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp2"},
},
},
},
},
},
nodes: []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine3", Score: 89}, {Name: "machine4", Score: 98}},
name: "Include volume count on a node for balanced resource allocation",
pods: []*v1.Pod{
{Spec: cpuAndMemory3},
{Spec: podwithVol1},
{Spec: podwithVol2},
{Spec: podwithVol3},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
if len(test.pod.Spec.Volumes) > 0 {
maxVolumes := 5
for _, info := range nodeNameToInfo {
info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
}
}
fh, _ := framework.NewFramework(nil, nil, nil)
snapshot := fh.NodeInfoSnapshot()
snapshot.NodeInfoMap = nodeNameToInfo
p, _ := NewBalancedAllocation(nil, fh)
for i := range test.nodes {
hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
}
}
})
}
}


@ -0,0 +1,64 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// LeastAllocated is a score plugin that favors nodes with the lowest ratio of requested to allocatable resources.
type LeastAllocated struct {
handle framework.FrameworkHandle
}
var _ = framework.ScorePlugin(&LeastAllocated{})
// LeastAllocatedName is the name of the plugin used in the plugin registry and configurations.
const LeastAllocatedName = "NodeResourcesLeastAllocated"
// Name returns name of the plugin. It is used in logs, etc.
func (la *LeastAllocated) Name() string {
return LeastAllocatedName
}
// Score invoked at the score extension point.
func (la *LeastAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, exist := la.handle.NodeInfoSnapshot().NodeInfoMap[nodeName]
if !exist {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("node %q does not exist in NodeInfoSnapshot", nodeName))
}
// LeastRequestedPriorityMap does not use priority metadata, hence we pass nil here
s, err := priorities.LeastRequestedPriorityMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
}
// ScoreExtensions of the Score plugin.
func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions {
return nil
}
// NewLeastAllocated initializes a new plugin and returns it.
func NewLeastAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &LeastAllocated{handle: h}, nil
}
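The arithmetic comes from priorities.LeastRequestedPriorityMap, also unchanged here. A rough, self-contained sketch of the per-resource score that the test comments below spell out:

package main

import "fmt"

// leastRequestedScore sketches the per-resource least-requested score:
// ((capacity - requested) * 100) / capacity, and 0 when the request exceeds
// capacity. The node score averages the CPU and memory values.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 100) / capacity
}

func main() {
	// Mirrors the "differently sized machines" case in the test below:
	// machine1: (25 + 50) / 2 = 37, machine2: (50 + 50) / 2 = 50.
	machine1 := (leastRequestedScore(3000, 4000) + leastRequestedScore(5000, 10000)) / 2
	machine2 := (leastRequestedScore(3000, 6000) + leastRequestedScore(5000, 10000)) / 2
	fmt.Println(machine1, machine2) // 37 50
}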


@ -0,0 +1,253 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func TestNodeResourcesLeastAllocated(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// Node1 scores (remaining resources) on 0-100 scale
// CPU Score: ((4000 - 0) *100) / 4000 = 100
// Memory Score: ((10000 - 0) *100) / 10000 = 100
// Node1 Score: (100 + 100) / 2 = 100
// Node2 scores (remaining resources) on 0-100 scale
// CPU Score: ((4000 - 0) *100) / 4000 = 100
// Memory Score: ((10000 - 0) *100) / 10000 = 100
// Node2 Score: (100 + 100) / 2 = 100
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores on 0-100 scale
// CPU Score: ((4000 - 3000) *100) / 4000 = 25
// Memory Score: ((10000 - 5000) *100) / 10000 = 50
// Node1 Score: (25 + 50) / 2 = 37
// Node2 scores on 0-100 scale
// CPU Score: ((6000 - 3000) *100) / 6000 = 50
// Memory Score: ((10000 - 5000) *100) / 10000 = 50
// Node2 Score: (50 + 50) / 2 = 50
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 37}, {Name: "machine2", Score: 50}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
// Node1 scores on 0-100 scale
// CPU Score: ((4000 - 0) *100) / 4000 = 100
// Memory Score: ((10000 - 0) *100) / 10000 = 100
// Node1 Score: (100 + 100) / 2 = 100
// Node2 scores on 0-100 scale
// CPU Score: ((4000 - 0) *100) / 4000 = 100
// Memory Score: ((10000 - 0) *100) / 10000 = 100
// Node2 Score: (100 + 100) / 2 = 100
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((20000 - 0) *100) / 20000 = 100
// Node1 Score: (40 + 100) / 2 = 70
// Node2 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((20000 - 5000) *100) / 20000 = 75
// Node2 Score: (40 + 75) / 2 = 57
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 57}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((20000 - 5000) *100) / 20000 = 75
// Node1 Score: (40 + 75) / 2 = 57
// Node2 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((20000 - 10000) *100) / 20000 = 50
// Node2 Score: (40 + 50) / 2 = 45
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 45}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((20000 - 5000) *100) / 20000 = 75
// Node1 Score: (40 + 75) / 2 = 57
// Node2 scores on 0-100 scale
// CPU Score: ((10000 - 6000) *100) / 10000 = 40
// Memory Score: ((50000 - 10000) *100) / 50000 = 80
// Node2 Score: (40 + 80) / 2 = 60
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 60}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: 6000 > 4000 ==> 0
// Memory Score: ((10000 - 0) *100) / 10000 = 100
// Node1 Score: (0 + 100) / 2 = 50
// Node2 scores on 0-100 scale
// CPU Score: 6000 > 4000 ==> 0
// Memory Score: ((10000 - 5000) *100) / 10000 = 50
// Node2 Score: (0 + 50) / 2 = 25
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 25}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
fh, _ := framework.NewFramework(nil, nil, nil)
snapshot := fh.NodeInfoSnapshot()
snapshot.NodeInfoMap = nodeNameToInfo
p, _ := NewLeastAllocated(nil, fh)
for i := range test.nodes {
hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
}
}
})
}
}


@ -0,0 +1,64 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// MostAllocated is a score plugin that favors nodes with the highest ratio of requested to allocatable resources.
type MostAllocated struct {
handle framework.FrameworkHandle
}
var _ = framework.ScorePlugin(&MostAllocated{})
// MostAllocatedName is the name of the plugin used in the plugin registry and configurations.
const MostAllocatedName = "NodeResourcesMostAllocated"
// Name returns name of the plugin. It is used in logs, etc.
func (ma *MostAllocated) Name() string {
return MostAllocatedName
}
// Score invoked at the Score extension point.
func (ma *MostAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, exist := ma.handle.NodeInfoSnapshot().NodeInfoMap[nodeName]
if !exist {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("node %q does not exist in NodeInfoSnapshot", nodeName))
}
// MostRequestedPriorityMap does not use priority metadata, hence we pass nil here
s, err := priorities.MostRequestedPriorityMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
}
// ScoreExtensions of the Score plugin.
func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions {
return nil
}
// NewMostAllocated initializes a new plugin and returns it.
func NewMostAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &MostAllocated{handle: h}, nil
}
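As with the other two plugins, the score is produced by an existing priority function, priorities.MostRequestedPriorityMap, which this PR does not modify. A rough, self-contained sketch of the per-resource arithmetic used in the test comments below:

package main

import "fmt"

// mostRequestedScore sketches the per-resource most-requested score:
// (requested * 100) / capacity, and 0 when the request exceeds capacity.
// The node score averages the CPU and memory values.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (requested * 100) / capacity
}

func main() {
	// Mirrors the "differently sized machines" case in the test below:
	// machine1: (75 + 50) / 2 = 62, machine2: (50 + 50) / 2 = 50.
	machine1 := (mostRequestedScore(3000, 4000) + mostRequestedScore(5000, 10000)) / 2
	machine2 := (mostRequestedScore(3000, 6000) + mostRequestedScore(5000, 10000)) / 2
	fmt.Println(machine1, machine2) // 62 50
}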


@ -0,0 +1,216 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func TestNodeResourcesMostAllocated(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
bigCPUAndMemory := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("4000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3000m"),
v1.ResourceMemory: resource.MustParse("5000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// Node1 scores (used resources) on 0-100 scale
// CPU Score: (0 * 100) / 4000 = 0
// Memory Score: (0 * 100) / 10000 = 0
// Node1 Score: (0 + 0) / 2 = 0
// Node2 scores (used resources) on 0-100 scale
// CPU Score: (0 * 100) / 4000 = 0
// Memory Score: (0 * 100) / 10000 = 0
// Node2 Score: (0 + 0) / 2 = 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores on 0-100 scale
// CPU Score: (3000 * 100) / 4000 = 75
// Memory Score: (5000 * 100) / 10000 = 50
// Node1 Score: (75 + 50) / 2 = 62
// Node2 scores on 0-100 scale
// CPU Score: (3000 * 100) / 6000 = 50
// Memory Score: (5000 * 100) / 10000 = 50
// Node2 Score: (50 + 50) / 2 = 50
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 62}, {Name: "machine2", Score: 50}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
// Node1 scores on 0-100 scale
// CPU Score: (6000 * 100) / 10000 = 60
// Memory Score: (0 * 100) / 20000 = 0
// Node1 Score: (60 + 0) / 2 = 30
// Node2 scores on 0-100 scale
// CPU Score: (6000 * 100) / 10000 = 60
// Memory Score: (5000 * 100) / 20000 = 25
// Node2 Score: (60 + 25) / 2 = 42
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 30}, {Name: "machine2", Score: 42}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: (6000 * 100) / 10000 = 60
// Memory Score: (5000 * 100) / 20000 = 25
// Node1 Score: (60 + 25) / 2 = 42
// Node2 scores on 0-100 scale
// CPU Score: (6000 * 100) / 10000 = 60
// Memory Score: (10000 * 100) / 20000 = 50
// Node2 Score: (60 + 50) / 2 = 55
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 42}, {Name: "machine2", Score: 55}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
// Node1 scores on 0-100 scale
// CPU Score: 5000 > 4000 return 0
// Memory Score: (9000 * 100) / 10000 = 90
// Node1 Score: (0 + 90) / 2 = 45
// Node2 scores on 0-100 scale
// CPU Score: (5000 * 100) / 10000 = 50
// Memory Score: 9000 > 8000 return 0
// Node2 Score: (50 + 0) / 2 = 25
pod: &v1.Pod{Spec: bigCPUAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 45}, {Name: "machine2", Score: 25}},
name: "resources requested with more than the node, pods scheduled with resources",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
fh, _ := framework.NewFramework(nil, nil, nil)
snapshot := fh.NodeInfoSnapshot()
snapshot.NodeInfoMap = nodeNameToInfo
p, _ := NewMostAllocated(nil, fh)
for i := range test.nodes {
hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
}
}
})
}
}


@ -0,0 +1,39 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func makeNode(node string, milliCPU, memory int64) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
},
},
}
}


@ -135,9 +135,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"MaxGCEPDVolumeCount",
),
expectedPrioritizers: sets.NewString(
"BalancedResourceAllocation",
"InterPodAffinityPriority",
"LeastRequestedPriority",
"SelectorSpreadPriority",
),
expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -155,7 +153,9 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
{Name: "ImageLocality", Weight: 1},
{Name: "NodeResourcesLeastAllocated", Weight: 1},
{Name: "NodeAffinity", Weight: 1},
{Name: "NodePreferAvoidPods", Weight: 10000},
{Name: "TaintToleration", Weight: 1},
@ -215,9 +215,7 @@ kind: Policy
"MaxGCEPDVolumeCount",
),
expectedPrioritizers: sets.NewString(
"BalancedResourceAllocation",
"InterPodAffinityPriority",
"LeastRequestedPriority",
"SelectorSpreadPriority",
),
expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
@ -235,7 +233,9 @@ kind: Policy
{Name: "InterPodAffinity"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
{Name: "ImageLocality", Weight: 1},
{Name: "NodeResourcesLeastAllocated", Weight: 1},
{Name: "NodeAffinity", Weight: 1},
{Name: "NodePreferAvoidPods", Weight: 10000},
{Name: "TaintToleration", Weight: 1},
@ -486,25 +486,23 @@ func TestUnschedulableNodes(t *testing.T) {
}
func TestMultiScheduler(t *testing.T) {
/*
This integration tests the multi-scheduler feature in the following way:
1. create a default scheduler
2. create a node
3. create 3 pods: testPodNoAnnotation, testPodWithAnnotationFitsDefault and testPodWithAnnotationFitsFoo
- note: the first two should be picked and scheduled by default scheduler while the last one should be
picked by scheduler of name "foo-scheduler" which does not exist yet.
4. **check point-1**:
- testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
- testPodWithAnnotationFitsFoo should NOT be scheduled
5. create a scheduler with name "foo-scheduler"
6. **check point-2**:
- testPodWithAnnotationFitsFoo should be scheduled
7. stop default scheduler
8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
- note: these two pods belong to default scheduler which no longer exists
9. **check point-3**:
- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
*/
// This integration tests the multi-scheduler feature in the following way:
// 1. create a default scheduler
// 2. create a node
// 3. create 3 pods: testPodNoAnnotation, testPodWithAnnotationFitsDefault and testPodWithAnnotationFitsFoo
// - note: the first two should be picked and scheduled by default scheduler while the last one should be
// picked by scheduler of name "foo-scheduler" which does not exist yet.
// 4. **check point-1**:
// - testPodNoAnnotation, testPodWithAnnotationFitsDefault should be scheduled
// - testPodWithAnnotationFitsFoo should NOT be scheduled
// 5. create a scheduler with name "foo-scheduler"
// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
// 7. stop default scheduler
// 8. create 2 pods: testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2
// - note: these two pods belong to default scheduler which no longer exists
// 9. **check point-3**:
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
// 1. create and start default-scheduler
context := initTest(t, "multi-scheduler")