Merge pull request #86725 from notpad/feature/scheduler

Move resource-based priority functions to their Score plugins
Kubernetes Prow Robot 2019-12-31 09:37:40 -08:00 committed by GitHub
commit 8743d98431
20 changed files with 830 additions and 2031 deletions

pkg/scheduler/algorithm/priorities/BUILD

@@ -9,32 +9,22 @@ load(
go_library(
name = "go_default_library",
srcs = [
"balanced_resource_allocation.go",
"least_requested.go",
"metadata.go",
"most_requested.go",
"priorities.go",
"reduce.go",
"requested_to_capacity_ratio.go",
"resource_allocation.go",
"selector_spreading.go",
"test_util.go",
"types.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
@@ -44,18 +34,13 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"balanced_resource_allocation_test.go",
"least_requested_test.go",
"metadata_test.go",
"most_requested_test.go",
"requested_to_capacity_ratio_test.go",
"selector_spreading_test.go",
"spreading_perf_test.go",
"types_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -68,12 +53,9 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)

pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go

@@ -1,79 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"math"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
var (
balancedResourcePriority = &ResourceAllocationPriority{"BalancedResourceAllocation", balancedResourceScorer, DefaultRequestedRatioResources}
// BalancedResourceAllocationMap favors nodes with balanced resource usage rate.
// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together
// with LeastRequestedPriority. It calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
// Detail: score = (1 - variance(cpuFraction,memoryFraction,volumeFraction)) * MaxNodeScore. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced
// Resource Utilization"
BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap
)
// todo: use resource weights in the scorer function
func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuFraction := fractionOfCapacity(requested[v1.ResourceCPU], allocable[v1.ResourceCPU])
memoryFraction := fractionOfCapacity(requested[v1.ResourceMemory], allocable[v1.ResourceMemory])
// This is to find the node with the most balanced CPU, memory, and volume usage.
if cpuFraction >= 1 || memoryFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.
return 0
}
if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
if volumeFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.
return 0
}
// Compute variance for all the three fractions.
mean := (cpuFraction + memoryFraction + volumeFraction) / float64(3)
variance := float64((((cpuFraction - mean) * (cpuFraction - mean)) + ((memoryFraction - mean) * (memoryFraction - mean)) + ((volumeFraction - mean) * (volumeFraction - mean))) / float64(3))
// Since the variance is computed over fractions in [0, 1], it is itself a fraction in [0, 1].
// 1 - variance makes the score higher for the node with the least variance, and multiplying
// by MaxNodeScore provides the scaling factor needed.
return int64((1 - variance) * float64(framework.MaxNodeScore))
}
// Upper and lower boundary of the difference between cpuFraction and memoryFraction are -1 and 1
// respectively. The absolute value of the difference lies in [0, 1], with 0 representing a well
// balanced allocation and 1 a poorly balanced one. Subtracting it from 1 and multiplying by
// MaxNodeScore yields a score from 0 to MaxNodeScore, with MaxNodeScore representing well balanced.
diff := math.Abs(cpuFraction - memoryFraction)
return int64((1 - diff) * float64(framework.MaxNodeScore))
}
func fractionOfCapacity(requested, capacity int64) float64 {
if capacity == 0 {
return 1
}
return float64(requested) / float64(capacity)
}
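In the two-resource case, the scorer above reduces to score = (1 - |cpuFraction - memoryFraction|) * MaxNodeScore; the variance branch generalizes the same idea to three fractions. A minimal standalone sketch of the two-resource reduction (the helper name balancedScore is illustrative, and framework.MaxNodeScore is inlined as 100, its value at this revision):

package main

import (
	"fmt"
	"math"
)

// maxNodeScore inlines framework.MaxNodeScore (100 at this revision).
const maxNodeScore = 100

// balancedScore is an illustrative stand-in for balancedResourceScorer,
// restricted to the CPU/memory branch.
func balancedScore(cpuRequested, cpuCapacity, memRequested, memCapacity int64) int64 {
	cpuFraction := float64(cpuRequested) / float64(cpuCapacity)
	memFraction := float64(memRequested) / float64(memCapacity)
	if cpuFraction >= 1 || memFraction >= 1 {
		// An over-committed node should never be preferred.
		return 0
	}
	diff := math.Abs(cpuFraction - memFraction)
	return int64((1 - diff) * maxNodeScore)
}

func main() {
	// 75% CPU (3000m of 4000m) vs 50% memory (5000 of 10000): |0.75 - 0.50| = 0.25.
	fmt.Println(balancedScore(3000, 4000, 5000, 10000)) // 75
	// Equal fractions are perfectly balanced.
	fmt.Println(balancedScore(2000, 4000, 5000, 10000)) // 100
}

The 75 matches the "differently sized machines" expectation in the test file below.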

pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go

@@ -1,424 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)
// getExistingVolumeCountForNode gets the number of volumes that can still be attached to the node,
// i.e. maxVolumes minus the volumes already used by the given pods.
func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
volumeCount := 0
for _, pod := range pods {
volumeCount += len(pod.Spec.Volumes)
}
if maxVolumes-volumeCount > 0 {
return maxVolumes - volumeCount
}
return 0
}
func TestBalancedResourceAllocation(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
podwithVol1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
},
},
},
NodeName: "machine4",
}
podwithVol2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
},
},
},
NodeName: "machine4",
}
podwithVol3 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
},
},
},
NodeName: "machine4",
}
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
cpuAndMemory3 := v1.PodSpec{
NodeName: "machine3",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
/*
Node1 scores (remaining resources) on 0-100 scale
CPU Fraction: 0 / 4000 = 0%
Memory Fraction: 0 / 10000 = 0%
Node1 Score: 100 - (0-0)*100 = 100
Node2 scores (remaining resources) on 0-100 scale
CPU Fraction: 0 / 4000 = 0%
Memory Fraction: 0 / 10000 = 0%
Node2 Score: 100 - (0-0)*100 = 100
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 3000 / 4000 = 75%
Memory Fraction: 5000 / 10000 = 50%
Node1 Score: 100 - (0.75-0.5)*100 = 75
Node2 scores on 0-100 scale
CPU Fraction: 3000 / 6000 = 50%
Memory Fraction: 5000 / 10000 = 50%
Node2 Score: 100 - (0.5-0.5)*100 = 100
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 75}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 0 / 4000 = 0%
Memory Fraction: 0 / 10000 = 0%
Node1 Score: 100 - (0-0)*100 = 100
Node2 scores on 0-100 scale
CPU Fraction: 0 / 4000 = 0%
Memory Fraction: 0 / 10000 = 0%
Node2 Score: 100 - (0-0)*100 = 100
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 0 / 20000 = 0%
Node1 Score: 100 - (0.6-0)*100 = 40
Node2 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
Node2 Score: 100 - (0.6-0.25)*100 = 65
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 65}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
Node1 Score: 100 - (0.6-0.25)*100 = 65
Node2 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 10000 / 20000 = 50%
Node2 Score: 100 - (0.6-0.5)*100 = 90
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 90}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
Node1 Score: 100 - (0.6-0.25)*100 = 65
Node2 scores on 0-100 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 10000 / 50000 = 20%
Node2 Score: 100 - (0.6-0.2)*100 = 60
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 60}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
Memory Fraction: 0 / 10000 = 0%
Node1 Score: 0
Node2 scores on 0-100 scale
CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
Memory Fraction: 5000 / 10000 = 50%
Node2 Score: 0
*/
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Machine4 will be chosen here because it already has an existing volume, making the variance
of volume count, CPU usage, and memory usage smaller.
*/
pod: &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp2"},
},
},
},
},
},
nodes: []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine3", Score: 89}, {Name: "machine4", Score: 98}},
name: "Include volume count on a node for balanced resource allocation",
pods: []*v1.Pod{
{Spec: cpuAndMemory3},
{Spec: podwithVol1},
{Spec: podwithVol2},
{Spec: podwithVol3},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
if len(test.pod.Spec.Volumes) > 0 {
maxVolumes := 5
for _, info := range snapshot.NodeInfoMap {
info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
}
}
list, err := runMapReducePriority(BalancedResourceAllocationMap, nil, nil, test.pod, snapshot, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}

pkg/scheduler/algorithm/priorities/least_requested.go

@@ -1,56 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
var (
leastRequestedRatioResources = DefaultRequestedRatioResources
leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer, leastRequestedRatioResources}
// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu((capacity-sum(requested))*MaxNodeScore/capacity) + memory((capacity-sum(requested))*MaxNodeScore/capacity))/2
LeastRequestedPriorityMap = leastResourcePriority.PriorityMap
)
func leastResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range leastRequestedRatioResources {
resourceScore := leastRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return nodeScore / weightSum
}
// The unused capacity is calculated on a scale of 0-100,
// 0 being the lowest priority and 100 being the highest.
// The more unused resources, the higher the score.
func leastRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
return 0
}
return ((capacity - requested) * int64(framework.MaxNodeScore)) / capacity
}
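Per resource, leastRequestedScore is ((capacity - requested) * MaxNodeScore) / capacity, and leastResourceScorer averages the per-resource scores by weight (CPU and memory each weigh 1 in DefaultRequestedRatioResources). A small self-contained sketch (helper names are illustrative, MaxNodeScore inlined as 100) that reproduces the 37/50 case in the test file below:

package main

import "fmt"

// maxNodeScore inlines framework.MaxNodeScore (100 at this revision).
const maxNodeScore int64 = 100

// leastRequestedScore mirrors the deleted function above.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * maxNodeScore / capacity
}

func main() {
	// A pod requesting 3000m CPU and 5000 memory on a 4000m/10000 node:
	cpu := leastRequestedScore(3000, 4000)  // (1000 * 100) / 4000 = 25
	mem := leastRequestedScore(5000, 10000) // (5000 * 100) / 10000 = 50
	fmt.Println((cpu + mem) / 2)            // 37 (integer division)
}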

pkg/scheduler/algorithm/priorities/least_requested_test.go

@@ -1,266 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)
func TestLeastRequested(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
/*
Node1 scores (remaining resources) on 0-100 scale
CPU Score: ((4000 - 0) *100) / 4000 = 100
Memory Score: ((10000 - 0) *100) / 10000 = 100
Node1 Score: (100 + 100) / 2 = 100
Node2 scores (remaining resources) on 0-100 scale
CPU Score: ((4000 - 0) *100) / 4000 = 100
Memory Score: ((10000 - 0) *100) / 10000 = 100
Node2 Score: (100 + 100) / 2 = 100
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
/*
Node1 scores on 0-100 scale
CPU Score: ((4000 - 3000) *100) / 4000 = 25
Memory Score: ((10000 - 5000) *100) / 10000 = 50
Node1 Score: (25 + 50) / 2 = 37
Node2 scores on 0-100 scale
CPU Score: ((6000 - 3000) *100) / 6000 = 50
Memory Score: ((10000 - 5000) *100) / 10000 = 50
Node2 Score: (50 + 50) / 2 = 50
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 37}, {Name: "machine2", Score: 50}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
Node1 scores on 0-100 scale
CPU Score: ((4000 - 0) *100) / 4000 = 100
Memory Score: ((10000 - 0) *100) / 10000 = 100
Node1 Score: (100 + 100) / 2 = 100
Node2 scores on 0-100 scale
CPU Score: ((4000 - 0) *100) / 4000 = 100
Memory Score: ((10000 - 0) *100) / 10000 = 100
Node2 Score: (100 + 100) / 2 = 100
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((20000 - 0) *100) / 20000 = 100
Node1 Score: (40 + 100) / 2 = 70
Node2 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((20000 - 5000) *100) / 20000 = 75
Node2 Score: (40 + 75) / 2 = 57
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 57}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((20000 - 5000) *100) / 20000 = 75
Node1 Score: (40 + 75) / 2 = 57
Node2 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((20000 - 10000) *100) / 20000 = 50
Node2 Score: (40 + 50) / 2 = 45
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 45}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((20000 - 5000) *100) / 20000 = 75
Node1 Score: (40 + 75) / 2 = 57
Node2 scores on 0-100 scale
CPU Score: ((10000 - 6000) *100) / 10000 = 40
Memory Score: ((50000 - 10000) *100) / 50000 = 80
Node2 Score: (40 + 80) / 2 = 60
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 60}},
name: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: 6000 > 4000 (requested exceeds capacity) => 0
Memory Score: ((10000 - 0) *100) / 10000 = 100
Node1 Score: (0 + 100) / 2 = 50
Node2 scores on 0-100 scale
CPU Score: 6000 > 4000 (requested exceeds capacity) => 0
Memory Score: ((10000 - 5000) *100) / 10000 = 50
Node2 Score: (0 + 50) / 2 = 25
*/
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 25}},
name: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
list, err := runMapReducePriority(LeastRequestedPriorityMap, nil, nil, test.pod, snapshot, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}

pkg/scheduler/algorithm/priorities/most_requested.go

@@ -1,59 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
var (
mostRequestedRatioResources = DefaultRequestedRatioResources
mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer, mostRequestedRatioResources}
// MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
// Details: (cpu(MaxNodeScore * sum(requested) / capacity) + memory(MaxNodeScore * sum(requested) / capacity)) / 2
MostRequestedPriorityMap = mostResourcePriority.PriorityMap
)
func mostResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range mostRequestedRatioResources {
resourceScore := mostRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return (nodeScore / weightSum)
}
// The used capacity is calculated on a scale of 0-100,
// 0 being the lowest priority and 100 being the highest.
// The more resources are used, the higher the score is. This function
// is almost a reversed version of least_requested_priority.calculateUnusedScore
// (MaxNodeScore - calculateUnusedScore). The main difference is in rounding. It was added to
// keep the final formula clean and not to modify the widely used (by users
// in their default scheduling policies) calculateUnusedScore.
func mostRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
return 0
}
return (requested * framework.MaxNodeScore) / capacity
}
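mostRequestedScore is the mirror image: (requested * MaxNodeScore) / capacity per resource, averaged by weight. A sketch under the same assumptions (illustrative names, MaxNodeScore inlined as 100) reproducing the 62/50 case in the test file below:

package main

import "fmt"

// maxNodeScore inlines framework.MaxNodeScore (100 at this revision).
const maxNodeScore int64 = 100

// mostRequestedScore mirrors the deleted function above.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return requested * maxNodeScore / capacity
}

func main() {
	// 3000m CPU and 5000 memory requested on a 4000m/10000 node:
	cpu := mostRequestedScore(3000, 4000)  // 75
	mem := mostRequestedScore(5000, 10000) // 50
	fmt.Println((cpu + mem) / 2)           // 62
}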

pkg/scheduler/algorithm/priorities/most_requested_test.go

@@ -1,223 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)
func TestMostRequested(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
bigCPUAndMemory := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("4000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3000m"),
v1.ResourceMemory: resource.MustParse("5000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
/*
Node1 scores (used resources) on 0-100 scale
CPU Score: (0 * 100) / 4000 = 0
Memory Score: (0 * 100) / 10000 = 0
Node1 Score: (0 + 0) / 2 = 0
Node2 scores (used resources) on 0-100 scale
CPU Score: (0 * 100) / 4000 = 0
Memory Score: (0 * 100) / 10000 = 0
Node2 Score: (0 + 0) / 2 = 0
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
/*
Node1 scores on 0-100 scale
CPU Score: (3000 * 100) / 4000 = 75
Memory Score: (5000 * 100) / 10000 = 50
Node1 Score: (75 + 50) / 2 = 62
Node2 scores on 0-100 scale
CPU Score: (3000 * 100) / 6000 = 50
Memory Score: (5000 * 100) / 10000 = 50
Node2 Score: (50 + 50) / 2 = 50
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 62}, {Name: "machine2", Score: 50}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
Node1 scores on 0-100 scale
CPU Score: (6000 * 100) / 10000 = 60
Memory Score: (0 * 100) / 20000 = 0
Node1 Score: (60 + 0) / 2 = 30
Node2 scores on 0-100 scale
CPU Score: (6000 * 100) / 10000 = 60
Memory Score: (5000 * 100) / 20000 = 25
Node2 Score: (60 + 25) / 2 = 42
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 30}, {Name: "machine2", Score: 42}},
name: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: (6000 * 100) / 10000 = 60
Memory Score: (5000 * 100) / 20000 = 25
Node1 Score: (60 + 25) / 2 = 42
Node2 scores on 0-100 scale
CPU Score: (6000 * 100) / 10000 = 60
Memory Score: (10000 * 100) / 20000 = 50
Node2 Score: (60 + 50) / 2 = 55
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 42}, {Name: "machine2", Score: 55}},
name: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-100 scale
CPU Score: 5000 > 4000, return 0
Memory Score: (9000 * 100) / 10000 = 90
Node1 Score: (0 + 90) / 2 = 45
Node2 scores on 0-100 scale
CPU Score: (5000 * 100) / 10000 = 50
Memory Score: 9000 > 8000, return 0
Node2 Score: (50 + 0) / 2 = 25
*/
pod: &v1.Pod{Spec: bigCPUAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 45}, {Name: "machine2", Score: 25}},
name: "resources requested with more than the node, pods scheduled with resources",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
list, err := runMapReducePriority(MostRequestedPriorityMap, nil, nil, test.pod, snapshot, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}

pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go

@@ -1,175 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"fmt"
"math"
"k8s.io/klog"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// FunctionShape represents the shape of a scoring function.
// For safety, create instances with NewFunctionShape, which performs precondition checks.
type FunctionShape []FunctionShapePoint
// FunctionShapePoint represents a single point in the scoring function shape.
type FunctionShapePoint struct {
// Utilization is the function's argument.
Utilization int64
// Score is the function's value at this utilization.
Score int64
}
var (
// give priority to least utilized nodes by default
defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{
{
Utilization: 0,
Score: framework.MaxNodeScore,
},
{
Utilization: 100,
Score: framework.MinNodeScore,
},
})
)
const (
minUtilization = 0
maxUtilization = 100
minScore = 0
maxScore = framework.MaxNodeScore
)
// NewFunctionShape creates an instance of FunctionShape in a safe way, performing all
// necessary sanity checks.
func NewFunctionShape(points []FunctionShapePoint) (FunctionShape, error) {
n := len(points)
if n == 0 {
return nil, fmt.Errorf("at least one point must be specified")
}
for i := 1; i < n; i++ {
if points[i-1].Utilization >= points[i].Utilization {
return nil, fmt.Errorf("utilization values must be sorted. Utilization[%d]==%d >= Utilization[%d]==%d", i-1, points[i-1].Utilization, i, points[i].Utilization)
}
}
for i, point := range points {
if point.Utilization < minUtilization {
return nil, fmt.Errorf("utilization values must not be less than %d. Utilization[%d]==%d", minUtilization, i, point.Utilization)
}
if point.Utilization > maxUtilization {
return nil, fmt.Errorf("utilization values must not be greater than %d. Utilization[%d]==%d", maxUtilization, i, point.Utilization)
}
if point.Score < minScore {
return nil, fmt.Errorf("score values must not be less than %d. Score[%d]==%d", minScore, i, point.Score)
}
if point.Score > maxScore {
return nil, fmt.Errorf("score valuses not be greater than %d. Score[%d]==%d", maxScore, i, point.Score)
}
}
// We make a defensive copy so that we need no assumptions about whether the slice passed as an argument is modified afterwards.
pointsCopy := make(FunctionShape, n)
copy(pointsCopy, points)
return pointsCopy, nil
}
func validateResourceWeightMap(resourceToWeightMap ResourceToWeightMap) error {
if len(resourceToWeightMap) == 0 {
return fmt.Errorf("resourceToWeightMap cannot be nil")
}
for resource, weight := range resourceToWeightMap {
if weight < 1 {
return fmt.Errorf("resource %s weight %d must not be less than 1", string(resource), weight)
}
}
return nil
}
// RequestedToCapacityRatioResourceAllocationPriorityDefault creates a requestedToCapacity based
// ResourceAllocationPriority using default resource scoring function shape.
// The default function assigns the maximum score to a resource when all of its capacity is available
// and the minimum score when the requested amount equals the capacity.
func RequestedToCapacityRatioResourceAllocationPriorityDefault() *ResourceAllocationPriority {
return RequestedToCapacityRatioResourceAllocationPriority(defaultFunctionShape, DefaultRequestedRatioResources)
}
// RequestedToCapacityRatioResourceAllocationPriority creates a requestedToCapacity based
// ResourceAllocationPriority using provided resource scoring function shape.
func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape FunctionShape, resourceToWeightMap ResourceToWeightMap) *ResourceAllocationPriority {
return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape, resourceToWeightMap), resourceToWeightMap}
}
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape, resourceToWeightMap ResourceToWeightMap) func(ResourceToValueMap, ResourceToValueMap, bool, int, int) int64 {
rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape)
err := validateResourceWeightMap(resourceToWeightMap)
if err != nil {
klog.Error(err)
}
resourceScoringFunction := func(requested, capacity int64) int64 {
if capacity == 0 || requested > capacity {
return rawScoringFunction(maxUtilization)
}
return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity)
}
return func(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range resourceToWeightMap {
resourceScore := resourceScoringFunction(requested[resource], allocable[resource])
if resourceScore > 0 {
nodeScore += resourceScore * weight
weightSum += weight
}
}
if weightSum == 0 {
return 0
}
return int64(math.Round(float64(nodeScore) / float64(weightSum)))
}
}
// Creates a function that is built from linear segments. Segments are defined via the shape array.
// Shape[i].Utilization represents the points on the "utilization" axis where adjacent segments meet.
// Shape[i].Score represents the function values at those meeting points.
//
// The function f(p) is defined as:
// shape[0].Score for p < shape[0].Utilization
// shape[i].Score for p == shape[i].Utilization
// shape[n-1].Score for p > shape[n-1].Utilization
// and linear interpolation between adjacent points for shape[i-1].Utilization < p < shape[i].Utilization
func buildBrokenLinearFunction(shape FunctionShape) func(int64) int64 {
n := len(shape)
return func(p int64) int64 {
for i := 0; i < n; i++ {
if p <= shape[i].Utilization {
if i == 0 {
return shape[0].Score
}
return shape[i-1].Score + (shape[i].Score-shape[i-1].Score)*(p-shape[i-1].Utilization)/(shape[i].Utilization-shape[i-1].Utilization)
}
}
return shape[n-1].Score
}
}
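resourceScoringFunction above converts requested and capacity into a utilization in [0, 100] (maxUtilization - (capacity-requested)*maxUtilization/capacity, which simplifies to requested*100/capacity) and feeds it through the broken-linear function. A self-contained sketch of the interpolation itself (the point type and the brokenLinear name are illustrative):

package main

import "fmt"

type point struct{ utilization, score int64 }

// brokenLinear mirrors buildBrokenLinearFunction for a shape whose
// utilization values are already sorted and validated.
func brokenLinear(shape []point) func(int64) int64 {
	return func(p int64) int64 {
		for i := range shape {
			if p <= shape[i].utilization {
				if i == 0 {
					return shape[0].score
				}
				// Linear interpolation between the two surrounding points.
				return shape[i-1].score + (shape[i].score-shape[i-1].score)*
					(p-shape[i-1].utilization)/(shape[i].utilization-shape[i-1].utilization)
			}
		}
		return shape[len(shape)-1].score
	}
}

func main() {
	// The default shape: full score at 0% utilization, zero at 100%.
	f := brokenLinear([]point{{0, 100}, {100, 0}})
	fmt.Println(f(25), f(50), f(110)) // 75 50 0
}

With the default shape ({0, MaxNodeScore}, {100, MinNodeScore}) this is a straight line from full score on an idle node down to zero at full utilization.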

pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go

@@ -1,627 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"reflect"
"sort"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)
func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{})
assert.Equal(t, "at least one point must be specified", err.Error())
}
func TestCreatingResourceNegativeWeight(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{v1.ResourceCPU: -1})
assert.Equal(t, "resource cpu weight -1 must not be less than 1", err.Error())
}
func TestCreatingResourceDefaultWeight(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{})
assert.Equal(t, "resourceToWeightMap cannot be nil", err.Error())
}
func TestCreatingFunctionShapeErrorsIfXIsNotSorted(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{{10, 1}, {15, 2}, {20, 3}, {19, 4}, {25, 5}})
assert.Equal(t, "utilization values must be sorted. Utilization[2]==20 >= Utilization[3]==19", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{10, 1}, {20, 2}, {20, 3}, {22, 4}, {25, 5}})
assert.Equal(t, "utilization values must be sorted. Utilization[1]==20 >= Utilization[2]==20", err.Error())
}
func TestCreatingFunctionPointNotInAllowedRange(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{{-1, 0}, {100, 100}})
assert.Equal(t, "utilization values must not be less than 0. Utilization[0]==-1", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {101, 100}})
assert.Equal(t, "utilization values must not be greater than 100. Utilization[1]==101", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, -1}, {100, 100}})
assert.Equal(t, "score values must not be less than 0. Score[0]==-1", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 101}})
assert.Equal(t, "score valuses not be greater than 100. Score[1]==101", err.Error())
}
func TestBrokenLinearFunction(t *testing.T) {
type Assertion struct {
p int64
expected int64
}
type Test struct {
points []FunctionShapePoint
assertions []Assertion
}
tests := []Test{
{
points: []FunctionShapePoint{{10, 1}, {90, 9}},
assertions: []Assertion{
{p: -10, expected: 1},
{p: 0, expected: 1},
{p: 9, expected: 1},
{p: 10, expected: 1},
{p: 15, expected: 1},
{p: 19, expected: 1},
{p: 20, expected: 2},
{p: 89, expected: 8},
{p: 90, expected: 9},
{p: 99, expected: 9},
{p: 100, expected: 9},
{p: 110, expected: 9},
},
},
{
points: []FunctionShapePoint{{0, 2}, {40, 10}, {100, 0}},
assertions: []Assertion{
{p: -10, expected: 2},
{p: 0, expected: 2},
{p: 20, expected: 6},
{p: 30, expected: 8},
{p: 40, expected: 10},
{p: 70, expected: 5},
{p: 100, expected: 0},
{p: 110, expected: 0},
},
},
{
points: []FunctionShapePoint{{0, 2}, {40, 2}, {100, 2}},
assertions: []Assertion{
{p: -10, expected: 2},
{p: 0, expected: 2},
{p: 20, expected: 2},
{p: 30, expected: 2},
{p: 40, expected: 2},
{p: 70, expected: 2},
{p: 100, expected: 2},
{p: 110, expected: 2},
},
},
}
for _, test := range tests {
functionShape, err := NewFunctionShape(test.points)
assert.Nil(t, err)
function := buildBrokenLinearFunction(functionShape)
for _, assertion := range test.assertions {
assert.InDelta(t, assertion.expected, function(assertion.p), 0.1, "points=%v, p=%f", test.points, assertion.p)
}
}
}
func TestRequestedToCapacityRatio(t *testing.T) {
type resources struct {
cpu int64
mem int64
}
type nodeResources struct {
capacity resources
used resources
}
type test struct {
test string
requested resources
nodes map[string]nodeResources
expectedPriorities framework.NodeScoreList
}
tests := []test{
{
test: "nothing scheduled, nothing requested (default - least requested nodes have priority)",
requested: resources{0, 0},
nodes: map[string]nodeResources{
"node1": {
capacity: resources{4000, 10000},
used: resources{0, 0},
},
"node2": {
capacity: resources{4000, 10000},
used: resources{0, 0},
},
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}},
},
{
test: "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
requested: resources{3000, 5000},
nodes: map[string]nodeResources{
"node1": {
capacity: resources{4000, 10000},
used: resources{0, 0},
},
"node2": {
capacity: resources{6000, 10000},
used: resources{0, 0},
},
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
},
{
test: "no resources requested, pods scheduled with resources (default - least requested nodes have priority)",
requested: resources{0, 0},
nodes: map[string]nodeResources{
"node1": {
capacity: resources{4000, 10000},
used: resources{3000, 5000},
},
"node2": {
capacity: resources{6000, 10000},
used: resources{3000, 5000},
},
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
},
}
buildResourcesPod := func(node string, requestedResources resources) *v1.Pod {
return &v1.Pod{Spec: v1.PodSpec{
NodeName: node,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(requestedResources.cpu, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(requestedResources.mem, resource.DecimalSI),
},
},
},
},
},
}
}
for _, test := range tests {
var nodeNames []string
for nodeName := range test.nodes {
nodeNames = append(nodeNames, nodeName)
}
sort.Strings(nodeNames)
var nodes []*v1.Node
for _, nodeName := range nodeNames {
node := test.nodes[nodeName]
nodes = append(nodes, makeNode(nodeName, node.capacity.cpu, node.capacity.mem))
}
var scheduledPods []*v1.Pod
for name, node := range test.nodes {
scheduledPods = append(scheduledPods,
buildResourcesPod(name, node.used))
}
newPod := buildResourcesPod("", test.requested)
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(scheduledPods, nodes))
list, err := runMapReducePriority(RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, nil, newPod, snapshot, nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedPriorities, list) {
t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedPriorities, list)
}
}
}
func TestResourceBinPackingSingleExtended(t *testing.T) {
extendedResource := "intel.com/foo"
extendedResource1 := map[string]int64{
"intel.com/foo": 4,
}
extendedResource2 := map[string]int64{
"intel.com/foo": 8,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("4"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node1 Score: 0
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node2 Score: 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node2 Score: 5
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 =rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node1 Score: 5
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 1}
prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
list, err := runMapReducePriority(prior.PriorityMap, nil, nil, test.pod, snapshot, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}
func TestResourceBinPackingMultipleExtended(t *testing.T) {
extendedResource1 := "intel.com/foo"
extendedResource2 := "intel.com/bar"
extendedResources1 := map[string]int64{
"intel.com/foo": 4,
"intel.com/bar": 8,
}
extendedResources2 := map[string]int64{
"intel.com/foo": 8,
"intel.com/bar": 4,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("2"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("4"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node1 Score: ((0 * 3) + (0 * 5)) / 8 = 0
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node2 Score: ((0 * 3) + (0 * 5)) / 8 = 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((2 * 3) + (5 * 5)) / 8 = 4 (rounded)
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: ((5 * 3) + (2 * 5)) / 8 = 3 (rounded)
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((2 * 3) + (5 * 5)) / 8 = 4 (rounded)
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node2 Score: ((10 * 3) + (5 * 5)) / 8 = 7 (rounded)
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((5 * 3) + (5 * 5)) / 8 = 5
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: ((10 * 3) + (2 * 5)) / 8 = 5
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 3, v1.ResourceName("intel.com/bar"): 5}
prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
list, err := runMapReducePriority(prior.PriorityMap, nil, nil, test.pod, snapshot, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}

View File

@ -20,44 +20,10 @@ import (
"sort"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)
func makeNode(node string, milliCPU, memory int64) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
},
},
}
}
func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedResource map[string]int64) *v1.Node {
resourceList := make(map[v1.ResourceName]resource.Quantity)
for res, quantity := range extendedResource {
resourceList[v1.ResourceName(res)] = *resource.NewQuantity(quantity, resource.DecimalSI)
}
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: resourceList,
Allocatable: resourceList,
},
}
}
func runMapReducePriority(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}, pod *v1.Pod, sharedLister schedulerlisters.SharedLister, nodes []*v1.Node) (framework.NodeScoreList, error) {
result := make(framework.NodeScoreList, 0, len(nodes))
for i := range nodes {

View File

@ -343,11 +343,11 @@ func RegisterCustomPriority(policy schedulerapi.PriorityPolicy, configProducerAr
return RegisterPriority(priority, weight)
}
func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) (priorities.FunctionShape, priorities.ResourceToWeightMap) {
func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) (noderesources.FunctionShape, noderesources.ResourceToWeightMap) {
n := len(arguments.Shape)
points := make([]priorities.FunctionShapePoint, 0, n)
points := make([]noderesources.FunctionShapePoint, 0, n)
for _, point := range arguments.Shape {
points = append(points, priorities.FunctionShapePoint{
points = append(points, noderesources.FunctionShapePoint{
Utilization: int64(point.Utilization),
// MaxCustomPriorityScore may diverge from the max score used in the scheduler and defined by MaxNodeScore,
// therefore we need to scale the score returned by requested to capacity ratio to the score range
@ -355,13 +355,13 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s
Score: int64(point.Score) * (framework.MaxNodeScore / schedulerapi.MaxCustomPriorityScore),
})
}
shape, err := priorities.NewFunctionShape(points)
shape, err := noderesources.NewFunctionShape(points)
if err != nil {
klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
}
resourceToWeightMap := make(priorities.ResourceToWeightMap, 0)
resourceToWeightMap := make(noderesources.ResourceToWeightMap, 0)
if len(arguments.Resources) == 0 {
resourceToWeightMap = priorities.DefaultRequestedRatioResources
resourceToWeightMap = noderesources.DefaultRequestedRatioResources
return shape, resourceToWeightMap
}
for _, resource := range arguments.Resources {

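For context on the Score conversion above: MaxCustomPriorityScore (10 at the time of this change) and MaxNodeScore (100) differ, so each policy point is stretched onto the framework's range. A standalone sketch of that arithmetic, with both constants inlined as assumptions:

package main

import "fmt"

const (
	maxNodeScore           = 100 // stand-in for framework.MaxNodeScore
	maxCustomPriorityScore = 10  // stand-in for schedulerapi.MaxCustomPriorityScore
)

func main() {
	// A policy point with Score 5 on the 0-10 custom scale becomes 50
	// on the framework's 0-100 scale.
	point := int64(5)
	fmt.Println(point * (maxNodeScore / maxCustomPriorityScore)) // 50
}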
View File

@ -21,8 +21,8 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
)
func TestAlgorithmNameValidation(t *testing.T) {
@ -64,12 +64,12 @@ func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArguments(t *testi
},
}
builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{
expectedShape, _ := noderesources.NewFunctionShape([]noderesources.FunctionShapePoint{
{Utilization: 10, Score: 10},
{Utilization: 30, Score: 50},
{Utilization: 70, Score: 20},
})
expectedResources := priorities.ResourceToWeightMap{
expectedResources := noderesources.ResourceToWeightMap{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
}
@ -86,12 +86,12 @@ func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArgumentsNilResour
},
}
builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{
expectedShape, _ := noderesources.NewFunctionShape([]noderesources.FunctionShapePoint{
{Utilization: 10, Score: 10},
{Utilization: 30, Score: 50},
{Utilization: 70, Score: 20},
})
expectedResources := priorities.ResourceToWeightMap{
expectedResources := noderesources.ResourceToWeightMap{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
}

View File

@ -8,14 +8,17 @@ go_library(
"least_allocated.go",
"most_allocated.go",
"requested_to_capacity_ratio.go",
"resource_allocation.go",
"resource_limits.go",
"test_util.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
@ -24,6 +27,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -66,5 +70,6 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)

View File

@ -19,11 +19,12 @@ package noderesources
import (
"context"
"fmt"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
@ -31,6 +32,7 @@ import (
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
type BalancedAllocation struct {
handle framework.FrameworkHandle
resourceAllocationScorer
}
var _ = framework.ScorePlugin(&BalancedAllocation{})
@ -50,9 +52,14 @@ func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleS
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}
// BalancedResourceAllocationMap does not use priority metadata, hence we pass nil here
s, err := priorities.BalancedResourceAllocationMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
// ba.score favors nodes with balanced resource usage rate.
// It should **NOT** be used alone, and **MUST** be used together
// with NodeResourcesLeastAllocated plugin. It calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
// Detail: score = (1 - variance(cpuFraction, memoryFraction, volumeFraction)) * MaxNodeScore. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced
// Resource Utilization"
return ba.score(pod, nodeInfo)
}
// ScoreExtensions of the Score plugin.
@ -62,5 +69,52 @@ func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions {
// NewBalancedAllocation initializes a new plugin and returns it.
func NewBalancedAllocation(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &BalancedAllocation{handle: h}, nil
return &BalancedAllocation{
handle: h,
resourceAllocationScorer: resourceAllocationScorer{
BalancedAllocationName,
balancedResourceScorer,
DefaultRequestedRatioResources,
},
}, nil
}
// TODO: use resource weights in the scorer function
func balancedResourceScorer(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuFraction := fractionOfCapacity(requested[v1.ResourceCPU], allocable[v1.ResourceCPU])
memoryFraction := fractionOfCapacity(requested[v1.ResourceMemory], allocable[v1.ResourceMemory])
// This is to find a node with the most balanced CPU, memory, and volume usage.
if cpuFraction >= 1 || memoryFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.
return 0
}
if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
if volumeFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.
return 0
}
// Compute variance for all the three fractions.
mean := (cpuFraction + memoryFraction + volumeFraction) / float64(3)
variance := float64((((cpuFraction - mean) * (cpuFraction - mean)) + ((memoryFraction - mean) * (memoryFraction - mean)) + ((volumeFraction - mean) * (volumeFraction - mean))) / float64(3))
// Since the variance is computed over fractions, it is itself a fraction in [0, 1]. 1 - variance makes
// the score higher for the node with the least variance, and multiplying it by MaxNodeScore provides
// the scaling factor needed.
return int64((1 - variance) * float64(framework.MaxNodeScore))
}
// The difference between cpuFraction and memoryFraction lies between -1 and 1, so its absolute
// value lies in [0, 1], with 0 representing a well-balanced allocation and 1 a poorly balanced one.
// Subtracting it from 1 and multiplying by MaxNodeScore yields a score that likewise scales from
// 0 to MaxNodeScore, with MaxNodeScore representing well balanced.
diff := math.Abs(cpuFraction - memoryFraction)
return int64((1 - diff) * float64(framework.MaxNodeScore))
}
func fractionOfCapacity(requested, capacity int64) float64 {
if capacity == 0 {
return 1
}
return float64(requested) / float64(capacity)
}
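
For reference, a minimal standalone sketch (not part of this change) of the cpu/memory branch of balancedResourceScorer above, handy for checking scores by hand; the maxNodeScore constant stands in for framework.MaxNodeScore:

package main

import (
	"fmt"
	"math"
)

const maxNodeScore = 100 // stand-in for framework.MaxNodeScore

// balancedScore mirrors the volume-free branch: 0 when either resource is
// overcommitted, otherwise (1 - |cpuFraction - memoryFraction|) * maxNodeScore.
func balancedScore(cpuReq, cpuCap, memReq, memCap int64) int64 {
	cpuFraction := float64(cpuReq) / float64(cpuCap)
	memFraction := float64(memReq) / float64(memCap)
	if cpuFraction >= 1 || memFraction >= 1 {
		return 0
	}
	diff := math.Abs(cpuFraction - memFraction)
	return int64((1 - diff) * maxNodeScore)
}

func main() {
	fmt.Println(balancedScore(2000, 4000, 5000, 10000)) // 50%/50%, perfectly balanced: 100
	fmt.Println(balancedScore(3000, 4000, 2500, 10000)) // 75%/25%, imbalance 0.5: 50
}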

View File

@ -22,14 +22,13 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// LeastAllocated is a score plugin that favors nodes with fewer requested resources, based on pods' resource requests.
type LeastAllocated struct {
handle framework.FrameworkHandle
resourceAllocationScorer
}
var _ = framework.ScorePlugin(&LeastAllocated{})
@ -49,9 +48,13 @@ func (la *LeastAllocated) Score(ctx context.Context, state *framework.CycleState
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}
// LeastRequestedPriorityMap does not use priority metadata, hence we pass nil here
s, err := priorities.LeastRequestedPriorityMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
// la.score favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu((capacity-sum(requested))*MaxNodeScore/capacity) + memory((capacity-sum(requested))*MaxNodeScore/capacity))/2
return la.score(pod, nodeInfo)
}
// ScoreExtensions of the Score plugin.
@ -61,5 +64,36 @@ func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions {
// NewLeastAllocated initializes a new plugin and returns it.
func NewLeastAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &LeastAllocated{handle: h}, nil
return &LeastAllocated{
handle: h,
resourceAllocationScorer: resourceAllocationScorer{
LeastAllocatedName,
leastResourceScorer,
DefaultRequestedRatioResources,
},
}, nil
}
func leastResourceScorer(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range DefaultRequestedRatioResources {
resourceScore := leastRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return nodeScore / weightSum
}
// The unused capacity is calculated on a scale of 0 to MaxNodeScore,
// 0 being the lowest priority and MaxNodeScore being the highest.
// The more unused resources there are, the higher the score.
func leastRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
return 0
}
return ((capacity - requested) * int64(framework.MaxNodeScore)) / capacity
}
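
For reference, a minimal standalone sketch of leastRequestedScore combined with the default equal cpu/memory weights, assuming framework.MaxNodeScore is 100 (inlined as a local constant):

package main

import "fmt"

const maxNodeScore = 100 // stand-in for framework.MaxNodeScore

// leastRequested mirrors leastRequestedScore above: the larger the unused
// share of capacity, the higher the score.
func leastRequested(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * maxNodeScore / capacity
}

func main() {
	cpu := leastRequested(1000, 4000)  // 75
	mem := leastRequested(5000, 10000) // 50
	// Equal weights (the DefaultRequestedRatioResources case) reduce the
	// weighted sum to a plain average, with integer truncation.
	fmt.Println((cpu + mem) / 2) // 62
}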

View File

@ -22,14 +22,13 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// MostAllocated is a score plugin that favors nodes with high resource allocation, based on pods' resource requests.
type MostAllocated struct {
handle framework.FrameworkHandle
resourceAllocationScorer
}
var _ = framework.ScorePlugin(&MostAllocated{})
@ -49,9 +48,11 @@ func (ma *MostAllocated) Score(ctx context.Context, state *framework.CycleState,
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v, node is nil: %v", nodeName, err, nodeInfo.Node() == nil))
}
// MostRequestedPriorityMap does not use priority metadata, hence we pass nil here
s, err := priorities.MostRequestedPriorityMap(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
// ma.score favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
// Details: (cpu(MaxNodeScore * sum(requested) / capacity) + memory(MaxNodeScore * sum(requested) / capacity)) / 2
return ma.score(pod, nodeInfo)
}
// ScoreExtensions of the Score plugin.
@ -61,5 +62,41 @@ func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions {
// NewMostAllocated initializes a new plugin and returns it.
func NewMostAllocated(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
return &MostAllocated{handle: h}, nil
return &MostAllocated{
handle: h,
resourceAllocationScorer: resourceAllocationScorer{
MostAllocatedName,
mostResourceScorer,
DefaultRequestedRatioResources,
},
}, nil
}
func mostResourceScorer(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range DefaultRequestedRatioResources {
resourceScore := mostRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return (nodeScore / weightSum)
}
// The used capacity is calculated on a scale of 0 to MaxNodeScore,
// 0 being the lowest priority and MaxNodeScore being the highest.
// The more resources are used, the higher the score is. This function
// is almost a reversed version of leastRequestedScore
// (MaxNodeScore - leastRequestedScore). The main difference is in rounding. It was added to
// keep the final formula clean and not to modify the widely used (by users
// in their default scheduling policies) least-requested score.
func mostRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
return 0
}
return (requested * framework.MaxNodeScore) / capacity
}
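
And the mirror-image sketch for mostRequestedScore, under the same assumptions; for identical inputs it is roughly the complement of the least-requested score, up to integer truncation:

package main

import "fmt"

const maxNodeScore = 100 // stand-in for framework.MaxNodeScore

// mostRequested mirrors mostRequestedScore above: the larger the used share
// of capacity, the higher the score.
func mostRequested(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return requested * maxNodeScore / capacity
}

func main() {
	cpu := mostRequested(1000, 4000)  // 25
	mem := mostRequested(5000, 10000) // 50
	fmt.Println((cpu + mem) / 2) // 37 (the least-requested sketch gives 62 for the same node)
}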

View File

@ -19,21 +19,39 @@ package noderesources
import (
"context"
"fmt"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
"k8s.io/klog"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// RequestedToCapacityRatioName is the name of this plugin.
const RequestedToCapacityRatioName = "RequestedToCapacityRatio"
const (
// RequestedToCapacityRatioName is the name of this plugin.
RequestedToCapacityRatioName = "RequestedToCapacityRatio"
minUtilization = 0
maxUtilization = 100
minScore = 0
maxScore = framework.MaxNodeScore
)
// FunctionShape represents the shape of a scoring function.
// For safety, use NewFunctionShape, which performs the precondition checks at creation time.
type FunctionShape []FunctionShapePoint
// FunctionShapePoint represents a single point in a scoring function's shape.
type FunctionShapePoint struct {
// Utilization is the function argument.
Utilization int64
// Score is the function value.
Score int64
}
// RequestedToCapacityRatioArgs holds the args that are used to configure the plugin.
type RequestedToCapacityRatioArgs struct {
FunctionShape priorities.FunctionShape
ResourceToWeightMap priorities.ResourceToWeightMap
FunctionShape FunctionShape
ResourceToWeightMap ResourceToWeightMap
}
// NewRequestedToCapacityRatio initializes a new plugin and returns it.
@ -42,18 +60,22 @@ func NewRequestedToCapacityRatio(plArgs *runtime.Unknown, handle framework.Frame
if err := framework.DecodeInto(plArgs, args); err != nil {
return nil, err
}
p := priorities.RequestedToCapacityRatioResourceAllocationPriority(args.FunctionShape, args.ResourceToWeightMap)
return &RequestedToCapacityRatio{
handle: handle,
prioritize: p.PriorityMap,
handle: handle,
resourceAllocationScorer: resourceAllocationScorer{
RequestedToCapacityRatioName,
buildRequestedToCapacityRatioScorerFunction(args.FunctionShape, args.ResourceToWeightMap),
args.ResourceToWeightMap,
},
}, nil
}
// RequestedToCapacityRatio is a score plugin that allows users to apply bin packing
// to core resources like CPU and memory, as well as extended resources like accelerators.
type RequestedToCapacityRatio struct {
handle framework.FrameworkHandle
prioritize priorities.PriorityMapFunction
handle framework.FrameworkHandle
resourceAllocationScorer
}
var _ framework.ScorePlugin = &RequestedToCapacityRatio{}
@ -69,12 +91,113 @@ func (pl *RequestedToCapacityRatio) Score(ctx context.Context, _ *framework.Cycl
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}
// Note that RequestedToCapacityRatioPriority doesn't use priority metadata, hence passing nil here.
s, err := pl.prioritize(pod, nil, nodeInfo)
return s.Score, migration.ErrorToFrameworkStatus(err)
return pl.score(pod, nodeInfo)
}
// ScoreExtensions of the Score plugin.
func (pl *RequestedToCapacityRatio) ScoreExtensions() framework.ScoreExtensions {
return nil
}
// NewFunctionShape creates an instance of FunctionShape in a safe way, performing all
// necessary sanity checks.
func NewFunctionShape(points []FunctionShapePoint) (FunctionShape, error) {
n := len(points)
if n == 0 {
return nil, fmt.Errorf("at least one point must be specified")
}
for i := 1; i < n; i++ {
if points[i-1].Utilization >= points[i].Utilization {
return nil, fmt.Errorf("utilization values must be sorted. Utilization[%d]==%d >= Utilization[%d]==%d", i-1, points[i-1].Utilization, i, points[i].Utilization)
}
}
for i, point := range points {
if point.Utilization < minUtilization {
return nil, fmt.Errorf("utilization values must not be less than %d. Utilization[%d]==%d", minUtilization, i, point.Utilization)
}
if point.Utilization > maxUtilization {
return nil, fmt.Errorf("utilization values must not be greater than %d. Utilization[%d]==%d", maxUtilization, i, point.Utilization)
}
if point.Score < minScore {
return nil, fmt.Errorf("score values must not be less than %d. Score[%d]==%d", minScore, i, point.Score)
}
if point.Score > maxScore {
return nil, fmt.Errorf("score valuses not be greater than %d. Score[%d]==%d", maxScore, i, point.Score)
}
}
// Make a defensive copy so that we rely on no assumptions about the caller leaving the passed slice unchanged afterwards.
pointsCopy := make(FunctionShape, n)
copy(pointsCopy, points)
return pointsCopy, nil
}
func validateResourceWeightMap(resourceToWeightMap ResourceToWeightMap) error {
if len(resourceToWeightMap) == 0 {
return fmt.Errorf("resourceToWeightMap cannot be nil")
}
for resource, weight := range resourceToWeightMap {
if weight < 1 {
return fmt.Errorf("resource %s weight %d must not be less than 1", string(resource), weight)
}
}
return nil
}
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape, resourceToWeightMap ResourceToWeightMap) func(resourceToValueMap, resourceToValueMap, bool, int, int) int64 {
rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape)
err := validateResourceWeightMap(resourceToWeightMap)
if err != nil {
klog.Error(err)
}
resourceScoringFunction := func(requested, capacity int64) int64 {
if capacity == 0 || requested > capacity {
return rawScoringFunction(maxUtilization)
}
return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity)
}
return func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range resourceToWeightMap {
resourceScore := resourceScoringFunction(requested[resource], allocable[resource])
if resourceScore > 0 {
nodeScore += resourceScore * weight
weightSum += weight
}
}
if weightSum == 0 {
return 0
}
return int64(math.Round(float64(nodeScore) / float64(weightSum)))
}
}
// buildBrokenLinearFunction creates a function built from linear segments, defined via the shape array.
// shape[i].Utilization represents the points on the "utilization" axis where adjacent segments meet.
// shape[i].Score represents the function value at those meeting points.
//
// The function f(p) is defined as:
//   shape[0].Score for p < shape[0].Utilization
//   shape[i].Score for p == shape[i].Utilization
//   shape[n-1].Score for p > shape[n-1].Utilization
// and is linearly interpolated between meeting points (shape[i-1].Utilization < p < shape[i].Utilization).
func buildBrokenLinearFunction(shape FunctionShape) func(int64) int64 {
n := len(shape)
return func(p int64) int64 {
for i := 0; i < n; i++ {
if p <= shape[i].Utilization {
if i == 0 {
return shape[0].Score
}
return shape[i-1].Score + (shape[i].Score-shape[i-1].Score)*(p-shape[i-1].Utilization)/(shape[i].Utilization-shape[i-1].Utilization)
}
}
return shape[n-1].Score
}
}
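
For reference, a minimal standalone re-implementation of this interpolation (local names, not the plugin's API), evaluated against the {(0,0),(100,10)} shape that the tests below configure:

package main

import "fmt"

type point struct{ utilization, score int64 }

// evaluate mirrors buildBrokenLinearFunction's interpolation over a shape
// whose points are sorted by utilization.
func evaluate(shape []point, p int64) int64 {
	for i := 0; i < len(shape); i++ {
		if p <= shape[i].utilization {
			if i == 0 {
				return shape[0].score
			}
			return shape[i-1].score + (shape[i].score-shape[i-1].score)*
				(p-shape[i-1].utilization)/(shape[i].utilization-shape[i-1].utilization)
		}
	}
	return shape[len(shape)-1].score
}

func main() {
	shape := []point{{0, 0}, {100, 10}}
	// The 0-100 utilization axis maps linearly onto the 0-10 score range.
	fmt.Println(evaluate(shape, 25), evaluate(shape, 50)) // 2 5
}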

View File

@ -21,6 +21,7 @@ import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
@ -102,3 +103,500 @@ func makePod(node string, milliCPU, memory int64) *v1.Pod {
},
}
}
func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{})
assert.Equal(t, "at least one point must be specified", err.Error())
}
func TestCreatingResourceNegativeWeight(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{v1.ResourceCPU: -1})
assert.Equal(t, "resource cpu weight -1 must not be less than 1", err.Error())
}
func TestCreatingResourceDefaultWeight(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{})
assert.Equal(t, "resourceToWeightMap cannot be nil", err.Error())
}
func TestCreatingFunctionShapeErrorsIfXIsNotSorted(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{{10, 1}, {15, 2}, {20, 3}, {19, 4}, {25, 5}})
assert.Equal(t, "utilization values must be sorted. Utilization[2]==20 >= Utilization[3]==19", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{10, 1}, {20, 2}, {20, 3}, {22, 4}, {25, 5}})
assert.Equal(t, "utilization values must be sorted. Utilization[1]==20 >= Utilization[2]==20", err.Error())
}
func TestCreatingFunctionPointNotInAllowedRange(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{{-1, 0}, {100, 100}})
assert.Equal(t, "utilization values must not be less than 0. Utilization[0]==-1", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {101, 100}})
assert.Equal(t, "utilization values must not be greater than 100. Utilization[1]==101", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, -1}, {100, 100}})
assert.Equal(t, "score values must not be less than 0. Score[0]==-1", err.Error())
_, err = NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 101}})
assert.Equal(t, "score valuses not be greater than 100. Score[1]==101", err.Error())
}
func TestBrokenLinearFunction(t *testing.T) {
type Assertion struct {
p int64
expected int64
}
type Test struct {
points []FunctionShapePoint
assertions []Assertion
}
tests := []Test{
{
points: []FunctionShapePoint{{10, 1}, {90, 9}},
assertions: []Assertion{
{p: -10, expected: 1},
{p: 0, expected: 1},
{p: 9, expected: 1},
{p: 10, expected: 1},
{p: 15, expected: 1},
{p: 19, expected: 1},
{p: 20, expected: 2},
{p: 89, expected: 8},
{p: 90, expected: 9},
{p: 99, expected: 9},
{p: 100, expected: 9},
{p: 110, expected: 9},
},
},
{
points: []FunctionShapePoint{{0, 2}, {40, 10}, {100, 0}},
assertions: []Assertion{
{p: -10, expected: 2},
{p: 0, expected: 2},
{p: 20, expected: 6},
{p: 30, expected: 8},
{p: 40, expected: 10},
{p: 70, expected: 5},
{p: 100, expected: 0},
{p: 110, expected: 0},
},
},
{
points: []FunctionShapePoint{{0, 2}, {40, 2}, {100, 2}},
assertions: []Assertion{
{p: -10, expected: 2},
{p: 0, expected: 2},
{p: 20, expected: 2},
{p: 30, expected: 2},
{p: 40, expected: 2},
{p: 70, expected: 2},
{p: 100, expected: 2},
{p: 110, expected: 2},
},
},
}
for _, test := range tests {
functionShape, err := NewFunctionShape(test.points)
assert.Nil(t, err)
function := buildBrokenLinearFunction(functionShape)
for _, assertion := range test.assertions {
assert.InDelta(t, assertion.expected, function(assertion.p), 0.1, "points=%v, p=%d", test.points, assertion.p)
}
}
}
func TestResourceBinPackingSingleExtended(t *testing.T) {
extendedResource := "intel.com/foo"
extendedResource1 := map[string]int64{
"intel.com/foo": 4,
}
extendedResource2 := map[string]int64{
"intel.com/foo": 8,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("4"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node1 Score: 0
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node2 Score: 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node2 Score: 5
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node1 Score: 5
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
state := framework.NewCycleState()
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
args := &runtime.Unknown{Raw: []byte(`{"FunctionShape" : [{"Utilization" : 0, "Score" : 0}, {"Utilization" : 100, "Score" : 10}], "ResourceToWeightMap" : {"intel.com/foo" : 1}}`)}
p, _ := NewRequestedToCapacityRatio(args, fh)
var gotList framework.NodeScoreList
for _, n := range test.nodes {
score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
}
if !reflect.DeepEqual(test.expectedList, gotList) {
t.Errorf("expected %#v, got %#v", test.expectedList, gotList)
}
})
}
}
func TestResourceBinPackingMultipleExtended(t *testing.T) {
extendedResource1 := "intel.com/foo"
extendedResource2 := "intel.com/bar"
extendedResources1 := map[string]int64{
"intel.com/foo": 4,
"intel.com/bar": 8,
}
extendedResources2 := map[string]int64{
"intel.com/foo": 8,
"intel.com/bar": 4,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("2"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("4"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList framework.NodeScoreList
name string
}{
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node1 Score: ((0 * 3) + (0 * 5)) / 8 = 0
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node2 Score: ((0 * 3) + (0 * 5)) / 8 = 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((2 * 3) + (5 * 5)) / 8 = 4 (rounded)
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: ((5 * 3) + (2 * 5)) / 8 = 3 (rounded)
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((2 * 3) + (5 * 5)) / 8 = 4 (rounded)
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node2 Score: ((10 * 3) + (5 * 5)) / 8 = 7 (rounded)
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: ((5 * 3) + (5 * 5)) / 8 = 5
// Node2 scores (used resources) on 0-10 scale
// intel.com/foo:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: ((10 * 3) + (2 * 5)) / 8 = 5
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
state := framework.NewCycleState()
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
args := &runtime.Unknown{Raw: []byte(`{"FunctionShape" : [{"Utilization" : 0, "Score" : 0}, {"Utilization" : 100, "Score" : 10}], "ResourceToWeightMap" : {"intel.com/foo" : 3, "intel.com/bar" : 5}}`)}
p, _ := NewRequestedToCapacityRatio(args, fh)
var gotList framework.NodeScoreList
for _, n := range test.nodes {
score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
}
if !reflect.DeepEqual(test.expectedList, gotList) {
t.Errorf("expected %#v, got %#v", test.expectedList, gotList)
}
})
}
}
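
As a back-of-the-envelope check of the weighted expectations above (shape {(0,0),(100,10)}, weights intel.com/foo=3 and intel.com/bar=5), a standalone sketch reproducing machine1's score of 4 for extendedResourcePod1:

package main

import (
	"fmt"
	"math"
)

// resourceScore reproduces resourceScoringFunction followed by the raw
// scoring function u/10 implied by the shape {(0,0),(100,10)}.
func resourceScore(requested, capacity int64) int64 {
	return (100 - (capacity-requested)*100/capacity) / 10
}

func main() {
	foo := resourceScore(2, 8) // 2
	bar := resourceScore(2, 4) // 5
	// Weighted mean with rounding, as in the plugin's scorer.
	fmt.Println(int64(math.Round(float64(foo*3+bar*5) / 8))) // round(31/8) = 4
}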

View File

@ -14,11 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
package noderesources
import (
"fmt"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
@ -29,37 +27,35 @@ import (
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct {
Name string
scorer func(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
resourceToWeightMap ResourceToWeightMap
}
// ResourceToWeightMap contains resource name and weight.
type ResourceToWeightMap map[v1.ResourceName]int64
// ResourceToValueMap contains resource name and score.
type ResourceToValueMap map[v1.ResourceName]int64
// DefaultRequestedRatioResources is used to set default requestToWeight map for CPU and memory
var DefaultRequestedRatioResources = ResourceToWeightMap{v1.ResourceMemory: 1, v1.ResourceCPU: 1}
// PriorityMap prioritizes nodes according to the resource allocations on the node.
// It will use the `scorer` function to calculate the score.
func (r *ResourceAllocationPriority) PriorityMap(
// resourceAllocationScorer contains information to calculate resource allocation score.
type resourceAllocationScorer struct {
Name string
scorer func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
resourceToWeightMap ResourceToWeightMap
}
// resourceToValueMap contains resource name and score.
type resourceToValueMap map[v1.ResourceName]int64
// score will use the `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score(
pod *v1.Pod,
meta interface{},
nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
nodeInfo *schedulernodeinfo.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
if node == nil {
return framework.NodeScore{}, fmt.Errorf("node not found")
return 0, framework.NewStatus(framework.Error, "node not found")
}
if r.resourceToWeightMap == nil {
return framework.NodeScore{}, fmt.Errorf("resources not found")
return 0, framework.NewStatus(framework.Error, "resources not found")
}
requested := make(ResourceToValueMap, len(r.resourceToWeightMap))
allocatable := make(ResourceToValueMap, len(r.resourceToWeightMap))
requested := make(resourceToValueMap, len(r.resourceToWeightMap))
allocatable := make(resourceToValueMap, len(r.resourceToWeightMap))
for resource := range r.resourceToWeightMap {
allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource)
}
@ -90,10 +86,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
}
}
return framework.NodeScore{
Name: node.Name,
Score: score,
}, nil
return score, nil
}
// calculateResourceAllocatableRequest returns resources Allocatable and Requested values

View File

@ -37,3 +37,19 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
},
}
}
func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedResource map[string]int64) *v1.Node {
resourceList := make(map[v1.ResourceName]resource.Quantity)
for res, quantity := range extendedResource {
resourceList[v1.ResourceName(res)] = *resource.NewQuantity(quantity, resource.DecimalSI)
}
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: resourceList,
Allocatable: resourceList,
},
}
}
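
For illustration, a usage sketch of the helper above as the bin-packing tests call it, assuming it sits in the same test package (4 CPUs, roughly 10GiB of memory, and 8 units of an extended resource):

// node has Capacity and Allocatable set to cpu=4000m, memory=10000MiB,
// and intel.com/foo=8.
node := makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024,
	map[string]int64{"intel.com/foo": 8})
_ = node // e.g. handed to nodeinfosnapshot.CreateNodeInfoMap in the tests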