Merge pull request #77688 from sudeshsh/extended_resource_bin_packing

Extending the RequestedToCapacityRatio priority function to support resource bin packing of extended resources.
Merged by Kubernetes Prow Robot on 2019-08-27 22:41:11 -07:00 (committed via GitHub), commit 668bf42d11
20 changed files with 900 additions and 130 deletions
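For reference, this is the kind of scheduler Policy entry the change enables (taken from the 1.16 compatibility fixture added below; the intel.com/foo and intel.com/bar resource names are illustrative):

{
    "name": "RequestedToCapacityRatioPriority",
    "weight": 2,
    "argument": {
        "requestedToCapacityRatioArguments": {
            "shape": [
                {"utilization": 0, "score": 0},
                {"utilization": 50, "score": 7}
            ],
            "resources": [
                {"name": "intel.com/foo", "weight": 3},
                {"name": "intel.com/bar", "weight": 5}
            ]
        }
    }
}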


@ -19,14 +19,14 @@ package priorities
import (
"math"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
var (
balancedResourcePriority = &ResourceAllocationPriority{"BalancedResourceAllocation", balancedResourceScorer}
balancedResourcePriority = &ResourceAllocationPriority{"BalancedResourceAllocation", balancedResourceScorer, DefaultRequestedRatioResources}
// BalancedResourceAllocationMap favors nodes with balanced resource usage rate.
// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together
@ -38,9 +38,10 @@ var (
BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap
)
func balancedResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU)
memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory)
// TODO: use resource weights in the scorer function
func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuFraction := fractionOfCapacity(requested[v1.ResourceCPU], allocable[v1.ResourceCPU])
memoryFraction := fractionOfCapacity(requested[v1.ResourceMemory], allocable[v1.ResourceMemory])
// This is to find the node with the most balanced CPU, memory and volume usage.
if cpuFraction >= 1 || memoryFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.


@ -20,7 +20,7 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -402,35 +402,24 @@ func TestBalancedResourceAllocation(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
metadata := &priorityMetadata{
nonZeroRequest: getNonZeroRequests(test.pod),
}
for _, hasMeta := range []bool{true, false} {
if len(test.pod.Spec.Volumes) > 0 {
maxVolumes := 5
for _, info := range nodeNameToInfo {
info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
}
}
var function PriorityFunction
if hasMeta {
function = priorityFunction(BalancedResourceAllocationMap, nil, metadata)
} else {
function = priorityFunction(BalancedResourceAllocationMap, nil, nil)
}
list, err := function(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("hasMeta %#v expected %#v, got %#v", hasMeta, test.expectedList, list)
if len(test.pod.Spec.Volumes) > 0 {
maxVolumes := 5
for _, info := range nodeNameToInfo {
info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
}
}
function := priorityFunction(BalancedResourceAllocationMap, nil, nil)
list, err := function(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}


@ -18,11 +18,11 @@ package priorities
import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
var (
leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer}
leastRequestedRatioResources = DefaultRequestedRatioResources
leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer, leastRequestedRatioResources}
// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
@ -33,9 +33,14 @@ var (
LeastRequestedPriorityMap = leastResourcePriority.PriorityMap
)
func leastResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
leastRequestedScore(requested.Memory, allocable.Memory)) / 2
func leastResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range leastRequestedRatioResources {
resourceScore := leastRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return nodeScore / weightSum
}
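A quick worked example of the weighted average above, as a self-contained sketch (the map literals and values are illustrative; leastRequestedScore mirrors the scheduler's ((capacity-requested)*10)/capacity formula):

package main

import "fmt"

// leastRequestedScore returns unused capacity on a 0-10 scale (integer math).
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	weights := map[string]int64{"cpu": 1, "memory": 1} // like DefaultRequestedRatioResources
	requested := map[string]int64{"cpu": 1000, "memory": 2 << 30}
	allocatable := map[string]int64{"cpu": 4000, "memory": 8 << 30}
	var nodeScore, weightSum int64
	for r, w := range weights {
		nodeScore += leastRequestedScore(requested[r], allocatable[r]) * w
		weightSum += w
	}
	// With equal weights this reduces to the old (cpuScore+memoryScore)/2: (7+7)/2 = 7.
	fmt.Println(nodeScore / weightSum)
}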
// The unused capacity is calculated on a scale of 0-10


@ -17,7 +17,7 @@ limitations under the License.
package priorities
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
@ -45,7 +45,6 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
// priorityMetadata is a type that is passed as metadata for priority functions
type priorityMetadata struct {
nonZeroRequest *schedulernodeinfo.Resource
podLimits *schedulernodeinfo.Resource
podTolerations []v1.Toleration
affinity *v1.Affinity
@ -62,7 +61,6 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
return nil
}
return &priorityMetadata{
nonZeroRequest: getNonZeroRequests(pod),
podLimits: getResourceLimits(pod),
podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
affinity: pod.Spec.Affinity,


@ -21,7 +21,7 @@ import (
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@ -137,7 +137,6 @@ func TestPriorityMetadata(t *testing.T) {
{
pod: podWithTolerationsAndAffinity,
expected: &priorityMetadata{
nonZeroRequest: nonZeroReqs,
podLimits: nonPodLimits,
podTolerations: tolerations,
affinity: podAffinity,
@ -147,7 +146,6 @@ func TestPriorityMetadata(t *testing.T) {
{
pod: podWithTolerationsAndRequests,
expected: &priorityMetadata{
nonZeroRequest: specifiedReqs,
podLimits: nonPodLimits,
podTolerations: tolerations,
affinity: nil,
@ -157,7 +155,6 @@ func TestPriorityMetadata(t *testing.T) {
{
pod: podWithAffinityAndRequests,
expected: &priorityMetadata{
nonZeroRequest: specifiedReqs,
podLimits: specifiedPodLimits,
podTolerations: nil,
affinity: podAffinity,


@ -18,11 +18,11 @@ package priorities
import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
var (
mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer}
mostRequestedRatioResources = DefaultRequestedRatioResources
mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer, mostRequestedRatioResources}
// MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
@ -31,9 +31,15 @@ var (
MostRequestedPriorityMap = mostResourcePriority.PriorityMap
)
func mostResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
mostRequestedScore(requested.Memory, allocable.Memory)) / 2
func mostResourceScorer(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range mostRequestedRatioResources {
resourceScore := mostRequestedScore(requested[resource], allocable[resource])
nodeScore += resourceScore * weight
weightSum += weight
}
return nodeScore / weightSum
}
// The used capacity is calculated on a scale of 0-10


@ -18,9 +18,10 @@ package priorities
import (
"fmt"
"math"
"k8s.io/klog"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// FunctionShape represents shape of scoring function.
@ -84,23 +85,39 @@ func NewFunctionShape(points []FunctionShapePoint) (FunctionShape, error) {
return pointsCopy, nil
}
func validateResourceWeightMap(resourceToWeightMap ResourceToWeightMap) error {
if len(resourceToWeightMap) == 0 {
return fmt.Errorf("resourceToWeightMap cannot be nil")
}
for resource, weight := range resourceToWeightMap {
if weight < 1 {
return fmt.Errorf("resource %s weight %d must not be less than 1", string(resource), weight)
}
}
return nil
}
// RequestedToCapacityRatioResourceAllocationPriorityDefault creates a requestedToCapacity based
// ResourceAllocationPriority using default resource scoring function shape.
// The default function assigns 1.0 to a resource when all of its capacity is available
// and 0.0 when the requested amount equals its capacity.
func RequestedToCapacityRatioResourceAllocationPriorityDefault() *ResourceAllocationPriority {
return RequestedToCapacityRatioResourceAllocationPriority(defaultFunctionShape)
return RequestedToCapacityRatioResourceAllocationPriority(defaultFunctionShape, DefaultRequestedRatioResources)
}
// RequestedToCapacityRatioResourceAllocationPriority creates a requestedToCapacity based
// ResourceAllocationPriority using provided resource scoring function shape.
func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape FunctionShape) *ResourceAllocationPriority {
return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape)}
func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape FunctionShape, resourceToWeightMap ResourceToWeightMap) *ResourceAllocationPriority {
return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape, resourceToWeightMap), resourceToWeightMap}
}
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulernodeinfo.Resource, *schedulernodeinfo.Resource, bool, int, int) int64 {
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape, resourceToWeightMap ResourceToWeightMap) func(ResourceToValueMap, ResourceToValueMap, bool, int, int) int64 {
rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape)
err := validateResourceWeightMap(resourceToWeightMap)
if err != nil {
klog.Error(err)
}
resourceScoringFunction := func(requested, capacity int64) int64 {
if capacity == 0 || requested > capacity {
return rawScoringFunction(maxUtilization)
@ -108,11 +125,19 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionSh
return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity)
}
return func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuScore := resourceScoringFunction(requested.MilliCPU, allocable.MilliCPU)
memoryScore := resourceScoringFunction(requested.Memory, allocable.Memory)
return (cpuScore + memoryScore) / 2
return func(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
var nodeScore, weightSum int64
for resource, weight := range resourceToWeightMap {
resourceScore := resourceScoringFunction(requested[resource], allocable[resource])
if resourceScore > 0 {
nodeScore += resourceScore * weight
weightSum += weight
}
}
if weightSum == 0 {
return 0
}
return int64(math.Round(float64(nodeScore) / float64(weightSum)))
}
}
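For reference, the tests added below construct this priority with a custom weight map like so:

functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 1}
prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)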


@ -22,7 +22,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -34,6 +34,17 @@ func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) {
assert.Equal(t, "at least one point must be specified", err.Error())
}
func TestCreatingResourceNegativeWeight(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{v1.ResourceCPU: -1})
assert.Equal(t, "resource cpu weight -1 must not be less than 1", err.Error())
}
func TestCreatingResourceEmptyWeightMap(t *testing.T) {
err := validateResourceWeightMap(ResourceToWeightMap{})
assert.Equal(t, "resourceToWeightMap cannot be nil or empty", err.Error())
}
func TestCreatingFunctionShapeErrorsIfXIsNotSorted(t *testing.T) {
var err error
_, err = NewFunctionShape([]FunctionShapePoint{{10, 1}, {15, 2}, {20, 3}, {19, 4}, {25, 5}})
@ -239,3 +250,378 @@ func TestRequestedToCapacityRatio(t *testing.T) {
}
}
}
func TestResourceBinPackingSingleExtended(t *testing.T) {
extendedResource := "intel.com/foo"
extendedResource1 := map[string]int64{
"intel.com/foo": 4,
}
extendedResource2 := map[string]int64{
"intel.com/foo": 8,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource): resource.MustParse("4"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
name string
}{
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node1 Score: 0
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node2 Score: 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node2 Score: 5
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 5}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node1 Score: 2
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node1 Score: 5
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction((used + requested) / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// Node2 Score: 10
pod: &v1.Pod{Spec: extendedResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 10}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 1}
prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
list, err := priorityFunction(prior.PriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}
func TestResourceBinPackingMultipleExtended(t *testing.T) {
extendedResource1 := "intel.com/foo"
extendedResource2 := "intel.com/bar"
extendedResources1 := map[string]int64{
"intel.com/foo": 4,
"intel.com/bar": 8,
}
extendedResources2 := map[string]int64{
"intel.com/foo": 8,
"intel.com/bar": 4,
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
extendedResourcePod1 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("2"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
extendedResourcePod2 := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(extendedResource1): resource.MustParse("4"),
v1.ResourceName(extendedResource2): resource.MustParse("2"),
},
},
},
},
}
machine2Pod := extendedResourcePod1
machine2Pod.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
name string
}{
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// Node1 Score: (0 * 3) + (0 * 5) / 8 = 0
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),4)
// = 100 - (4-0)*(100/4) = 0 = rawScoringFunction(0)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+0),8)
// = 100 - (8-0)*(100/8) = 0 = rawScoringFunction(0)
// Node2 Score: (0 * 3) + (0 * 5) / 8 = 0
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
name: "nothing scheduled, nothing requested",
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: (2 * 3) + (5 * 5) / 8 = 4
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: (5 * 3) + (2 * 5) / 8 = 3
pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 3}},
name: "resources requested, pods scheduled with less resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: (2 * 3) + (5 * 5) / 8 = 4
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((2+2),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((2+2),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// Node2 Score: (10 * 3) + (5 * 5) / 8 = 7
pod: &v1.Pod{Spec: extnededResourcePod1},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 7}},
name: "resources requested, pods scheduled with resources, on node with existing pod running ",
pods: []*v1.Pod{
{Spec: machine2Pod},
},
},
{
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// used + requested / available
// intel.com/foo Score: { (0 + 4) / 8 } * 10 = 0
// intel.com/bar Score: { (0 + 2) / 4 } * 10 = 0
// Node1 Score: (0.25 * 3) + (0.5 * 5) / 8 = 5
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node2 scores (used resources) on 0-10 scale
// used + requested / available
// intel.com/foo Score: { (0 + 4) / 4 } * 10 = 0
// intel.com/bar Score: { (0 + 2) / 8 } * 10 = 0
// Node2 Score: (1 * 3) + (0.25 * 5) / 8 = 5
// resources["intel.com/foo"] = 3
// resources["intel.com/bar"] = 5
// Node1 scores (used resources) on 0-10 scale
// Node1 Score:
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+4),8)
// = 100 - (8-4)*(100/8) = 50 = rawScoringFunction(50)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),4)
// = 100 - (4-2)*(100/4) = 50 = rawScoringFunction(50)
// Node1 Score: (5 * 3) + (5 * 5) / 8 = 5
// Node2 scores (used resources) on 0-10 scale
// rawScoringFunction(used + requested / available)
// intel.com/foo:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+4),4)
// = 100 - (4-4)*(100/4) = 100 = rawScoringFunction(100)
// intel.com/bar:
// rawScoringFunction(used + requested / available)
// resourceScoringFunction((0+2),8)
// = 100 - (8-2)*(100/8) = 25 = rawScoringFunction(25)
// Node2 Score: (10 * 3) + (2 * 5) / 8 = 5
pod: &v1.Pod{Spec: extnededResourcePod2},
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 5}},
name: "resources requested, pods scheduled with more resources",
pods: []*v1.Pod{
{Spec: noResources},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 3, v1.ResourceName("intel.com/bar"): 5}
prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
list, err := priorityFunction(prior.PriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
})
}
}


@ -19,9 +19,10 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@ -30,10 +31,20 @@ import (
// ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct {
Name string
scorer func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
Name string
scorer func(requested, allocable ResourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
resourceToWeightMap ResourceToWeightMap
}
// ResourceToWeightMap maps a resource name to its weight.
type ResourceToWeightMap map[v1.ResourceName]int64
// ResourceToValueMap maps a resource name to its quantity (allocatable or requested amount).
type ResourceToValueMap map[v1.ResourceName]int64
// DefaultRequestedRatioResources is the default resourceToWeight map, covering CPU and memory.
var DefaultRequestedRatioResources = ResourceToWeightMap{v1.ResourceMemory: 1, v1.ResourceCPU: 1}
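To illustrate the two map types (hypothetical values; CPU is in millicores and memory in bytes, matching schedulernodeinfo.Resource):

weights := ResourceToWeightMap{v1.ResourceCPU: 1, v1.ResourceMemory: 1}           // same shape as DefaultRequestedRatioResources
requested := ResourceToValueMap{v1.ResourceCPU: 2000, v1.ResourceMemory: 4 << 30} // 2 cores, 4GiB requested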
// PriorityMap prioritizes nodes according to the resource allocations on the node.
// It will use `scorer` function to calculate the score.
func (r *ResourceAllocationPriority) PriorityMap(
@ -44,44 +55,38 @@ func (r *ResourceAllocationPriority) PriorityMap(
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
}
allocatable := nodeInfo.AllocatableResource()
var requested schedulernodeinfo.Resource
if priorityMeta, ok := meta.(*priorityMetadata); ok {
requested = *priorityMeta.nonZeroRequest
} else {
// We couldn't parse metadata - fallback to computing it.
requested = *getNonZeroRequests(pod)
if r.resourceToWeightMap == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("resources not found")
}
requested := make(ResourceToValueMap, len(r.resourceToWeightMap))
allocatable := make(ResourceToValueMap, len(r.resourceToWeightMap))
for resource := range r.resourceToWeightMap {
allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource)
}
requested.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
requested.Memory += nodeInfo.NonZeroRequest().Memory
var score int64
// Check if the pod has volumes; volume counts could be added to the scorer function for balanced resource allocation.
if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
score = r.scorer(&requested, &allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
score = r.scorer(requested, allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
} else {
score = r.scorer(&requested, &allocatable, false, 0, 0)
score = r.scorer(requested, allocatable, false, 0, 0)
}
if klog.V(10) {
if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
klog.Infof(
"%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v , allocatable volumes %d, requested volumes %d, score %d",
pod.Name, node.Name, r.Name,
allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
requested.MilliCPU, requested.Memory,
allocatable, requested, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes,
score,
)
} else {
klog.Infof(
"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v ,score %d,",
pod.Name, node.Name, r.Name,
allocatable.MilliCPU, allocatable.Memory,
requested.MilliCPU, requested.Memory,
score,
allocatable, requested, score,
)
}
}
@ -91,27 +96,47 @@ func (r *ResourceAllocationPriority) PriorityMap(
}, nil
}
// getNonZeroRequests returns the total non-zero requests. If Overhead is defined for the pod and the
// calculateResourceAllocatableRequest returns the allocatable value and the requested value for the given resource.
func calculateResourceAllocatableRequest(nodeInfo *schedulernodeinfo.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
allocatable := nodeInfo.AllocatableResource()
requested := nodeInfo.RequestedResource()
podRequest := calculatePodResourceRequest(pod, resource)
switch resource {
case v1.ResourceCPU:
return allocatable.MilliCPU, (nodeInfo.NonZeroRequest().MilliCPU + podRequest)
case v1.ResourceMemory:
return allocatable.Memory, (nodeInfo.NonZeroRequest().Memory + podRequest)
case v1.ResourceEphemeralStorage:
return allocatable.EphemeralStorage, (requested.EphemeralStorage + podRequest)
default:
if v1helper.IsScalarResourceName(resource) {
return allocatable.ScalarResources[resource], (requested.ScalarResources[resource] + podRequest)
}
}
if klog.V(10) {
klog.Infof("requested resource %v not considered for node score calculation",
resource,
)
}
return 0, 0
}
// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
// PodOverhead feature is enabled, the Overhead is added to the result.
func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
result := &schedulernodeinfo.Resource{}
func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
var podRequest int64
for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i]
cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
result.MilliCPU += cpu
result.Memory += memory
value := priorityutil.GetNonzeroRequestForResource(resource, &container.Resources.Requests)
podRequest += value
}
// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
result.MilliCPU += pod.Spec.Overhead.Cpu().MilliValue()
}
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
result.Memory += pod.Spec.Overhead.Memory().Value()
if quantity, found := pod.Spec.Overhead[resource]; found {
podRequest += quantity.Value()
}
}
return result
return podRequest
}
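For example, a hypothetical pod with one container requesting 2 of an extended resource plus an Overhead of 1 (assuming the PodOverhead feature gate is enabled):

pod := &v1.Pod{
	Spec: v1.PodSpec{
		Containers: []v1.Container{{
			Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
				v1.ResourceName("intel.com/foo"): resource.MustParse("2"),
			}},
		}},
		Overhead: v1.ResourceList{v1.ResourceName("intel.com/foo"): resource.MustParse("1")},
	},
}
// calculatePodResourceRequest(pod, "intel.com/foo") == 3 (2 requested + 1 overhead)
// calculatePodResourceRequest(pod, v1.ResourceCPU) == DefaultMilliCPURequest, since no CPU request is set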


@ -40,6 +40,22 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
}
}
func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedResource map[string]int64) *v1.Node {
resourceList := make(map[v1.ResourceName]resource.Quantity)
for res, quantity := range extendedResource {
resourceList[v1.ResourceName(res)] = *resource.NewQuantity(quantity, resource.DecimalSI)
}
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: resourceList,
Allocatable: resourceList,
},
}
}
func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}) PriorityFunction {
return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := make(schedulerapi.HostPriorityList, 0, len(nodes))


@ -32,6 +32,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",


@ -16,7 +16,10 @@ limitations under the License.
package util
import "k8s.io/api/core/v1"
import (
v1 "k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
// For each of these resources, a pod that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
@ -33,21 +36,43 @@ const (
DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
)
// GetNonzeroRequests returns the default resource request if none is found or
// GetNonzeroRequests returns the default CPU and memory resource requests if none are found or
// what is provided on the request.
func GetNonzeroRequests(requests *v1.ResourceList) (milliCPU int64, memory int64) {
var outMilliCPU, outMemory int64
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found {
outMilliCPU = DefaultMilliCPURequest
} else {
outMilliCPU = requests.Cpu().MilliValue()
}
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceMemory]; !found {
outMemory = DefaultMemoryRequest
} else {
outMemory = requests.Memory().Value()
}
return outMilliCPU, outMemory
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
return GetNonzeroRequestForResource(v1.ResourceCPU, requests),
GetNonzeroRequestForResource(v1.ResourceMemory, requests)
}
// GetNonzeroRequestForResource returns the default resource request if none is found or
// what is provided on the request.
func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.ResourceList) int64 {
switch resource {
case v1.ResourceCPU:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found {
return DefaultMilliCPURequest
}
return requests.Cpu().MilliValue()
case v1.ResourceMemory:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceMemory]; !found {
return DefaultMemoryRequest
}
return requests.Memory().Value()
case v1.ResourceEphemeralStorage:
quantity, found := (*requests)[v1.ResourceEphemeralStorage]
if !found {
return 0
}
return quantity.Value()
default:
if v1helper.IsScalarResourceName(resource) {
quantity, found := (*requests)[resource]
if !found {
return 0
}
return quantity.Value()
}
}
return 0
}
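The tests below exercise this; for instance:

requests := v1.ResourceList{}
GetNonzeroRequestForResource(v1.ResourceCPU, &requests)                   // DefaultMilliCPURequest
GetNonzeroRequestForResource(v1.ResourceMemory, &requests)                // DefaultMemoryRequest
GetNonzeroRequestForResource(v1.ResourceName("intel.com/foo"), &requests) // 0: extended resources have no default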


@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
)
func TestGetNonzeroRequests(t *testing.T) {
func TestGetNonZeroRequest(t *testing.T) {
tests := []struct {
name string
requests v1.ResourceList
@ -73,3 +73,62 @@ func TestGetNonzeroRequests(t *testing.T) {
})
}
}
func TestGetNonzeroRequestForResource(t *testing.T) {
tests := []struct {
name string
requests v1.ResourceList
resource v1.ResourceName
expectedQuantity int64
}{
{
"extended_resource_not_found",
v1.ResourceList{},
v1.ResourceName("intel.com/foo"),
0,
},
{
"extended_resource_found",
v1.ResourceList{
v1.ResourceName("intel.com/foo"): resource.MustParse("4"),
},
v1.ResourceName("intel.com/foo"),
4,
},
{
"cpu_not_found",
v1.ResourceList{},
v1.ResourceCPU,
DefaultMilliCPURequest,
},
{
"memory_not_found",
v1.ResourceList{},
v1.ResourceMemory,
DefaultMemoryRequest,
},
{
"cpu_exist",
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
},
v1.ResourceCPU,
200,
},
{
"memory_exist",
v1.ResourceList{
v1.ResourceMemory: resource.MustParse("400Mi"),
},
v1.ResourceMemory,
400 * 1024 * 1024,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
realQuantity := GetNonzeroRequestForResource(test.resource, &test.requests)
assert.EqualValuesf(t, test.expectedQuantity, realQuantity, "Failed to test: %s", test.name)
})
}
}


@ -22,7 +22,7 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
@ -1061,6 +1061,139 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}},
},
},
"1.16": {
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MaxCSIVolumeCountPred"},
{"name": "MaxCinderVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "GeneralPredicates"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
],
"resources": [
{"name": "intel.com/foo", "weight": 3},
{"name": "intel.com/bar", "weight": 5}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
ExpectedPolicy: schedulerapi.Policy{
Predicates: []schedulerapi.PredicatePolicy{
{Name: "MatchNodeSelector"},
{Name: "PodFitsResources"},
{Name: "PodFitsHostPorts"},
{Name: "HostName"},
{Name: "NoDiskConflict"},
{Name: "NoVolumeZoneConflict"},
{Name: "PodToleratesNodeTaints"},
{Name: "CheckNodeMemoryPressure"},
{Name: "CheckNodeDiskPressure"},
{Name: "CheckNodePIDPressure"},
{Name: "CheckNodeCondition"},
{Name: "MaxEBSVolumeCount"},
{Name: "MaxGCEPDVolumeCount"},
{Name: "MaxAzureDiskVolumeCount"},
{Name: "MaxCSIVolumeCountPred"},
{Name: "MaxCinderVolumeCount"},
{Name: "MatchInterPodAffinity"},
{Name: "GeneralPredicates"},
{Name: "CheckVolumeBinding"},
{Name: "TestServiceAffinity", Argument: &schedulerapi.PredicateArgument{ServiceAffinity: &schedulerapi.ServiceAffinity{Labels: []string{"region"}}}},
{Name: "TestLabelsPresence", Argument: &schedulerapi.PredicateArgument{LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"foo"}, Presence: true}}},
},
Priorities: []schedulerapi.PriorityPolicy{
{Name: "EqualPriority", Weight: 2},
{Name: "ImageLocalityPriority", Weight: 2},
{Name: "LeastRequestedPriority", Weight: 2},
{Name: "BalancedResourceAllocation", Weight: 2},
{Name: "SelectorSpreadPriority", Weight: 2},
{Name: "NodePreferAvoidPodsPriority", Weight: 2},
{Name: "NodeAffinityPriority", Weight: 2},
{Name: "TaintTolerationPriority", Weight: 2},
{Name: "InterPodAffinityPriority", Weight: 2},
{Name: "MostRequestedPriority", Weight: 2},
{
Name: "RequestedToCapacityRatioPriority",
Weight: 2,
Argument: &schedulerapi.PriorityArgument{
RequestedToCapacityRatioArguments: &schedulerapi.RequestedToCapacityRatioArguments{
UtilizationShape: []schedulerapi.UtilizationShapePoint{
{Utilization: 0, Score: 0},
{Utilization: 50, Score: 7},
},
Resources: []schedulerapi.ResourceSpec{
{Name: v1.ResourceName("intel.com/foo"), Weight: 3},
{Name: v1.ResourceName("intel.com/bar"), Weight: 5},
},
},
},
},
},
ExtenderConfigs: []schedulerapi.ExtenderConfig{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &schedulerapi.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []schedulerapi.ExtenderManagedResource{{Name: v1.ResourceName("example.com/foo"), IgnoredByScheduler: true}},
Ignorable: true,
}},
},
},
}
registeredPredicates := sets.NewString(factory.ListRegisteredFitPredicates()...)


@ -19,7 +19,7 @@ package api
import (
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
@ -147,10 +147,11 @@ type LabelPreference struct {
Presence bool
}
// RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function
// RequestedToCapacityRatioArguments holds arguments specific to the RequestedToCapacityRatio priority function.
type RequestedToCapacityRatioArguments struct {
// Array of points defining the priority function shape
UtilizationShape []UtilizationShapePoint
Resources []ResourceSpec
}
// UtilizationShapePoint represents a single point of the priority function shape
@ -161,6 +162,14 @@ type UtilizationShapePoint struct {
Score int
}
// ResourceSpec represents a single resource for bin packing in RequestedToCapacityRatioArguments.
type ResourceSpec struct {
// Name of the resource to be managed by RequestedToCapacityRatio function.
Name v1.ResourceName
// Weight of the resource.
Weight int
}
// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {


@ -127,13 +127,14 @@ type LabelPreference struct {
Presence bool `json:"presence"`
}
// RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function
// RequestedToCapacityRatioArguments holds arguments specific to the RequestedToCapacityRatio priority function.
type RequestedToCapacityRatioArguments struct {
// Array of point defining priority function shape
// Array of points defining the priority function shape.
UtilizationShape []UtilizationShapePoint `json:"shape"`
Resources []ResourceSpec `json:"resources,omitempty"`
}
// UtilizationShapePoint represents single point of priority function shape
// UtilizationShapePoint represents a single point of the priority function shape.
type UtilizationShapePoint struct {
// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
Utilization int `json:"utilization"`
@ -141,6 +142,14 @@ type UtilizationShapePoint struct {
Score int `json:"score"`
}
// ResourceSpec represents a single resource and its weight for bin packing in RequestedToCapacityRatioArguments.
type ResourceSpec struct {
// Name of the resource to be managed by RequestedToCapacityRatio function.
Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
// Weight of the resource.
Weight int `json:"weight,omitempty"`
}
// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {


@ -575,6 +575,11 @@ func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapaci
*out = make([]UtilizationShapePoint, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceSpec, len(*in))
copy(*out, *in)
}
return
}
@ -588,6 +593,22 @@ func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRati
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
func (in *ResourceSpec) DeepCopy() *ResourceSpec {
if in == nil {
return nil
}
out := new(ResourceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
*out = *in


@ -575,6 +575,11 @@ func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapaci
*out = make([]UtilizationShapePoint, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceSpec, len(*in))
copy(*out, *in)
}
return
}
@ -588,6 +593,22 @@ func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRati
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
func (in *ResourceSpec) DeepCopy() *ResourceSpec {
if in == nil {
return nil
}
out := new(ResourceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
*out = *in


@ -390,8 +390,8 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
} else if policy.Argument.RequestedToCapacityRatioArguments != nil {
pcf = &PriorityConfigFactory{
MapReduceFunction: func(args PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) {
scoringFunctionShape := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(policy.Argument.RequestedToCapacityRatioArguments)
p := priorities.RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape)
scoringFunctionShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(policy.Argument.RequestedToCapacityRatioArguments)
p := priorities.RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape, resources)
return p.PriorityMap, nil
},
Weight: policy.Weight,
@ -414,7 +414,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
return RegisterPriorityConfigFactory(policy.Name, *pcf)
}
func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) priorities.FunctionShape {
func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *schedulerapi.RequestedToCapacityRatioArguments) (priorities.FunctionShape, priorities.ResourceToWeightMap) {
n := len(arguments.UtilizationShape)
points := make([]priorities.FunctionShapePoint, 0, n)
for _, point := range arguments.UtilizationShape {
@ -424,7 +424,18 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s
if err != nil {
klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
}
return shape
resourceToWeightMap := make(priorities.ResourceToWeightMap)
if len(arguments.Resources) == 0 {
// No resources specified in the policy: fall back to the default CPU/memory map.
return shape, priorities.DefaultRequestedRatioResources
}
for _, resource := range arguments.Resources {
weight := int64(resource.Weight)
if weight == 0 {
// Weight is optional in the policy; treat an unspecified weight as 1.
weight = 1
}
resourceToWeightMap[resource.Name] = weight
}
return shape, resourceToWeightMap
}
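As the test below verifies, a resource listed without a weight defaults to weight 1; a minimal sketch (assuming this package's schedulerapi and v1 import aliases):

arguments := schedulerapi.RequestedToCapacityRatioArguments{
	UtilizationShape: []schedulerapi.UtilizationShapePoint{{Utilization: 0, Score: 0}},
	Resources:        []schedulerapi.ResourceSpec{{Name: v1.ResourceCPU}}, // weight omitted
}
_, weights := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
// weights == priorities.ResourceToWeightMap{v1.ResourceCPU: 1}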
// IsPriorityFunctionRegistered is useful for testing providers.


@ -20,6 +20,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/api"
)
@ -94,12 +95,44 @@ func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArguments(t *testi
{Utilization: 10, Score: 1},
{Utilization: 30, Score: 5},
{Utilization: 70, Score: 2},
}}
builtShape := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
},
Resources: []api.ResourceSpec{
{Name: v1.ResourceCPU},
{Name: v1.ResourceMemory},
},
}
builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{
{Utilization: 10, Score: 1},
{Utilization: 30, Score: 5},
{Utilization: 70, Score: 2},
})
expectedResources := priorities.ResourceToWeightMap{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
}
assert.Equal(t, expectedShape, builtShape)
assert.Equal(t, expectedResources, resources)
}
func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArgumentsNilResourceToWeightMap(t *testing.T) {
arguments := api.RequestedToCapacityRatioArguments{
UtilizationShape: []api.UtilizationShapePoint{
{Utilization: 10, Score: 1},
{Utilization: 30, Score: 5},
{Utilization: 70, Score: 2},
},
}
builtShape, resources := buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(&arguments)
expectedShape, _ := priorities.NewFunctionShape([]priorities.FunctionShapePoint{
{Utilization: 10, Score: 1},
{Utilization: 30, Score: 5},
{Utilization: 70, Score: 2},
})
expectedResources := priorities.ResourceToWeightMap{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
}
assert.Equal(t, expectedShape, builtShape)
assert.Equal(t, expectedResources, resources)
}