Prioritizing nodes based on volume capacity: unit tests

Yecheng Fu 2021-03-02 10:25:35 +08:00
parent 21a43586e7
commit d791f7feef
5 changed files with 696 additions and 76 deletions


@@ -87,20 +87,20 @@ func (b *BindingInfo) StorageClassName() string {
     return b.pv.Spec.StorageClassName
 }
 
-// VolumeResource represents volume resource.
-type VolumeResource struct {
+// StorageResource represents storage resource.
+type StorageResource struct {
     Requested int64
     Capacity  int64
 }
 
-// VolumeResource returns volume resource.
-func (b *BindingInfo) VolumeResource() *VolumeResource {
+// StorageResource returns storage resource.
+func (b *BindingInfo) StorageResource() *StorageResource {
     // both fields are mandatory
     requestedQty := b.pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-    capacitQty := b.pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
-    return &VolumeResource{
+    capacityQty := b.pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
+    return &StorageResource{
         Requested: requestedQty.Value(),
-        Capacity:  capacitQty.Value(),
+        Capacity:  capacityQty.Value(),
     }
 }
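For reference, the Requested and Capacity fields above end up as plain byte counts: Quantity.Value() converts the PVC's storage request and the PV's capacity into int64 bytes, which the scorer later divides to get utilization. A standalone sketch of that conversion (illustrative, not part of this commit):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // A 50Gi request against a 200Gi PV becomes these byte counts.
    requested := resource.MustParse("50Gi")
    capacity := resource.MustParse("200Gi")
    fmt.Println(requested.Value(), capacity.Value()) // 53687091200 214748364800
}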


@@ -23,10 +23,13 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 )
 
-type classResourceMap map[string]*scheduling.VolumeResource
+// classResourceMap holds a map of storage class to resource.
+type classResourceMap map[string]*scheduling.StorageResource
 
+// volumeCapacityScorer calculates the score based on class storage resource information.
 type volumeCapacityScorer func(classResourceMap) int64
 
+// buildScorerFunction builds volumeCapacityScorer from the scoring function shape.
 func buildScorerFunction(scoringFunctionShape helper.FunctionShape) volumeCapacityScorer {
     rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape)
     f := func(requested, capacity int64) int64 {
@@ -37,15 +40,15 @@ func buildScorerFunction(scoringFunctionShape helper.FunctionShape) volumeCapaci
         return rawScoringFunction(requested * maxUtilization / capacity)
     }
     return func(classResources classResourceMap) int64 {
-        var nodeScore, weightSum int64
+        var nodeScore int64
+        // in alpha stage, all classes have the same weight
+        weightSum := len(classResources)
+        if weightSum == 0 {
+            return 0
+        }
         for _, resource := range classResources {
             classScore := f(resource.Requested, resource.Capacity)
             nodeScore += classScore
-            // in alpha stage, all classes have the same weight
-            weightSum += 1
-        }
-        if weightSum == 0 {
-            return 0
         }
         return int64(math.Round(float64(nodeScore) / float64(weightSum)))
     }
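The scorer maps each class's utilization through the broken-linear shape and then averages the per-class scores with equal weights. A small standalone sketch of that arithmetic, assuming the default shape (which is effectively linear from 0 to 100):

package main

import (
    "fmt"
    "math"
)

func main() {
    // With the default shape a class score equals its utilization percentage.
    hddScore := int64(30) // hdd class: 30 requested of 100 capacity
    ssdScore := int64(60) // ssd class: 60 requested of 100 capacity
    weightSum := 2        // in alpha, every class has the same weight
    nodeScore := int64(math.Round(float64(hddScore+ssdScore) / float64(weightSum)))
    fmt.Println(nodeScore) // 45, matching the "default shape, multiple classes" case in the tests below
}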


@@ -0,0 +1,312 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"testing"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
)
const (
classHDD = "hdd"
classSSD = "ssd"
)
func TestScore(t *testing.T) {
defaultShape := make(helper.FunctionShape, 0, len(defaultShapePoint))
for _, point := range defaultShapePoint {
defaultShape = append(defaultShape, helper.FunctionShapePoint{
Utilization: int64(point.Utilization),
Score: int64(point.Score) * (framework.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
type scoreCase struct {
classResources classResourceMap
score int64
}
tests := []struct {
name string
shape helper.FunctionShape
cases []scoreCase
}{
{
name: "default shape, single class",
shape: defaultShape,
cases: []scoreCase{
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
},
0,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
},
30,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 50,
Capacity: 100,
},
},
50,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
},
100,
},
},
},
{
name: "default shape, multiple classes",
shape: defaultShape,
cases: []scoreCase{
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
},
0,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
},
15,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
},
30,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 60,
Capacity: 100,
},
},
45,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 50,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 50,
Capacity: 100,
},
},
50,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 50,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
},
75,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
},
100,
},
},
},
{
name: "custom shape, multiple classes",
shape: helper.FunctionShape{
{
Utilization: 50,
Score: 0,
},
{
Utilization: 80,
Score: 30,
},
{
Utilization: 100,
Score: 50,
},
},
cases: []scoreCase{
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
},
0,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 0,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
},
0,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
},
0,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 30,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 60,
Capacity: 100,
},
},
5,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 50,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
},
25,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 90,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 90,
Capacity: 100,
},
},
40,
},
{
classResourceMap{
classHDD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
classSSD: &scheduling.StorageResource{
Requested: 100,
Capacity: 100,
},
},
50,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
f := buildScorerFunction(tt.shape)
for _, c := range tt.cases {
gotScore := f(c.classResources)
if gotScore != c.score {
t.Errorf("Expect %d, but got %d", c.score, gotScore)
}
}
})
}
}
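The expected values in the custom-shape cases follow from piecewise-linear interpolation between the shape points, then an equal-weight average across classes. A standalone sketch (not the scheduler helper itself) of the interpolation these expected scores assume:

package main

import (
    "fmt"
    "math"
)

// shapePoint mirrors a (utilization, score) pair; the names here are illustrative.
type shapePoint struct{ utilization, score int64 }

// brokenLinear interpolates between points and clamps outside the first/last one.
func brokenLinear(shape []shapePoint) func(int64) int64 {
    return func(u int64) int64 {
        for i, p := range shape {
            if u <= p.utilization {
                if i == 0 {
                    return p.score
                }
                prev := shape[i-1]
                return prev.score + (p.score-prev.score)*(u-prev.utilization)/(p.utilization-prev.utilization)
            }
        }
        return shape[len(shape)-1].score
    }
}

func main() {
    // Custom shape from the test: (50,0), (80,30), (100,50).
    f := brokenLinear([]shapePoint{{50, 0}, {80, 30}, {100, 50}})
    // hdd at 30% utilization scores 0, ssd at 60% scores 10;
    // the node score is the rounded average: (0+10)/2 = 5.
    hdd, ssd := f(30), f(60)
    fmt.Println(hdd, ssd, int64(math.Round(float64(hdd+ssd)/2))) // 0 10 5
}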


@@ -247,19 +247,20 @@ func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, po
         return 0, nil
     }
     // group by storage class
-    classToWeight := make(classToWeightMap)
-    requestedClassToValueMap := make(map[string]int64)
-    capacityClassToValueMap := make(map[string]int64)
+    classResources := make(classResourceMap)
     for _, staticBinding := range podVolumes.StaticBindings {
         class := staticBinding.StorageClassName()
-        volumeResource := staticBinding.VolumeResource()
-        if _, ok := requestedClassToValueMap[class]; !ok {
-            classToWeight[class] = 1 // in alpha stage, all classes have the same weight
+        storageResource := staticBinding.StorageResource()
+        if _, ok := classResources[class]; !ok {
+            classResources[class] = &scheduling.StorageResource{
+                Requested: 0,
+                Capacity:  0,
+            }
         }
-        requestedClassToValueMap[class] += volumeResource.Requested
-        capacityClassToValueMap[class] += volumeResource.Capacity
+        classResources[class].Requested += storageResource.Requested
+        classResources[class].Capacity += storageResource.Capacity
     }
-    return pl.scorer(requestedClassToValueMap, capacityClassToValueMap, classToWeight), nil
+    return pl.scorer(classResources), nil
 }
 
 // ScoreExtensions of the Score plugin.
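Note that when several static bindings share a storage class, their requests and capacities are summed before utilization is computed. A standalone sketch of that grouping, using hypothetical numbers rather than values from the tests:

package main

import "fmt"

// classResource mirrors the Requested/Capacity pair kept per storage class.
type classResource struct{ requested, capacity int64 }

func main() {
    // Two hypothetical bindings in the same class:
    // a 50Gi claim on a 200Gi PV and a 100Gi claim on another 200Gi PV.
    bindings := []classResource{{50, 200}, {100, 200}}
    var total classResource
    for _, b := range bindings {
        total.requested += b.requested
        total.capacity += b.capacity
    }
    // The scorer sees one entry for the class: 150 requested of 400,
    // i.e. 37% utilization with integer math.
    fmt.Printf("%d%%\n", total.requested*100/total.capacity) // 37%
}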


@@ -21,13 +21,20 @@ import (
     "reflect"
     "testing"
 
+    "github.com/google/go-cmp/cmp"
+    "github.com/google/go-cmp/cmp/cmpopts"
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/component-base/featuregate"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
     pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
     "k8s.io/kubernetes/pkg/controller/volume/scheduling"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -49,18 +56,71 @@ var (
         },
         VolumeBindingMode: &waitForFirstConsumer,
     }
+    waitHDDSC = &storagev1.StorageClass{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "wait-hdd-sc",
+        },
+        VolumeBindingMode: &waitForFirstConsumer,
+    }
 )
 
-func makePV(name string) *v1.PersistentVolume {
-    return &v1.PersistentVolume{
+func makeNode(name string) *v1.Node {
+    return &v1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
+            Labels: map[string]string{
+                v1.LabelHostname: name,
+            },
         },
     }
 }
 
-func addPVNodeAffinity(pv *v1.PersistentVolume, volumeNodeAffinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
-    pv.Spec.NodeAffinity = volumeNodeAffinity
+func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
+    for k, v := range labels {
+        node.Labels[k] = v
+    }
+    return node
+}
+
+func makePV(name string, className string) *v1.PersistentVolume {
+    return &v1.PersistentVolume{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+        },
+        Spec: v1.PersistentVolumeSpec{
+            StorageClassName: className,
+        },
+        Status: v1.PersistentVolumeStatus{
+            Phase: v1.VolumeAvailable,
+        },
+    }
+}
+
+func setPVNodeAffinity(pv *v1.PersistentVolume, keyValues map[string][]string) *v1.PersistentVolume {
+    matchExpressions := make([]v1.NodeSelectorRequirement, 0)
+    for key, values := range keyValues {
+        matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
+            Key:      key,
+            Operator: v1.NodeSelectorOpIn,
+            Values:   values,
+        })
+    }
+    pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
+        Required: &v1.NodeSelector{
+            NodeSelectorTerms: []v1.NodeSelectorTerm{
+                {
+                    MatchExpressions: matchExpressions,
+                },
+            },
+        },
+    }
+    return pv
+}
+
+func setPVCapacity(pv *v1.PersistentVolume, capacity resource.Quantity) *v1.PersistentVolume {
+    pv.Spec.Capacity = v1.ResourceList{
+        v1.ResourceName(v1.ResourceStorage): capacity,
+    }
     return pv
 }
@@ -81,6 +141,15 @@ func makePVC(name string, boundPVName string, storageClassName string) *v1.Persi
     return pvc
 }
 
+func setPVCRequestStorage(pvc *v1.PersistentVolumeClaim, request resource.Quantity) *v1.PersistentVolumeClaim {
+    pvc.Spec.Resources = v1.ResourceRequirements{
+        Requests: v1.ResourceList{
+            v1.ResourceName(v1.ResourceStorage): request,
+        },
+    }
+    return pvc
+}
+
 func makePod(name string, pvcNames []string) *v1.Pod {
     p := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
@@ -105,30 +174,42 @@ func TestVolumeBinding(t *testing.T) {
     table := []struct {
         name string
         pod *v1.Pod
-        node *v1.Node
+        nodes []*v1.Node
         pvcs []*v1.PersistentVolumeClaim
         pvs []*v1.PersistentVolume
+        feature featuregate.Feature
         wantPreFilterStatus *framework.Status
         wantStateAfterPreFilter *stateData
-        wantFilterStatus *framework.Status
+        wantFilterStatus []*framework.Status
+        wantScores []int64
     }{
         {
             name: "pod has not pvcs",
             pod: makePod("pod-a", nil),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             wantStateAfterPreFilter: &stateData{
                 skip: true,
             },
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "all bound",
            pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "pv-a", waitSC.Name),
             },
             pvs: []*v1.PersistentVolume{
-                makePV("pv-a"),
+                makePV("pv-a", waitSC.Name),
             },
             wantStateAfterPreFilter: &stateData{
                 boundClaims: []*v1.PersistentVolumeClaim{
@@ -137,36 +218,68 @@ func TestVolumeBinding(t *testing.T) {
                 claimsToBind: []*v1.PersistentVolumeClaim{},
                 podVolumesByNode: map[string]*scheduling.PodVolumes{},
             },
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "PVC does not exist",
             pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{},
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "Part of PVCs do not exist",
             pod: makePod("pod-a", []string{"pvc-a", "pvc-b"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "pv-a", waitSC.Name),
             },
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-b" not found`),
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "immediate claims not bound",
             pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "", immediateSC.Name),
             },
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod has unbound immediate PersistentVolumeClaims"),
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "unbound claims no matches",
             pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "", waitSC.Name),
             },
@@ -177,37 +290,28 @@ func TestVolumeBinding(t *testing.T) {
                 },
                 podVolumesByNode: map[string]*scheduling.PodVolumes{},
             },
-            wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
+            wantFilterStatus: []*framework.Status{
+                framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "bound and unbound unsatisfied",
             pod: makePod("pod-a", []string{"pvc-a", "pvc-b"}),
-            node: &v1.Node{
-                ObjectMeta: metav1.ObjectMeta{
-                    Labels: map[string]string{
-                        "foo": "barbar",
-                    },
-                },
+            nodes: []*v1.Node{
+                mergeNodeLabels(makeNode("node-a"), map[string]string{
+                    "foo": "barbar",
+                }),
             },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "pv-a", waitSC.Name),
                 makePVC("pvc-b", "", waitSC.Name),
             },
             pvs: []*v1.PersistentVolume{
-                addPVNodeAffinity(makePV("pv-a"), &v1.VolumeNodeAffinity{
-                    Required: &v1.NodeSelector{
-                        NodeSelectorTerms: []v1.NodeSelectorTerm{
-                            {
-                                MatchExpressions: []v1.NodeSelectorRequirement{
-                                    {
-                                        Key:      "foo",
-                                        Operator: v1.NodeSelectorOpIn,
-                                        Values:   []string{"bar"},
-                                    },
-                                },
-                            },
-                        },
-                    },
+                setPVNodeAffinity(makePV("pv-a", waitSC.Name), map[string][]string{
+                    "foo": {"bar"},
                 }),
             },
             wantStateAfterPreFilter: &stateData{
@@ -219,19 +323,33 @@ func TestVolumeBinding(t *testing.T) {
                 },
                 podVolumesByNode: map[string]*scheduling.PodVolumes{},
             },
-            wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonNodeConflict), string(scheduling.ErrReasonBindConflict)),
+            wantFilterStatus: []*framework.Status{
+                framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonNodeConflict), string(scheduling.ErrReasonBindConflict)),
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "pvc not found",
             pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
-            wantFilterStatus: nil,
+            wantFilterStatus: []*framework.Status{
+                nil,
+            },
+            wantScores: []int64{
+                0,
+            },
         },
         {
             name: "pv not found",
             pod: makePod("pod-a", []string{"pvc-a"}),
-            node: &v1.Node{},
+            nodes: []*v1.Node{
+                makeNode("node-a"),
+            },
             pvcs: []*v1.PersistentVolumeClaim{
                 makePVC("pvc-a", "pv-a", waitSC.Name),
             },
@@ -243,12 +361,176 @@ func TestVolumeBinding(t *testing.T) {
                 claimsToBind: []*v1.PersistentVolumeClaim{},
                 podVolumesByNode: map[string]*scheduling.PodVolumes{},
             },
-            wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `pvc(s) bound to non-existent pv(s)`),
+            wantFilterStatus: []*framework.Status{
+                framework.NewStatus(framework.UnschedulableAndUnresolvable, `pvc(s) bound to non-existent pv(s)`),
+            },
+            wantScores: []int64{
+                0,
+            },
},
{
name: "local volumes with close capacity are preferred",
pod: makePod("pod-a", []string{"pvc-a"}),
nodes: []*v1.Node{
makeNode("node-a"),
makeNode("node-b"),
makeNode("node-c"),
},
pvcs: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
},
pvs: []*v1.PersistentVolume{
setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
},
podVolumesByNode: map[string]*scheduling.PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
},
wantScores: []int64{
25,
50,
0,
},
},
{
name: "local volumes with close capacity are preferred (multiple pvcs)",
pod: makePod("pod-a", []string{"pvc-0", "pvc-1"}),
nodes: []*v1.Node{
makeNode("node-a"),
makeNode("node-b"),
makeNode("node-c"),
},
pvcs: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-0", "", waitSC.Name), resource.MustParse("50Gi")),
setPVCRequestStorage(makePVC("pvc-1", "", waitHDDSC.Name), resource.MustParse("100Gi")),
},
pvs: []*v1.PersistentVolume{
setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-a-2", waitHDDSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-a-3", waitHDDSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-2", waitHDDSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-3", waitHDDSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-0", "", waitSC.Name), resource.MustParse("50Gi")),
setPVCRequestStorage(makePVC("pvc-1", "", waitHDDSC.Name), resource.MustParse("100Gi")),
},
podVolumesByNode: map[string]*scheduling.PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
},
wantScores: []int64{
38,
75,
0,
},
},
{
name: "zonal volumes with close capacity are preferred",
pod: makePod("pod-a", []string{"pvc-a"}),
nodes: []*v1.Node{
mergeNodeLabels(makeNode("zone-a-node-a"), map[string]string{
"topology.kubernetes.io/region": "region-a",
"topology.kubernetes.io/zone": "zone-a",
}),
mergeNodeLabels(makeNode("zone-a-node-b"), map[string]string{
"topology.kubernetes.io/region": "region-a",
"topology.kubernetes.io/zone": "zone-a",
}),
mergeNodeLabels(makeNode("zone-b-node-a"), map[string]string{
"topology.kubernetes.io/region": "region-b",
"topology.kubernetes.io/zone": "zone-b",
}),
mergeNodeLabels(makeNode("zone-b-node-b"), map[string]string{
"topology.kubernetes.io/region": "region-b",
"topology.kubernetes.io/zone": "zone-b",
}),
mergeNodeLabels(makeNode("zone-c-node-a"), map[string]string{
"topology.kubernetes.io/region": "region-c",
"topology.kubernetes.io/zone": "zone-c",
}),
mergeNodeLabels(makeNode("zone-c-node-b"), map[string]string{
"topology.kubernetes.io/region": "region-c",
"topology.kubernetes.io/zone": "zone-c",
}),
},
pvcs: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
},
pvs: []*v1.PersistentVolume{
setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
"topology.kubernetes.io/region": {"region-a"},
"topology.kubernetes.io/zone": {"zone-a"},
}),
setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
"topology.kubernetes.io/region": {"region-a"},
"topology.kubernetes.io/zone": {"zone-a"},
}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
"topology.kubernetes.io/region": {"region-b"},
"topology.kubernetes.io/zone": {"zone-b"},
}),
setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
"topology.kubernetes.io/region": {"region-b"},
"topology.kubernetes.io/zone": {"zone-b"},
}),
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
},
podVolumesByNode: map[string]*scheduling.PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
nil,
framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
},
wantScores: []int64{
25,
25,
50,
50,
0,
0,
},
}, },
     }
     for _, item := range table {
         t.Run(item.name, func(t *testing.T) {
+            if item.feature != "" {
+                defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, item.feature, true)()
+            }
             ctx, cancel := context.WithCancel(context.Background())
             defer cancel()
             client := fake.NewSimpleClientset()
@@ -271,8 +553,9 @@ func TestVolumeBinding(t *testing.T) {
             t.Log("Feed testing data and wait for them to be synced")
             client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
             client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
-            if item.node != nil {
-                client.CoreV1().Nodes().Create(ctx, item.node, metav1.CreateOptions{})
+            client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{})
+            for _, node := range item.nodes {
+                client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
             }
             for _, pvc := range item.pvcs {
                 client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
@@ -290,8 +573,12 @@ func TestVolumeBinding(t *testing.T) {
             t.Log("Verify")
 
             p := pl.(*VolumeBinding)
-            nodeInfo := framework.NewNodeInfo()
-            nodeInfo.SetNode(item.node)
+            nodeInfos := make([]*framework.NodeInfo, 0)
+            for _, node := range item.nodes {
+                nodeInfo := framework.NewNodeInfo()
+                nodeInfo.SetNode(node)
+                nodeInfos = append(nodeInfos, nodeInfo)
+            }
 
             state := framework.NewCycleState()
 
             t.Logf("Verify: call PreFilter and check status")
@@ -305,18 +592,35 @@ func TestVolumeBinding(t *testing.T) {
             }
 
             t.Logf("Verify: check state after prefilter phase")
-            stateData, err := getStateData(state)
+            got, err := getStateData(state)
             if err != nil {
                 t.Fatal(err)
             }
-            if !reflect.DeepEqual(stateData, item.wantStateAfterPreFilter) {
-                t.Errorf("state got after prefilter does not match: %v, want: %v", stateData, item.wantStateAfterPreFilter)
+            stateCmpOpts := []cmp.Option{
+                cmp.AllowUnexported(stateData{}),
+                cmpopts.IgnoreFields(stateData{}, "Mutex"),
+            }
+            if diff := cmp.Diff(item.wantStateAfterPreFilter, got, stateCmpOpts...); diff != "" {
+                t.Errorf("state got after prefilter does not match (-want,+got):\n%s", diff)
             }
 
             t.Logf("Verify: call Filter and check status")
-            gotStatus := p.Filter(ctx, state, item.pod, nodeInfo)
-            if !reflect.DeepEqual(gotStatus, item.wantFilterStatus) {
-                t.Errorf("filter status does not match: %v, want: %v", gotStatus, item.wantFilterStatus)
-            }
+            for i, nodeInfo := range nodeInfos {
+                gotStatus := p.Filter(ctx, state, item.pod, nodeInfo)
+                if !reflect.DeepEqual(gotStatus, item.wantFilterStatus[i]) {
+                    t.Errorf("filter status does not match for node %q, got: %v, want: %v", nodeInfo.Node().Name, gotStatus, item.wantFilterStatus)
+                }
+            }
+
+            t.Logf("Verify: Score")
+            for i, node := range item.nodes {
+                score, status := p.Score(ctx, state, item.pod, node.Name)
+                if !status.IsSuccess() {
+                    t.Errorf("Score expects success status, got: %v", status)
+                }
+                if score != item.wantScores[i] {
+                    t.Errorf("Score expects score %d for node %q, got: %d", item.wantScores[i], node.Name, score)
+                }
+            }
         })
     }
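The wantScores in the capacity test cases follow directly from the default (linear) shape: each node's score is the rounded average of per-class utilization after provisional binding. A standalone sketch of the arithmetic for the multiple-PVC case, assuming the fixtures force node-a to bind 50Gi of a 200Gi PV and 100Gi of another 200Gi PV, while node-b binds 50Gi of 100Gi and 100Gi of 100Gi:

package main

import (
    "fmt"
    "math"
)

// nodeScore averages per-class utilization percentages, mirroring the
// equal-weight averaging in the scorer; inputs are (requested, capacity) pairs.
func nodeScore(classes [][2]int64) int64 {
    var sum float64
    for _, c := range classes {
        sum += float64(c[0]) * 100 / float64(c[1])
    }
    return int64(math.Round(sum / float64(len(classes))))
}

func main() {
    // node-a: 50/200 and 100/200 -> round((25+50)/2) = 38
    // node-b: 50/100 and 100/100 -> round((50+100)/2) = 75
    fmt.Println(nodeScore([][2]int64{{50, 200}, {100, 200}})) // 38
    fmt.Println(nodeScore([][2]int64{{50, 100}, {100, 100}})) // 75
}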