volume scheduler: introduce special string type

This makes it possible to search for the special strings more easily
(https://github.com/kubernetes/kubernetes/pull/88230#discussion_r382367043).
Patrick Ohly 2020-02-25 11:10:27 +01:00
parent 6eb0b034ac
commit 6329b17d2f
7 changed files with 42 additions and 31 deletions
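
For context, the pattern introduced here is a defined string type for the conflict reasons, so the constants can be found by searching for the type instead of grepping for raw string literals. Below is a minimal, self-contained sketch of that pattern using the names from the diff; the main function is only illustrative and is not part of the commit.

package main

import (
	"fmt"
	"sort"
)

// ConflictReason mirrors the defined string type added by this commit.
type ConflictReason string

// ConflictReasons is a sortable list of reasons (implements sort.Interface).
type ConflictReasons []ConflictReason

func (reasons ConflictReasons) Len() int           { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int)      { reasons[i], reasons[j] = reasons[j], reasons[i] }

const (
	ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
	ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
)

func main() {
	reasons := ConflictReasons{ErrReasonNodeConflict, ErrReasonBindConflict}
	sort.Sort(reasons) // deterministic order, as used by the test helpers in the diff
	for _, reason := range reasons {
		fmt.Println(string(reason)) // callers convert explicitly where plain strings are needed
	}
}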


@@ -45,11 +45,22 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// ConflictReason is used for the special strings which explain why
// volume binding is impossible for a node.
type ConflictReason string
// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
type ConflictReasons []ConflictReason
func (reasons ConflictReasons) Len() int { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int) { reasons[i], reasons[j] = reasons[j], reasons[i] }
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
)
// InTreeToCSITranslator contains methods required to check migratable status
@@ -94,7 +105,7 @@ type SchedulerVolumeBinder interface {
// (currently) not usable for the pod.
//
// This function is called by the volume binding scheduler predicate and can be called in parallel
FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error)
FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error)
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -173,7 +184,7 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
// This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
// That's necessary because some operations will need to pass in to the predicate fake node objects.
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
podName := getPodName(pod)
// Warning: Below log needs high verbosity as it can be printed several times (#60933).


@@ -21,7 +21,7 @@ import "k8s.io/api/core/v1"
// FakeVolumeBinderConfig holds configurations for fake volume binder.
type FakeVolumeBinderConfig struct {
AllBound bool
FindReasons []string
FindReasons ConflictReasons
FindErr error
AssumeErr error
BindErr error
@@ -43,7 +43,7 @@ type FakeVolumeBinder struct {
}
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
return b.config.FindReasons, b.config.FindErr
}


@@ -739,7 +739,7 @@ func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
// reasonNames pretty-prints a list of reasons with variable names in
// case of a test failure because that is easier to read than the full
// strings.
func reasonNames(reasons []string) string {
func reasonNames(reasons ConflictReasons) string {
var varNames []string
for _, reason := range reasons {
switch reason {
@@ -748,16 +748,16 @@ func reasonNames(reasons []string) string {
case ErrReasonNodeConflict:
varNames = append(varNames, "ErrReasonNodeConflict")
default:
varNames = append(varNames, reason)
varNames = append(varNames, string(reason))
}
}
return fmt.Sprintf("%v", varNames)
}
func checkReasons(t *testing.T, actual, expected []string) {
func checkReasons(t *testing.T, actual, expected ConflictReasons) {
equal := len(actual) == len(expected)
sort.Strings(actual)
sort.Strings(expected)
sort.Sort(actual)
sort.Sort(expected)
if equal {
for i, reason := range actual {
if reason != expected[i] {
@@ -785,7 +785,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
expectedBindings []*bindingInfo
// Expected return values
reasons []string
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
@@ -821,7 +821,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
"unbound-pvc,pv-different-node": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2},
reasons: []string{ErrReasonBindConflict},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"two-unbound-pvcs": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
@@ -837,7 +837,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
reasons: []string{ErrReasonBindConflict},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"one-bound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
@@ -847,7 +847,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
"one-bound,one-unbound,no-match": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode2},
reasons: []string{ErrReasonBindConflict},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"one-prebound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
@@ -861,7 +861,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
"immediate-bound-pvc-wrong-node": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediateNode2},
reasons: []string{ErrReasonNodeConflict},
reasons: ConflictReasons{ErrReasonNodeConflict},
},
"immediate-unbound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
@@ -940,7 +940,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
expectedProvisions []*v1.PersistentVolumeClaim
// Expected return values
reasons []string
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
@@ -975,11 +975,11 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
},
"invalid-provisioner": {
podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
reasons: []string{ErrReasonBindConflict},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"volume-topology-unsatisfied": {
podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
reasons: []string{ErrReasonBindConflict},
reasons: ConflictReasons{ErrReasonBindConflict},
},
}
@@ -1047,7 +1047,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
initCSINodes []*storagev1.CSINode
// Expected return values
reasons []string
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
@@ -1073,7 +1073,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone2},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
reasons: []string{ErrReasonNodeConflict},
reasons: ConflictReasons{ErrReasonNodeConflict},
},
}


@@ -1999,8 +1999,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonNodeConflict),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonNodeConflict)),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
},
expected: map[string]bool{"machine4": true},
},


@@ -81,7 +81,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
if len(reasons) > 0 {
status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
for _, reason := range reasons {
status.AppendReason(reason)
status.AppendReason(string(reason))
}
return status
}


@@ -67,18 +67,18 @@ func TestVolumeBinding(t *testing.T) {
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindReasons: []string{volumescheduling.ErrReasonBindConflict},
FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict},
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
},
{
name: "bound and unbound unsatisfied",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict), string(volumescheduling.ErrReasonNodeConflict)),
},
{
name: "unbound/found matches/bind succeeds",


@@ -911,7 +911,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
name: "bound/invalid pv affinity",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true,
FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -919,7 +919,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{
name: "unbound/no matches",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindReasons: []string{volumescheduling.ErrReasonBindConflict},
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -927,7 +927,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{
name: "bound and unbound unsatisfied",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),