volume scheduler: move reason strings into volume code

The scheduler doesn't really need to know in detail which reasons
rendered a node unusable for a pod. All it needs from the volume
binder is a list of reasons that it can then present to the user.

This seems a bit cleaner. But the main reason for the change is that
it simplifies the checking of CSI inline volumes and, perhaps later,
capacity checking. Both will lead to new failure reasons, which can
then be added without changing the interface.
Patrick Ohly
2020-02-14 13:40:29 +01:00
parent c73532c4f7
commit 6eb0b034ac
7 changed files with 174 additions and 216 deletions
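
A rough sketch of the reworked fake binder that the test changes below imply. This is inferred from the diff, not taken verbatim from the commit; names not visible in the diff (FindErr, the FakeVolumeBinder struct layout) are assumptions.

// Sketch only; the actual code in pkg/controller/volume/scheduling may differ.
package volumescheduling

import v1 "k8s.io/api/core/v1"

// The reason strings now live with the volume binding code. The values match
// the expectError strings in the tests (the scheduler prefixes a node count
// when reporting them).
const (
	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)

// FakeVolumeBinderConfig drops the FindUnboundSatsified/FindBoundSatsified
// booleans in favor of the list of reasons that Find should report.
type FakeVolumeBinderConfig struct {
	AllBound    bool
	FindReasons []string
	FindErr     error // assumed; not shown in this diff
	AssumeErr   error
	BindErr     error
}

// FakeVolumeBinder is the test double driven by the config above.
type FakeVolumeBinder struct {
	config *FakeVolumeBinderConfig
}

// FindPodVolumes reports why the node cannot host the pod's volumes; an empty
// list means the node fits. The scheduler relays these strings to the user
// instead of translating booleans into hard-coded messages.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
	return b.config.FindReasons, b.config.FindErr
}

With the reasons owned by the volume code, new checks such as CSI inline volumes or capacity can append entries to the returned list without touching the scheduler-facing interface.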


@@ -901,9 +901,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "all bound",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
+				AllBound: true,
 			},
 			expectAssumeCalled: true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -912,9 +910,8 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bound/invalid pv affinity",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   false,
+				AllBound:    true,
+				FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -922,8 +919,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "unbound/no matches",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   true,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -931,18 +927,14 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bound and unbound unsatisfied",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   false,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
 		},
 		{
-			name: "unbound/found matches/bind succeeds",
-			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-			},
+			name:               "unbound/found matches/bind succeeds",
+			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -959,9 +951,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "assume error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				AssumeErr:            assumeErr,
+				AssumeErr: assumeErr,
 			},
 			expectAssumeCalled: true,
 			eventReason:        "FailedScheduling",
@@ -970,9 +960,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bind error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				BindErr:              bindErr,
+				BindErr: bindErr,
 			},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,