mirror of https://github.com/k3s-io/kubernetes.git
volume scheduler: move reason strings into volume code
The scheduler doesn't really need to know in detail which reasons rendered a node unusable for a pod. All it needs from the volume binder is a list of reasons that it can then present to the user. This seems a bit cleaner. But the main reason for the change is that it simplifies the checking of CSI inline volumes and perhaps later capacity checking. Both will lead to new failure reasons, which can then be added without changing the interface.
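The shape of the change, reconstructed from the test diff below: instead of two booleans that the scheduler had to translate into user-facing strings, the binder now returns the strings themselves. A minimal sketch of the before/after contract, assuming signatures along the lines of the real SchedulerVolumeBinder interface (not copied verbatim from the tree):

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// Reason strings now live next to the volume binder; their values match the
// messages asserted by expectError in the tests below.
const (
	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)

// Before: the binder reported booleans and the scheduler owned the strings.
type volumeBinderBefore interface {
	FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error)
}

// After: the binder hands back the reasons directly; an empty list means the
// node is usable. New reasons (CSI inline volumes, capacity checks) can be
// added without touching this signature.
type volumeBinderAfter interface {
	FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error)
}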
@@ -901,9 +901,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "all bound",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
+				AllBound: true,
 			},
 			expectAssumeCalled: true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -912,9 +910,8 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bound/invalid pv affinity",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   false,
+				AllBound:    true,
+				FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -922,8 +919,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "unbound/no matches",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   true,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -931,18 +927,14 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bound and unbound unsatisfied",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   false,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
 		},
 		{
-			name: "unbound/found matches/bind succeeds",
-			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-			},
+			name:               "unbound/found matches/bind succeeds",
+			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -959,9 +951,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "assume error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				AssumeErr:            assumeErr,
+				AssumeErr: assumeErr,
 			},
 			expectAssumeCalled: true,
 			eventReason:        "FailedScheduling",
@@ -970,9 +960,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bind error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				BindErr:              bindErr,
+				BindErr: bindErr,
 			},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,
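The expectError strings above also show what the scheduler does with the returned list: it counts, per reason, how many nodes failed with it and joins the counts into one message. A hypothetical sketch of that aggregation (the function name and shape are illustrative, not the scheduler's actual code):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// aggregateReasons tallies per-node failure reasons and renders them in the
// "<count> node(s) <reason>" form asserted by makePredicateError above.
// Illustrative sketch only, not the scheduler's actual implementation.
func aggregateReasons(reasonsPerNode map[string][]string) string {
	counts := map[string]int{}
	for _, reasons := range reasonsPerNode {
		for _, r := range reasons {
			counts[r]++
		}
	}
	keys := make([]string, 0, len(counts))
	for r := range counts {
		keys = append(keys, r)
	}
	sort.Strings(keys) // deterministic output across map iterations
	parts := make([]string, 0, len(keys))
	for _, r := range keys {
		parts = append(parts, fmt.Sprintf("%d node(s) %s", counts[r], r))
	}
	return strings.Join(parts, ", ")
}

func main() {
	// One node failing with both reasons reproduces the combined message
	// expected by the "bound and unbound unsatisfied" test case.
	fmt.Println(aggregateReasons(map[string][]string{
		"machine1": {
			"node(s) didn't find available persistent volumes to bind",
			"node(s) had volume node affinity conflict",
		},
	}))
	// Output:
	// 1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict
}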