Merge pull request #86850 from haosdent/clean-filter-deps-volumerestrictions

Break volumerestrictions Filter plugin's dependency on the predicates package
Authored by Kubernetes Prow Robot on 2020-01-05 20:57:41 -08:00, committed by GitHub
commit 0f61791bc7
7 changed files with 18 additions and 16 deletions
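
At a glance, the change replaces the shared typed error predicates.ErrDiskConflict with a plain string constant owned by the volumerestrictions plugin. A minimal sketch of the pattern, condensed from the hunks below (diskConflictStatus is a hypothetical helper added here for illustration; only the constant and the NewStatus call come from the diff):

package volumerestrictions

import (
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ErrReasonDiskConflict is the plugin-owned replacement for the reason string
// previously obtained via predicates.ErrDiskConflict.GetReason().
const ErrReasonDiskConflict = "node(s) had no available disk"

// diskConflictStatus shows the new call shape: framework.NewStatus takes a
// code plus plain reason strings, so no typed predicate error is needed.
func diskConflictStatus() *framework.Status {
	// Before: framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
	return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
}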

View File

@@ -31,8 +31,6 @@ var (
 	// it can never be made to pass by removing pods, you need to add the predicate
 	// failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
-	// ErrDiskConflict is used for NoDiskConflict predicate error.
-	ErrDiskConflict = NewPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
 	// ErrVolumeZoneConflict is used for NoVolumeZoneConflict predicate error.
 	ErrVolumeZoneConflict = NewPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
 	// ErrNodeSelectorNotMatch is used for MatchNodeSelector predicate error.

View File

@@ -58,6 +58,7 @@ go_test(
         "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
         "//pkg/scheduler/framework/plugins/podtopologyspread:go_default_library",
         "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
+        "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",

View File

@@ -27,6 +27,8 @@ import (
 	"testing"
 	"time"
 
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1903,7 +1905,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			name: "Mix of failed predicates works fine",
 			nodesStatuses: framework.NodeToStatusMap{
 				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrDiskConflict.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
 				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 			},
 			expected: map[string]bool{"machine3": true, "machine4": true},
@@ -1976,7 +1978,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 func TestPreempt(t *testing.T) {
 	defaultFailedNodeToStatusMap := framework.NodeToStatusMap{
 		"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300).GetReason()),
-		"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrDiskConflict.GetReason()),
+		"machine2": framework.NewStatus(framework.Unschedulable, volumerestrictions.ErrReasonDiskConflict),
 		"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 	}
 	// Prepare 3 node names.
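
The two tests above only care that some recognizable reason string lands in the status; the preemption logic they exercise branches on the status code. A simplified, hypothetical helper (nodesWorthPreempting is not in the diff; it assumes only framework.NodeToStatusMap and Status.Code() from the v1alpha1 framework, both visible above) illustrating why swapping the reason's source package is behavior-preserving:

// nodesWorthPreempting keeps only nodes where evicting pods could help.
// The reason string (e.g. volumerestrictions.ErrReasonDiskConflict) is carried
// along for reporting; the decision hinges on the status code alone.
func nodesWorthPreempting(statuses framework.NodeToStatusMap) []string {
	var candidates []string
	for name, status := range statuses {
		if status != nil && status.Code() == framework.UnschedulableAndUnresolvable {
			continue // no amount of preemption can resolve this failure
		}
		candidates = append(candidates, name)
	}
	return candidates
}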

View File

@@ -43,7 +43,7 @@ func TestPredicateResultToFrameworkStatus(t *testing.T) {
 		{
 			name:       "Error with reason",
 			err:        errors.New("Failed with error"),
-			reasons:    []predicates.PredicateFailureReason{predicates.ErrDiskConflict},
+			reasons:    []predicates.PredicateFailureReason{predicates.ErrTaintsTolerationsNotMatch},
 			wantStatus: framework.NewStatus(framework.Error, "Failed with error"),
 		},
 		{
@@ -53,8 +53,8 @@ func TestPredicateResultToFrameworkStatus(t *testing.T) {
 		},
 		{
 			name:       "Unschedulable and Unresolvable",
-			reasons:    []predicates.PredicateFailureReason{predicates.ErrDiskConflict, predicates.ErrNodeSelectorNotMatch},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) had no available disk", "node(s) didn't match node selector"),
+			reasons:    []predicates.PredicateFailureReason{predicates.ErrTaintsTolerationsNotMatch, predicates.ErrNodeSelectorNotMatch},
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) had taints that the pod didn't tolerate", "node(s) didn't match node selector"),
 		},
 	}
 	for _, tt := range tests {

View File

@@ -6,7 +6,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -33,7 +32,6 @@ go_test(
     srcs = ["volume_restrictions_test.go"],
    embed = [":go_default_library"],
    deps = [
-        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",

View File

@@ -21,7 +21,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -34,6 +33,11 @@ var _ framework.FilterPlugin = &VolumeRestrictions{}
 // Name is the name of the plugin used in the plugin registry and configurations.
 const Name = "VolumeRestrictions"
 
+const (
+	// ErrReasonDiskConflict is used for NoDiskConflict predicate error.
+	ErrReasonDiskConflict = "node(s) had no available disk"
+)
+
 // Name returns name of the plugin. It is used in logs, etc.
 func (pl *VolumeRestrictions) Name() string {
 	return Name
@@ -118,7 +122,7 @@ func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleStat
 	for _, v := range pod.Spec.Volumes {
 		for _, ev := range nodeInfo.Pods() {
 			if isVolumeConflict(v, ev) {
-				return framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+				return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
 			}
 		}
 	}
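
Because the constant is exported, the plugin's tests (next file) can build the expected status from the same string. A condensed, hypothetical version of the table-driven GCE case that follows (assumes the test file's imports: testing, context, reflect, v1, framework, schedulernodeinfo; the test name and pod setup are illustrative, not from the diff):

func TestGCEDiskConflictSketch(t *testing.T) {
	// Two pods mounting the same GCE PD read-write conflict on one node.
	vol := v1.Volume{VolumeSource: v1.VolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "disk-a"},
	}}
	existing := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{vol}}}
	incoming := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{vol}}}

	pl := &VolumeRestrictions{}
	node := schedulernodeinfo.NewNodeInfo(existing)
	want := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
	if got := pl.Filter(context.Background(), nil, incoming, node); !reflect.DeepEqual(got, want) {
		t.Errorf("status does not match: got %v, want %v", got, want)
	}
}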

View File

@@ -22,7 +22,6 @@ import (
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -50,7 +49,7 @@ func TestGCEDiskConflicts(t *testing.T) {
 			},
 		},
 	}
-	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
 	tests := []struct {
 		pod      *v1.Pod
 		nodeInfo *schedulernodeinfo.NodeInfo
@@ -98,7 +97,7 @@ func TestAWSDiskConflicts(t *testing.T) {
 			},
 		},
 	}
-	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
 	tests := []struct {
 		pod      *v1.Pod
 		nodeInfo *schedulernodeinfo.NodeInfo
@@ -152,7 +151,7 @@ func TestRBDDiskConflicts(t *testing.T) {
 			},
 		},
 	}
-	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
 	tests := []struct {
 		pod      *v1.Pod
 		nodeInfo *schedulernodeinfo.NodeInfo
@@ -206,7 +205,7 @@ func TestISCSIDiskConflicts(t *testing.T) {
 			},
 		},
 	}
-	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
 	tests := []struct {
 		pod      *v1.Pod
 		nodeInfo *schedulernodeinfo.NodeInfo