[migration phase 1] PodFitsHostPorts as filter plugin

This commit is contained in:
Guoliang Wang
2019-10-09 13:39:34 +08:00
parent a78340410e
commit 78be6a628d
6 changed files with 272 additions and 12 deletions

View File

@@ -108,7 +108,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
] ]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"TestServiceAffinity", "TestServiceAffinity",
"TestLabelsPresence", "TestLabelsPresence",
), ),
@@ -123,6 +122,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -160,7 +160,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
] ]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"MaxEBSVolumeCount", "MaxEBSVolumeCount",
"MaxGCEPDVolumeCount", "MaxGCEPDVolumeCount",
@@ -181,6 +180,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -222,7 +222,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
] ]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"MaxEBSVolumeCount", "MaxEBSVolumeCount",
@@ -245,6 +244,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -291,7 +291,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
] ]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -317,6 +316,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -372,7 +372,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -398,6 +397,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -465,7 +465,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -492,6 +491,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -560,7 +560,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -587,6 +586,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -660,7 +660,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -688,6 +687,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -773,7 +773,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -802,6 +801,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -888,7 +888,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -918,6 +917,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -1003,7 +1003,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -1034,6 +1033,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -1123,7 +1123,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}] }]
}`, }`,
wantPredicates: sets.NewString( wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict", "NoVolumeZoneConflict",
"CheckNodeMemoryPressure", "CheckNodeMemoryPressure",
"CheckNodeDiskPressure", "CheckNodeDiskPressure",
@@ -1154,6 +1153,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{ wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": { "FilterPlugin": {
{Name: "NodeName"}, {Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"}, {Name: "NodeAffinity"},
{Name: "NodeResources"}, {Name: "NodeResources"},
{Name: "VolumeRestrictions"}, {Name: "VolumeRestrictions"},
@@ -1185,6 +1185,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
filterToPredicateMap := map[string]string{ filterToPredicateMap := map[string]string{
"TaintToleration": "PodToleratesNodeTaints", "TaintToleration": "PodToleratesNodeTaints",
"NodeName": "HostName", "NodeName": "HostName",
"NodePorts": "PodFitsHostPorts",
"NodeResources": "PodFitsResources", "NodeResources": "PodFitsResources",
"NodeAffinity": "MatchNodeSelector", "NodeAffinity": "MatchNodeSelector",
"VolumeBinding": "CheckVolumeBinding", "VolumeBinding": "CheckVolumeBinding",

View File

@@ -12,6 +12,7 @@ go_library(
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library", "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library",
"//pkg/scheduler/framework/plugins/nodename:go_default_library", "//pkg/scheduler/framework/plugins/nodename:go_default_library",
"//pkg/scheduler/framework/plugins/nodeports:go_default_library",
"//pkg/scheduler/framework/plugins/noderesources:go_default_library", "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
"//pkg/scheduler/framework/plugins/tainttoleration:go_default_library", "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library", "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
@@ -40,6 +41,7 @@ filegroup(
"//pkg/scheduler/framework/plugins/migration:all-srcs", "//pkg/scheduler/framework/plugins/migration:all-srcs",
"//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs", "//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs",
"//pkg/scheduler/framework/plugins/nodename:all-srcs", "//pkg/scheduler/framework/plugins/nodename:all-srcs",
"//pkg/scheduler/framework/plugins/nodeports:all-srcs",
"//pkg/scheduler/framework/plugins/noderesources:all-srcs", "//pkg/scheduler/framework/plugins/noderesources:all-srcs",
"//pkg/scheduler/framework/plugins/tainttoleration:all-srcs", "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs",
"//pkg/scheduler/framework/plugins/volumebinding:all-srcs", "//pkg/scheduler/framework/plugins/volumebinding:all-srcs",

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
@@ -59,6 +60,7 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
tainttoleration.Name: tainttoleration.New, tainttoleration.Name: tainttoleration.New,
noderesources.Name: noderesources.New, noderesources.Name: noderesources.New,
nodename.Name: nodename.New, nodename.Name: nodename.New,
nodeports.Name: nodeports.New,
nodeaffinity.Name: nodeaffinity.New, nodeaffinity.Name: nodeaffinity.New,
volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil
@@ -106,6 +108,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil) plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil)
return return
}) })
registry.RegisterPredicate(predicates.PodFitsHostPortsPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil)
return
})
registry.RegisterPredicate(predicates.MatchNodeSelectorPred, registry.RegisterPredicate(predicates.MatchNodeSelectorPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil) plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil)

View File

@@ -0,0 +1,42 @@
# Bazel build rules for the nodeports scheduler framework plugin package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

# Library target containing the NodePorts filter plugin implementation.
go_library(
    name = "go_default_library",
    srcs = ["node_ports.go"],
    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)

# Standard auto-managed source filegroups consumed by repo-wide tooling.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# Unit tests for the NodePorts plugin.
go_test(
    name = "go_default_test",
    srcs = ["node_ports_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
    ],
)

View File

@@ -0,0 +1,50 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeports
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
type NodePorts struct{}

// Compile-time assertion that *NodePorts implements the FilterPlugin
// extension point; this is the canonical Go idiom for interface-satisfaction
// checks and fails the build if the interface is not implemented.
var _ framework.FilterPlugin = &NodePorts{}
// Name is the name of the plugin used in the plugin registry and configurations.
// It also keys this plugin's constructor in the default plugin registry and is
// the name callers reference in FilterPlugin config (see NewDefaultRegistry).
const Name = "NodePorts"

// Name returns the name of the plugin. It is used in logs, etc.
func (pl *NodePorts) Name() string {
	return Name
}
// Filter invoked at the filter extension point.
// It delegates to the legacy PodFitsHostPorts predicate and converts the
// predicate's (reasons, err) result into a *framework.Status via the
// migration helper. The predicate's second argument is passed as nil —
// presumably its precomputed metadata, meaning host-port info is recomputed
// on each call; TODO confirm against the predicate's nil-metadata path.
func (pl *NodePorts) Filter(_ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	_, reasons, err := predicates.PodFitsHostPorts(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}
// New initializes a new plugin and returns it.
// NodePorts takes no configuration, so both the raw plugin arguments and the
// framework handle are ignored and a bare stateless instance is returned.
func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
	plugin := &NodePorts{}
	return plugin, nil
}

View File

@@ -0,0 +1,158 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeports
import (
"reflect"
"strconv"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// newPod builds a pod bound to the given host whose single container exposes
// one host port per entry in hostPortInfos; each entry is formatted as
// "protocol/hostIP/hostPort", e.g. "TCP/127.0.0.1/8080".
func newPod(host string, hostPortInfos ...string) *v1.Pod {
	ports := make([]v1.ContainerPort, 0, len(hostPortInfos))
	for _, info := range hostPortInfos {
		parts := strings.Split(info, "/")
		// Malformed port numbers are silently treated as 0; acceptable for a
		// test-only helper whose inputs are fixed literals.
		port, _ := strconv.Atoi(parts[2])
		ports = append(ports, v1.ContainerPort{
			HostIP:   parts[1],
			HostPort: int32(port),
			Protocol: v1.Protocol(parts[0]),
		})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			NodeName:   host,
			Containers: []v1.Container{{Ports: ports}},
		},
	}
}
// TestNodePorts runs the NodePorts filter plugin against a table of pod /
// node host-port combinations, covering protocol differences, distinct host
// IPs, the 0.0.0.0 wildcard IP, and multi-port pods. A nil wantStatus means
// the pod is expected to fit.
func TestNodePorts(t *testing.T) {
	tests := []struct {
		pod        *v1.Pod                       // pod being scheduled
		nodeInfo   *schedulernodeinfo.NodeInfo   // node with already-running pods
		name       string                        // subtest name
		wantStatus *framework.Status             // nil means Filter should succeed
	}{
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulernodeinfo.NewNodeInfo(),
			name:     "nothing running",
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "UDP/127.0.0.1/9090")),
			name: "other port",
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "UDP/127.0.0.1/8080")),
			name:       "same udp port",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			pod: newPod("m1", "TCP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.1/8080")),
			name:       "same tcp port",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			pod: newPod("m1", "TCP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.2/8080")),
			name: "different host ip",
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.1/8080")),
			name: "different protocol",
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "UDP/127.0.0.1/8080")),
			name:       "second udp port conflict",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
			name:       "first tcp port conflict",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			// 0.0.0.0 in the incoming pod should conflict with any host IP
			// already using the same port/protocol.
			pod: newPod("m1", "TCP/0.0.0.0/8001"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.1/8001")),
			name:       "first tcp port conflict due to 0.0.0.0 hostIP",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/127.0.0.1/8001")),
			name:       "TCP hostPort conflict due to 0.0.0.0 hostIP",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			// The wildcard also conflicts in the other direction: an existing
			// pod on 0.0.0.0 blocks a specific host IP on the same port.
			pod: newPod("m1", "TCP/127.0.0.1/8001"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/0.0.0.0/8001")),
			name:       "second tcp port conflict to 0.0.0.0 hostIP",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8001"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/0.0.0.0/8001")),
			name: "second different protocol",
		},
		{
			pod: newPod("m1", "UDP/127.0.0.1/8001"),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
			name:       "UDP hostPort conflict due to 0.0.0.0 hostIP",
			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p, _ := New(nil, nil)
			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}