Merge pull request #83659 from wgliang/scheduler-v2/pod-fits-host-ports

[migration phase 1] PodFitsHostPorts as filter plugin
Authored by Kubernetes Prow Robot on 2019-10-13 11:18:35 -07:00; committed by GitHub
commit 5a427e8704
6 changed files with 272 additions and 12 deletions
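
This change is part of phase 1 of the scheduler framework migration: the legacy PodFitsHostPorts predicate is wrapped as a new filter plugin named NodePorts. The plugin is registered in the default plugin registry, a config producer maps the old predicate name to the new plugin so existing Policy configurations keep working, and the scheduler compatibility test is updated to expect "NodePorts" under FilterPlugin instead of "PodFitsHostPorts" among the predicates.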


@@ -108,7 +108,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -123,6 +122,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -160,7 +160,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
@@ -180,6 +179,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -224,7 +224,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"MaxEBSVolumeCount",
@@ -246,6 +245,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -295,7 +295,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -320,6 +319,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -378,7 +378,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -403,6 +402,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -473,7 +473,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -499,6 +498,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -570,7 +570,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -596,6 +595,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -672,7 +672,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -699,6 +698,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -787,7 +787,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -815,6 +814,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -904,7 +904,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -933,6 +932,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -1021,7 +1021,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -1051,6 +1050,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -1143,7 +1143,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"PodFitsHostPorts",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -1173,6 +1172,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResources"},
{Name: "VolumeRestrictions"},
@@ -1207,6 +1207,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
filterToPredicateMap := map[string]string{
"TaintToleration": "PodToleratesNodeTaints",
"NodeName": "HostName",
"NodePorts": "PodFitsHostPorts",
"NodeResources": "PodFitsResources",
"NodeAffinity": "MatchNodeSelector",
"VolumeBinding": "CheckVolumeBinding",


@@ -13,6 +13,7 @@ go_library(
"//pkg/scheduler/framework/plugins/imagelocality:go_default_library",
"//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library",
"//pkg/scheduler/framework/plugins/nodename:go_default_library",
"//pkg/scheduler/framework/plugins/nodeports:go_default_library",
"//pkg/scheduler/framework/plugins/noderesources:go_default_library",
"//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
@@ -42,6 +43,7 @@ filegroup(
"//pkg/scheduler/framework/plugins/migration:all-srcs",
"//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs",
"//pkg/scheduler/framework/plugins/nodename:all-srcs",
"//pkg/scheduler/framework/plugins/nodeports:all-srcs",
"//pkg/scheduler/framework/plugins/noderesources:all-srcs",
"//pkg/scheduler/framework/plugins/tainttoleration:all-srcs",
"//pkg/scheduler/framework/plugins/volumebinding:all-srcs",


@@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
@@ -61,6 +62,7 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
tainttoleration.Name: tainttoleration.New,
noderesources.Name: noderesources.New,
nodename.Name: nodename.New,
nodeports.Name: nodeports.New,
nodeaffinity.Name: nodeaffinity.New,
volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil
@@ -108,6 +110,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil)
return
})
registry.RegisterPredicate(predicates.PodFitsHostPortsPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil)
return
})
registry.RegisterPredicate(predicates.MatchNodeSelectorPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil)
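
With this producer registered, enabling the PodFitsHostPorts predicate in a scheduler policy now yields a framework configuration with NodePorts enabled at the Filter extension point. Roughly like this (a sketch only; it assumes the kube-scheduler config package path of this release):

package main

import (
	"fmt"

	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	// Approximate result of the PodFitsHostPortsPred producer above:
	// the NodePorts plugin enabled for the Filter extension point.
	var plugins config.Plugins
	plugins.Filter = &config.PluginSet{
		Enabled: []config.Plugin{{Name: "NodePorts"}},
	}
	fmt.Printf("%+v\n", *plugins.Filter)
}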


@@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["node_ports.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["node_ports_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)


@@ -0,0 +1,50 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeports
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
type NodePorts struct{}
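// Compile-time assertion that NodePorts implements the FilterPlugin interface.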
var _ = framework.FilterPlugin(&NodePorts{})
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "NodePorts"
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodePorts) Name() string {
return Name
}
// Filter invoked at the filter extension point.
func (pl *NodePorts) Filter(_ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
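// The metadata argument is nil: host-port usage is recomputed from the pod and nodeInfo on every call.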
_, reasons, err := predicates.PodFitsHostPorts(pod, nil, nodeInfo)
return migration.PredicateResultToFrameworkStatus(reasons, err)
}
// New initializes a new plugin and returns it.
func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &NodePorts{}, nil
}
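
Because the plugin is stateless, it can be constructed with New and exercised directly; the test below drives it exactly this way. A minimal usage sketch (a nil CycleState is fine since Filter ignores it, and a nil status means success):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	p, _ := nodeports.New(nil, nil)
	pod := &v1.Pod{} // requests no host ports, so it always fits
	nodeInfo := schedulernodeinfo.NewNodeInfo()

	status := p.(framework.FilterPlugin).Filter(nil, pod, nodeInfo)
	fmt.Println(status.IsSuccess()) // true: no host-port conflict
}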


@@ -0,0 +1,158 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeports
import (
"reflect"
"strconv"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
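// newPod builds a pod bound to the given node whose single container
// requests host ports described as "protocol/hostIP/port" strings.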
func newPod(host string, hostPortInfos ...string) *v1.Pod {
networkPorts := []v1.ContainerPort{}
for _, portInfo := range hostPortInfos {
splited := strings.Split(portInfo, "/")
hostPort, _ := strconv.Atoi(splited[2])
networkPorts = append(networkPorts, v1.ContainerPort{
HostIP: splited[1],
HostPort: int32(hostPort),
Protocol: v1.Protocol(splited[0]),
})
}
return &v1.Pod{
Spec: v1.PodSpec{
NodeName: host,
Containers: []v1.Container{
{
Ports: networkPorts,
},
},
},
}
}
func TestNodePorts(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
name string
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(),
name: "nothing running",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/9090")),
name: "other port",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "same udp port",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")),
name: "same tcp port",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.2/8080")),
name: "different host ip",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")),
name: "different protocol",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "second udp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
name: "first tcp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "first tcp port conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")),
name: "second tcp port conflict to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
{
pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")),
name: "second different protocol",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}
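
The subtlest rows in this table are the 0.0.0.0 cases: a hostPort bound on the wildcard address conflicts with the same port and protocol on any host IP, in either direction. A simplified model of that rule (an illustration only; the real logic lives in predicates.PodFitsHostPorts and the nodeinfo host-port helpers):

package main

import "fmt"

// conflicts models the host-port rule the table above exercises: two
// bindings collide when port and protocol match and the IPs are equal
// or either side is the 0.0.0.0 wildcard.
func conflicts(protoA, ipA string, portA int32, protoB, ipB string, portB int32) bool {
	if protoA != protoB || portA != portB {
		return false
	}
	return ipA == ipB || ipA == "0.0.0.0" || ipB == "0.0.0.0"
}

func main() {
	fmt.Println(conflicts("TCP", "0.0.0.0", 8001, "TCP", "127.0.0.1", 8001))   // true
	fmt.Println(conflicts("UDP", "127.0.0.1", 8080, "UDP", "127.0.0.2", 8080)) // false: different host IP
	fmt.Println(conflicts("UDP", "127.0.0.1", 8001, "TCP", "0.0.0.0", 8001))   // false: different protocol
}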