From 2d44ef9dfa5da87c0c86af4c4f8dc2c28963f1da Mon Sep 17 00:00:00 2001 From: chenxingyu Date: Thu, 7 Sep 2017 17:12:22 +0800 Subject: [PATCH] add hostip protocol to the hostport predicates and make unit test adapt to the code change --- .../algorithm/predicates/metadata.go | 4 +- .../algorithm/predicates/metadata_test.go | 2 +- .../algorithm/predicates/predicates.go | 42 ++++++- .../algorithm/predicates/predicates_test.go | 112 ++++++++++++++---- .../scheduler/algorithm/predicates/utils.go | 23 ++++ .../algorithm/predicates/utils_test.go | 45 +++++++ .../scheduler/schedulercache/cache_test.go | 62 +++++----- .../pkg/scheduler/schedulercache/node_info.go | 31 +++-- plugin/pkg/scheduler/util/utils.go | 20 +++- 9 files changed, 267 insertions(+), 74 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata.go b/plugin/pkg/scheduler/algorithm/predicates/metadata.go index bb15272db3d..c0eda6a24de 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata.go +++ b/plugin/pkg/scheduler/algorithm/predicates/metadata.go @@ -46,7 +46,7 @@ type predicateMetadata struct { pod *v1.Pod podBestEffort bool podRequest *schedulercache.Resource - podPorts map[int]bool + podPorts map[string]bool //key is a pod full name with the anti-affinity rules. matchingAntiAffinityTerms map[string][]matchingPodAntiAffinityTerm serviceAffinityInUse bool @@ -172,7 +172,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata { podRequest: meta.podRequest, serviceAffinityInUse: meta.serviceAffinityInUse, } - newPredMeta.podPorts = map[int]bool{} + newPredMeta.podPorts = map[string]bool{} for k, v := range meta.podPorts { newPredMeta.podPorts[k] = v } diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go b/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go index 640da1fcace..0a96f5a0c91 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/metadata_test.go @@ -373,7 +373,7 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) { Memory: 300, AllowedPodNumber: 4, }, - podPorts: map[int]bool{1234: true, 456: false}, + podPorts: map[string]bool{"1234": true, "456": false}, matchingAntiAffinityTerms: map[string][]matchingPodAntiAffinityTerm{ "term1": { { diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 6d54e32a8b5..9d75bf5c8f2 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -890,7 +890,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi // PodFitsHostPorts checks if a node has free ports for the requested pod ports. 
func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - var wantPorts map[int]bool + var wantPorts map[string]bool if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts } else { @@ -902,11 +902,45 @@ func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s } existingPorts := nodeInfo.UsedPorts() - for wport := range wantPorts { - if wport != 0 && existingPorts[wport] { - return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil + + // try to see whether two hostPorts will conflict or not + for existingPort := range existingPorts { + existingHostPortInfo := decode(existingPort) + + if existingHostPortInfo.hostIP == "0.0.0.0" { + // loop through all the want hostPort to see if there exists a conflict + for wantPort := range wantPorts { + wantHostPortInfo := decode(wantPort) + + // if there already exists one hostPort whose hostIP is 0.0.0.0, then the other want hostport (which has the same protocol and port) will not fit + if wantHostPortInfo.hostPort == existingHostPortInfo.hostPort && wantHostPortInfo.protocol == existingHostPortInfo.protocol { + return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil + } + } } } + + for wantPort := range wantPorts { + wantHostPortInfo := decode(wantPort) + + if wantHostPortInfo.hostIP == "0.0.0.0" { + // loop through all the existing hostPort to see if there exists a conflict + for existingPort := range existingPorts { + existingHostPortInfo := decode(existingPort) + + // if there already exists one hostPort whose hostIP may be 127.0.0.1, then a hostPort (which wants 0.0.0.0 hostIP and has the same protocol and port) will not fit + if wantHostPortInfo.hostPort == existingHostPortInfo.hostPort && wantHostPortInfo.protocol == existingHostPortInfo.protocol { + return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil + } + } + } else { + // general check hostPort conflict procedure for hostIP is not 0.0.0.0 + if wantPort != "" && existingPorts[wantPort] { + return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil + } + } + } + return true, nil, nil } diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index bd02f92d939..ac50597b093 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -554,10 +554,17 @@ func TestPodFitsHost(t *testing.T) { } } -func newPod(host string, hostPorts ...int) *v1.Pod { +func newPod(host string, hostPortInfos ...string) *v1.Pod { networkPorts := []v1.ContainerPort{} - for _, port := range hostPorts { - networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)}) + for _, portInfo := range hostPortInfos { + hostPortInfo := decode(portInfo) + hostPort, _ := strconv.Atoi(hostPortInfo.hostPort) + + networkPorts = append(networkPorts, v1.ContainerPort{ + HostIP: hostPortInfo.hostIP, + HostPort: int32(hostPort), + Protocol: v1.Protocol(hostPortInfo.protocol), + }) } return &v1.Pod{ Spec: v1.PodSpec{ @@ -585,32 +592,88 @@ func TestPodFitsHostPorts(t *testing.T) { test: "nothing running", }, { - pod: newPod("m1", 8080), + pod: newPod("m1", "UDP/127.0.0.1/8080"), nodeInfo: schedulercache.NewNodeInfo( - newPod("m1", 9090)), + newPod("m1", "UDP/127.0.0.1/9090")), fits: true, test: "other port", }, { - pod: 
newPod("m1", 8080), + pod: newPod("m1", "UDP/127.0.0.1/8080"), nodeInfo: schedulercache.NewNodeInfo( - newPod("m1", 8080)), + newPod("m1", "UDP/127.0.0.1/8080")), fits: false, - test: "same port", + test: "same udp port", }, { - pod: newPod("m1", 8000, 8080), + pod: newPod("m1", "TCP/127.0.0.1/8080"), nodeInfo: schedulercache.NewNodeInfo( - newPod("m1", 8080)), + newPod("m1", "TCP/127.0.0.1/8080")), fits: false, - test: "second port", + test: "same tcp port", }, { - pod: newPod("m1", 8000, 8080), + pod: newPod("m1", "TCP/127.0.0.1/8080"), nodeInfo: schedulercache.NewNodeInfo( - newPod("m1", 8001, 8080)), + newPod("m1", "TCP/127.0.0.2/8080")), + fits: true, + test: "different host ip", + }, + { + pod: newPod("m1", "UDP/127.0.0.1/8080"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/127.0.0.1/8080")), + fits: true, + test: "different protocol", + }, + { + pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "UDP/127.0.0.1/8080")), fits: false, - test: "second port", + test: "second udp port conflict", + }, + { + pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")), + fits: false, + test: "first tcp port conflict", + }, + { + pod: newPod("m1", "TCP/0.0.0.0/8001"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/127.0.0.1/8001")), + fits: false, + test: "first tcp port conflict due to 0.0.0.0 hostIP", + }, + { + pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/127.0.0.1/8001")), + fits: false, + test: "TCP hostPort conflict due to 0.0.0.0 hostIP", + }, + { + pod: newPod("m1", "TCP/127.0.0.1/8001"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/0.0.0.0/8001")), + fits: false, + test: "second tcp port conflict to 0.0.0.0 hostIP", + }, + { + pod: newPod("m1", "UDP/127.0.0.1/8001"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/0.0.0.0/8001")), + fits: true, + test: "second different protocol", + }, + { + pod: newPod("m1", "UDP/127.0.0.1/8001"), + nodeInfo: schedulercache.NewNodeInfo( + newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")), + fits: false, + test: "UDP hostPort conflict due to 0.0.0.0 hostIP", }, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts} @@ -631,29 +694,28 @@ func TestPodFitsHostPorts(t *testing.T) { func TestGetUsedPorts(t *testing.T) { tests := []struct { - pods []*v1.Pod - - ports map[int]bool + pods []*v1.Pod + ports map[string]bool }{ { []*v1.Pod{ - newPod("m1", 9090), + newPod("m1", "UDP/127.0.0.1/9090"), }, - map[int]bool{9090: true}, + map[string]bool{"UDP/127.0.0.1/9090": true}, }, { []*v1.Pod{ - newPod("m1", 9090), - newPod("m1", 9091), + newPod("m1", "UDP/127.0.0.1/9090"), + newPod("m1", "UDP/127.0.0.1/9091"), }, - map[int]bool{9090: true, 9091: true}, + map[string]bool{"UDP/127.0.0.1/9090": true, "UDP/127.0.0.1/9091": true}, }, { []*v1.Pod{ - newPod("m1", 9090), - newPod("m2", 9091), + newPod("m1", "TCP/0.0.0.0/9090"), + newPod("m2", "UDP/127.0.0.1/9091"), }, - map[int]bool{9090: true, 9091: true}, + map[string]bool{"TCP/0.0.0.0/9090": true, "UDP/127.0.0.1/9091": true}, }, } diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils.go b/plugin/pkg/scheduler/algorithm/predicates/utils.go index 84d096d23aa..5e115575caa 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/utils.go +++ 
b/plugin/pkg/scheduler/algorithm/predicates/utils.go @@ -17,6 +17,8 @@ limitations under the License. package predicates import ( + "strings" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -89,3 +91,24 @@ func GetEquivalencePod(pod *v1.Pod) interface{} { type EquivalencePod struct { ControllerRef metav1.OwnerReference } + +type hostPortInfo struct { + protocol string + hostIP string + hostPort string +} + +// decode a string ("protocol/hostIP/hostPort") to *hostPortInfo object +func decode(info string) *hostPortInfo { + hostPortInfoSlice := strings.Split(info, "/") + + protocol := hostPortInfoSlice[0] + hostIP := hostPortInfoSlice[1] + hostPort := hostPortInfoSlice[2] + + return &hostPortInfo{ + protocol: protocol, + hostIP: hostIP, + hostPort: hostPort, + } +} diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils_test.go b/plugin/pkg/scheduler/algorithm/predicates/utils_test.go index 305a27d1304..00e35a6cb7f 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/utils_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/utils_test.go @@ -18,6 +18,8 @@ package predicates import ( "fmt" + "reflect" + "testing" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,3 +70,46 @@ func ExampleFindLabelsInSet() { // label1=value1,label2=value2,label3=will_see_this // pod1,pod2, } + +func Test_decode(t *testing.T) { + tests := []struct { + name string + args string + want *hostPortInfo + }{ + { + name: "test1", + args: "UDP/127.0.0.1/80", + want: &hostPortInfo{ + protocol: "UDP", + hostIP: "127.0.0.1", + hostPort: "80", + }, + }, + { + name: "test2", + args: "TCP/127.0.0.1/80", + want: &hostPortInfo{ + protocol: "TCP", + hostIP: "127.0.0.1", + hostPort: "80", + }, + }, + { + name: "test3", + args: "TCP/0.0.0.0/80", + want: &hostPortInfo{ + protocol: "TCP", + hostIP: "0.0.0.0", + hostPort: "80", + }, + }, + } + + for _, tt := range tests { + if got := decode(tt.args); !reflect.DeepEqual(got, tt.want) { + t.Errorf("test name = %v, decode() = %v, want %v", tt.name, got, tt.want) + } + + } +} diff --git a/plugin/pkg/scheduler/schedulercache/cache_test.go b/plugin/pkg/scheduler/schedulercache/cache_test.go index c0ffc989ffa..cde40585c31 100644 --- a/plugin/pkg/scheduler/schedulercache/cache_test.go +++ b/plugin/pkg/scheduler/schedulercache/cache_test.go @@ -49,12 +49,12 @@ func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *No func TestAssumePodScheduled(t *testing.T) { nodeName := "node" testPods := []*v1.Pod{ - makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}), - makeBasePod(t, nodeName, "test-nonzero", "", "", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test", "100m", "500", "oir-foo:3", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "oir-foo:5", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), + makeBasePod(t, nodeName, 
"test-nonzero", "", "", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test", "100m", "500", "oir-foo:3", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "oir-foo:5", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), makeBasePod(t, nodeName, "test", "100m", "500", "random-invalid-oir-key:100", []v1.ContainerPort{{}}), } @@ -75,7 +75,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }, }, { pods: []*v1.Pod{testPods[1], testPods[2]}, @@ -90,7 +90,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1], testPods[2]}, - usedPorts: map[int]bool{80: true, 8080: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true, "TCP/127.0.0.1/8080": true}, }, }, { // test non-zero request pods: []*v1.Pod{testPods[3]}, @@ -105,7 +105,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[3]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }, }, { pods: []*v1.Pod{testPods[4]}, @@ -121,7 +121,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[4]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }, }, { pods: []*v1.Pod{testPods[4], testPods[5]}, @@ -137,7 +137,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[4], testPods[5]}, - usedPorts: map[int]bool{80: true, 8080: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true, "TCP/127.0.0.1/8080": true}, }, }, { pods: []*v1.Pod{testPods[6]}, @@ -152,7 +152,7 @@ func TestAssumePodScheduled(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[6]}, - usedPorts: map[int]bool{}, + usedPorts: map[string]bool{}, }, }, } @@ -195,8 +195,8 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time func TestExpirePod(t *testing.T) { nodeName := "node" testPods := []*v1.Pod{ - makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), } now := time.Now() ttl := 10 * time.Second @@ -228,7 +228,7 @@ func TestExpirePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[int]bool{8080: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": false, "TCP/127.0.0.1/8080": true}, }, }} @@ -255,8 +255,8 @@ func TestAddPodWillConfirm(t *testing.T) { ttl := 10 * time.Second testPods := []*v1.Pod{ - makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test-2", "200m", 
"1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), } tests := []struct { podsToAssume []*v1.Pod @@ -277,7 +277,7 @@ func TestAddPodWillConfirm(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true, "TCP/127.0.0.1/8080": false}, }, }} @@ -366,7 +366,7 @@ func TestAddPodWillReplaceAssumed(t *testing.T) { func TestAddPodAfterExpiration(t *testing.T) { nodeName := "node" ttl := 10 * time.Second - basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) tests := []struct { pod *v1.Pod @@ -384,7 +384,7 @@ func TestAddPodAfterExpiration(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{basePod}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }, }} @@ -414,8 +414,8 @@ func TestUpdatePod(t *testing.T) { nodeName := "node" ttl := 10 * time.Second testPods := []*v1.Pod{ - makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), } tests := []struct { podsToAssume []*v1.Pod @@ -437,7 +437,7 @@ func TestUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[int]bool{8080: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/8080": true}, }, { requestedResource: &Resource{ MilliCPU: 100, @@ -449,7 +449,7 @@ func TestUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }}, }} @@ -480,8 +480,8 @@ func TestExpireAddUpdatePod(t *testing.T) { nodeName := "node" ttl := 10 * time.Second testPods := []*v1.Pod{ - makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}), - makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), + makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), } tests := []struct { podsToAssume []*v1.Pod @@ -504,7 +504,7 @@ func TestExpireAddUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[1]}, - usedPorts: map[int]bool{8080: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/8080": true}, }, { requestedResource: &Resource{ MilliCPU: 100, @@ -516,7 +516,7 @@ func TestExpireAddUpdatePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{testPods[0]}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }}, }} @@ -553,7 +553,7 @@ func TestExpireAddUpdatePod(t *testing.T) { // TestRemovePod tests after added pod is removed, its information should also be subtracted. 
func TestRemovePod(t *testing.T) { nodeName := "node" - basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) tests := []struct { pod *v1.Pod wNodeInfo *NodeInfo @@ -570,7 +570,7 @@ func TestRemovePod(t *testing.T) { }, allocatableResource: &Resource{}, pods: []*v1.Pod{basePod}, - usedPorts: map[int]bool{80: true}, + usedPorts: map[string]bool{"TCP/127.0.0.1/80": true}, }, }} @@ -595,7 +595,7 @@ func TestRemovePod(t *testing.T) { func TestForgetPod(t *testing.T) { nodeName := "node" - basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) tests := []struct { pods []*v1.Pod }{{ diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/plugin/pkg/scheduler/schedulercache/node_info.go index 6f323bcd262..37eefeb288d 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/plugin/pkg/scheduler/schedulercache/node_info.go @@ -38,7 +38,7 @@ type NodeInfo struct { pods []*v1.Pod podsWithAffinity []*v1.Pod - usedPorts map[int]bool + usedPorts map[string]bool // Total requested resource of all pods on this node. // It includes assumed pods which scheduler sends binding to apiserver but @@ -164,7 +164,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { nonzeroRequest: &Resource{}, allocatableResource: &Resource{}, generation: 0, - usedPorts: make(map[int]bool), + usedPorts: make(map[string]bool), } for _, pod := range pods { ni.AddPod(pod) @@ -188,7 +188,7 @@ func (n *NodeInfo) Pods() []*v1.Pod { return n.pods } -func (n *NodeInfo) UsedPorts() map[int]bool { +func (n *NodeInfo) UsedPorts() map[string]bool { if n == nil { return nil } @@ -269,7 +269,7 @@ func (n *NodeInfo) Clone() *NodeInfo { taintsErr: n.taintsErr, memoryPressureCondition: n.memoryPressureCondition, diskPressureCondition: n.diskPressureCondition, - usedPorts: make(map[int]bool), + usedPorts: make(map[string]bool), generation: n.generation, } if len(n.pods) > 0 { @@ -408,11 +408,26 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, used bool) { // "0" is explicitly ignored in PodFitsHostPorts, // which is the only function that uses this value. if podPort.HostPort != 0 { - if used { - n.usedPorts[int(podPort.HostPort)] = used - } else { - delete(n.usedPorts, int(podPort.HostPort)) + // user does not explicitly set protocol, default is tcp + portProtocol := podPort.Protocol + if podPort.Protocol == "" { + portProtocol = v1.ProtocolTCP } + + // user does not explicitly set hostIP, default is 0.0.0.0 + portHostIP := podPort.HostIP + if podPort.HostIP == "" { + portHostIP = "0.0.0.0" + } + + str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) + + if used { + n.usedPorts[str] = used + } else { + delete(n.usedPorts, str) + } + } } } diff --git a/plugin/pkg/scheduler/util/utils.go b/plugin/pkg/scheduler/util/utils.go index 2cbe26f2e3a..c4193d626ff 100644 --- a/plugin/pkg/scheduler/util/utils.go +++ b/plugin/pkg/scheduler/util/utils.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "fmt" "sort" "k8s.io/api/core/v1" @@ -25,8 +26,8 @@ import ( // GetUsedPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair // will be in the result; but it does not resolve port conflict. 
-func GetUsedPorts(pods ...*v1.Pod) map[int]bool { - ports := make(map[int]bool) +func GetUsedPorts(pods ...*v1.Pod) map[string]bool { + ports := make(map[string]bool) for _, pod := range pods { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] @@ -35,7 +36,20 @@ func GetUsedPorts(pods ...*v1.Pod) map[int]bool { // "0" is explicitly ignored in PodFitsHostPorts, // which is the only function that uses this value. if podPort.HostPort != 0 { - ports[int(podPort.HostPort)] = true + // user does not explicitly set protocol, default is tcp + portProtocol := podPort.Protocol + if podPort.Protocol == "" { + portProtocol = v1.ProtocolTCP + } + + // user does not explicitly set hostIP, default is 0.0.0.0 + portHostIP := podPort.HostIP + if podPort.HostIP == "" { + portHostIP = "0.0.0.0" + } + + str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) + ports[str] = true } } }
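
Editor's note (not part of the patch): the change above replaces the scheduler's integer host-port keys with "protocol/hostIP/hostPort" strings and treats 0.0.0.0 as a wildcard IP when checking conflicts. The standalone Go sketch below is a minimal illustration of that conflict rule under those assumptions; the helper names (hostPortKey, decodeKey, conflicts) and the main function are illustrative only and do not exist in the Kubernetes scheduler code.

package main

import (
	"fmt"
	"strings"
)

// hostPortKey mirrors the "protocol/hostIP/hostPort" encoding used by the
// patch for map keys, with the same defaults: TCP when no protocol is set
// and 0.0.0.0 when no hostIP is set.
func hostPortKey(protocol, hostIP string, hostPort int32) string {
	if protocol == "" {
		protocol = "TCP"
	}
	if hostIP == "" {
		hostIP = "0.0.0.0"
	}
	return fmt.Sprintf("%s/%s/%d", protocol, hostIP, hostPort)
}

// decodeKey splits a "protocol/hostIP/hostPort" key back into its parts.
func decodeKey(key string) (protocol, hostIP, hostPort string) {
	parts := strings.Split(key, "/")
	return parts[0], parts[1], parts[2]
}

// conflicts reports whether a wanted host port collides with any used host
// port on the node: the protocol and port must match, and the IPs collide
// when they are equal or when either side binds the wildcard 0.0.0.0.
func conflicts(wantKey string, usedPorts map[string]bool) bool {
	wantProto, wantIP, wantPort := decodeKey(wantKey)
	for usedKey := range usedPorts {
		usedProto, usedIP, usedPort := decodeKey(usedKey)
		if wantProto != usedProto || wantPort != usedPort {
			continue
		}
		if wantIP == usedIP || wantIP == "0.0.0.0" || usedIP == "0.0.0.0" {
			return true
		}
	}
	return false
}

func main() {
	used := map[string]bool{
		hostPortKey("TCP", "127.0.0.1", 8080): true,
		hostPortKey("UDP", "", 53):            true, // defaults to UDP/0.0.0.0/53
	}

	fmt.Println(conflicts(hostPortKey("TCP", "0.0.0.0", 8080), used))  // true: wildcard vs 127.0.0.1
	fmt.Println(conflicts(hostPortKey("TCP", "10.0.0.2", 8080), used)) // false: different concrete IPs
	fmt.Println(conflicts(hostPortKey("UDP", "127.0.0.1", 53), used))  // true: 0.0.0.0 already holds UDP/53
	fmt.Println(conflicts(hostPortKey("TCP", "127.0.0.1", 53), used))  // false: different protocol
}

The string key is a pragmatic choice: it carries the two extra dimensions (protocol and hostIP) through the existing map[...]bool plumbing in NodeInfo, predicateMetadata, and GetUsedPorts without changing their shape, at the cost of the decode() parsing step in PodFitsHostPorts.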