Merge pull request #14724 from HaiyangDING/PodFitsHostPorts

Replace PodFitsPorts with PodFitsHostPorts
This commit is contained in:
David Oppenheimer 2015-10-03 12:45:35 -07:00
commit b9cfab87e3
6 changed files with 15 additions and 7 deletions

View File

@@ -41,7 +41,7 @@ The purpose of filtering the nodes is to filter out the nodes that do not meet c
- `NoDiskConflict`: Evaluate if a pod can fit due to the volumes it requests, and those that are already mounted.
- `PodFitsResources`: Check if the free resource (CPU and Memory) meets the requirement of the Pod. The free resource is measured by the capacity minus the sum of requests of all Pods on the node. To learn more about the resource QoS in Kubernetes, please check [QoS proposal](../proposals/resource-qos.md).
- `PodFitsPorts`: Check if any HostPort required by the Pod is already occupied on the node.
- `PodFitsHostPorts`: Check if any HostPort required by the Pod is already occupied on the node.
- `PodFitsHost`: Filter out all nodes except the one specified in the PodSpec's NodeName field.
- `PodSelectorMatches`: Check if the labels of the node match the labels specified in the Pod's `nodeSelector` field ([Here](../user-guide/node-selection/) is an example of how to use `nodeSelector` field).
- `CheckNodeLabelPresence`: Check if all the specified labels exist on a node or not, regardless of the value.

View File

@@ -356,7 +356,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api
return affinitySelector.Matches(labels.Set(node.Labels)), nil
}
func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
func PodFitsHostPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
existingPorts := getUsedPorts(existingPods...)
wantPorts := getUsedPorts(pod)
for wport := range wantPorts {

View File

@@ -244,7 +244,7 @@ func newPod(host string, hostPorts ...int) *api.Pod {
}
}
func TestPodFitsPorts(t *testing.T) {
func TestPodFitsHostPorts(t *testing.T) {
tests := []struct {
pod *api.Pod
existingPods []*api.Pod
@@ -291,7 +291,7 @@ func TestPodFitsPorts(t *testing.T) {
},
}
for _, test := range tests {
fits, err := PodFitsPorts(test.pod, test.existingPods, "machine")
fits, err := PodFitsHostPorts(test.pod, test.existingPods, "machine")
if err != nil {
t.Errorf("unexpected error: %v", err)
}

View File

@@ -69,11 +69,16 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
// Do not change this JSON after 1.1 is tagged. A failure indicates backwards compatibility with 1.1 was broken.
"1.1": {
JSON: `{
"priorities": [
"predicates": [
{"name": "PodFitsHostPorts"}
],"priorities": [
{"name": "SelectorSpreadPriority", "weight": 2}
]
}`,
ExpectedPolicy: schedulerapi.Policy{
Predicates: []schedulerapi.PredicatePolicy{
{Name: "PodFitsHostPorts"},
},
Priorities: []schedulerapi.PriorityPolicy{
{Name: "SelectorSpreadPriority", Weight: 2},
},

View File

@@ -46,12 +46,15 @@ func init() {
Weight: 1,
},
)
// PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding.
// For backwards compatibility with 1.0, PodFitsPorts is registered as well.
factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts)
}
func defaultPredicates() sets.String {
return sets.NewString(
// Fit is defined based on the absence of port conflicts.
factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsPorts),
factory.RegisterFitPredicate("PodFitsHostPorts", predicates.PodFitsHostPorts),
// Fit is determined by resource availability.
factory.RegisterFitPredicateFactory(
"PodFitsResources",

View File

@@ -188,7 +188,7 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
// Create the scheduler config
algo := NewGenericScheduler(
map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts},
[]algorithm.PriorityConfig{},
modeler.PodLister(),
rand.New(rand.NewSource(time.Now().UnixNano())))