Remove nodeName from predicate signature.

Wojciech Tyczynski 2016-04-28 16:51:17 +02:00
parent 7e462c2310
commit a51f266ebf
8 changed files with 150 additions and 112 deletions
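In short: every predicate loses its nodeName string parameter. The *api.Node now travels inside the schedulercache.NodeInfo that callers already pass, and each predicate reads it back with nodeInfo.Node(), returning a plain "node not found" error when it is nil instead of formatting the name it used to receive. A compilable sketch of the new shape, not part of the commit (somePredicate is a made-up name; the nil guard mirrors the ones added throughout predicates.go below):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

// somePredicate follows the new signature: no nodeName argument.
func somePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
	node := nodeInfo.Node() // set earlier by the caller via nodeInfo.SetNode(...)
	if node == nil {
		return false, fmt.Errorf("node not found")
	}
	// The node's identity now comes from the object itself, not a separate argument.
	if pod.Spec.NodeName != "" && pod.Spec.NodeName != node.Name {
		return false, fmt.Errorf("pod %q is bound to node %q", pod.Name, pod.Spec.NodeName)
	}
	return true, nil
}

func main() {
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}})
	fmt.Println(somePredicate(&api.Pod{}, nodeInfo))
}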


@ -2339,7 +2339,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
}
nodeInfo := schedulercache.NewNodeInfo(otherPods...)
nodeInfo.SetNode(node)
fit, err := predicates.GeneralPredicates(pod, kl.nodeName, nodeInfo)
fit, err := predicates.GeneralPredicates(pod, nodeInfo)
if !fit {
if re, ok := err.(*predicates.PredicateFailureError); ok {
reason := re.PredicateName

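The kubelet hunk above also shows the caller's half of the new contract: with nodeName gone, whoever invokes a predicate must attach the node to the NodeInfo first, otherwise Node() returns nil and the predicate fails. A minimal sketch of that calling pattern (canAdmit is a hypothetical wrapper; the three statements are the ones visible in the hunk, with import paths taken from the test file further down):

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func canAdmit(node *api.Node, otherPods []*api.Pod, pod *api.Pod) (bool, error) {
	nodeInfo := schedulercache.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node) // mandatory now: a NodeInfo without a node fails every predicate
	return predicates.GeneralPredicates(pod, nodeInfo)
}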

@ -101,7 +101,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids two pods that share at least one monitor and use the same pool and image.
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func NoDiskConflict(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {
@ -177,7 +177,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace
return nil
}
func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
newVolumes := make(map[string]bool)
if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
return false, err
@ -275,10 +275,10 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
return c.predicate
}
func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
nodeConstraints := make(map[string]string)
@ -335,7 +335,7 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *s
}
nodeV, _ := nodeConstraints[k]
if v != nodeV {
glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, nodeName, pvName, k)
glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
return false, ErrVolumeZoneConflict
}
}
@ -391,10 +391,10 @@ func podName(pod *api.Pod) string {
return pod.Namespace + "/" + pod.Name
}
func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
allocatable := node.Status.Allocatable
allowedPodNumber := allocatable.Pods().Value()
@ -419,14 +419,10 @@ func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *scheduler
newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
}
glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
podName(pod), nodeName, len(nodeInfo.Pods()), allowedPodNumber)
podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
return true, nil
}
func PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
return podFitsResourcesInternal(pod, nodeName, nodeInfo)
}
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool {
@ -496,10 +492,10 @@ func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
return nodeAffinityMatches
}
func PodSelectorMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PodSelectorMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
if PodMatchesNodeLabels(pod, node) {
return true, nil
@ -507,11 +503,15 @@ func PodSelectorMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.
return false, ErrNodeSelectorNotMatch
}
func PodFitsHost(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PodFitsHost(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
if len(pod.Spec.NodeName) == 0 {
return true, nil
}
if pod.Spec.NodeName == nodeName {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found")
}
if pod.Spec.NodeName == node.Name {
return true, nil
}
return false, ErrPodNotMatchHostName
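One behavioral nuance of this hunk: the old PodFitsHost could answer from the name alone, but the new version needs a populated NodeInfo even for a plain string comparison, which is why the TestPodFitsHost rewrite below swaps its fixtures from a node name to an *api.Node plus SetNode. A small usage sketch under the same assumed imports as the sketch above (fitsOnFoo is hypothetical):

func fitsOnFoo(pod *api.Pod) (bool, error) {
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}})
	return predicates.PodFitsHost(pod, nodeInfo) // compares pod.Spec.NodeName to "foo"
}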
@ -542,10 +542,10 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
var exists bool
@ -585,7 +585,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
// - some other pod from the same service is already scheduled onto a node that has value V for label L
func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
var affinitySelector labels.Selector
// check if the pod being scheduled has the affinity labels specified in its NodeSelector
@ -645,9 +645,9 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, no
affinitySelector = labels.Set(affinityLabels).AsSelector()
}
node, err := s.nodeInfo.GetNodeInfo(nodeName)
if err != nil {
return false, err
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found")
}
// check if the node matches the selector
@ -657,7 +657,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, no
return false, ErrServiceAffinityViolated
}
func PodFitsHostPorts(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PodFitsHostPorts(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
wantPorts := getUsedPorts(pod)
if len(wantPorts) == 0 {
return true, nil
@ -703,21 +703,21 @@ func haveSame(a1, a2 []string) bool {
return false
}
func GeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
fit, err := podFitsResourcesInternal(pod, nodeName, nodeInfo)
func GeneralPredicates(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
fit, err := PodFitsResources(pod, nodeInfo)
if !fit {
return fit, err
}
fit, err = PodFitsHost(pod, nodeName, nodeInfo)
fit, err = PodFitsHost(pod, nodeInfo)
if !fit {
return fit, err
}
fit, err = PodFitsHostPorts(pod, nodeName, nodeInfo)
fit, err = PodFitsHostPorts(pod, nodeInfo)
if !fit {
return fit, err
}
fit, err = PodSelectorMatches(pod, nodeName, nodeInfo)
fit, err = PodSelectorMatches(pod, nodeInfo)
if !fit {
return fit, err
}
@ -739,10 +739,10 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu
return checker.InterPodAffinityMatches
}
func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node, err := checker.info.GetNodeInfo(nodeName)
if err != nil {
return false, err
func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found")
}
allPods, err := checker.podLister.List(labels.Everything())
if err != nil {


@ -162,7 +162,7 @@ func TestPodFitsResources(t *testing.T) {
node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}}
test.nodeInfo.SetNode(&node)
fits, err := PodFitsResources(test.pod, "machine", test.nodeInfo)
fits, err := PodFitsResources(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, test.wErr) {
t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
}
@ -207,7 +207,7 @@ func TestPodFitsResources(t *testing.T) {
node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1)}}
test.nodeInfo.SetNode(&node)
fits, err := PodFitsResources(test.pod, "machine", test.nodeInfo)
fits, err := PodFitsResources(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, test.wErr) {
t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
}
@ -220,13 +220,13 @@ func TestPodFitsResources(t *testing.T) {
func TestPodFitsHost(t *testing.T) {
tests := []struct {
pod *api.Pod
node string
node *api.Node
fits bool
test string
}{
{
pod: &api.Pod{},
node: "foo",
node: &api.Node{},
fits: true,
test: "no host specified",
},
@ -236,7 +236,11 @@ func TestPodFitsHost(t *testing.T) {
NodeName: "foo",
},
},
node: "foo",
node: &api.Node{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
fits: true,
test: "host matches",
},
@ -246,14 +250,20 @@ func TestPodFitsHost(t *testing.T) {
NodeName: "bar",
},
},
node: "foo",
node: &api.Node{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
fits: false,
test: "host doesn't match",
},
}
for _, test := range tests {
result, err := PodFitsHost(test.pod, test.node, schedulercache.NewNodeInfo())
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(test.node)
result, err := PodFitsHost(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrPodNotMatchHostName) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -326,7 +336,7 @@ func TestPodFitsHostPorts(t *testing.T) {
},
}
for _, test := range tests {
fits, err := PodFitsHostPorts(test.pod, "machine", test.nodeInfo)
fits, err := PodFitsHostPorts(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, ErrPodNotFitsHostPorts) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -411,7 +421,7 @@ func TestDiskConflicts(t *testing.T) {
}
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, "machine", test.nodeInfo)
ok, err := NoDiskConflict(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -463,7 +473,7 @@ func TestAWSDiskConflicts(t *testing.T) {
}
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, "machine", test.nodeInfo)
ok, err := NoDiskConflict(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -521,7 +531,7 @@ func TestRBDDiskConflicts(t *testing.T) {
}
for _, test := range tests {
ok, err := NoDiskConflict(test.pod, "machine", test.nodeInfo)
ok, err := NoDiskConflict(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -999,7 +1009,7 @@ func TestPodFitsSelector(t *testing.T) {
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(&node)
fits, err := PodSelectorMatches(test.pod, "machine", nodeInfo)
fits, err := PodSelectorMatches(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1064,7 +1074,7 @@ func TestNodeLabelPresence(t *testing.T) {
nodeInfo.SetNode(&node)
labelChecker := NodeLabelChecker{test.labels, test.presence}
fits, err := labelChecker.CheckNodeLabelPresence(test.pod, "machine", nodeInfo)
fits, err := labelChecker.CheckNodeLabelPresence(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrNodeLabelPresenceViolated) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1104,28 +1114,28 @@ func TestServiceAffinity(t *testing.T) {
pod *api.Pod
pods []*api.Pod
services []api.Service
node string
node *api.Node
labels []string
fits bool
test string
}{
{
pod: new(api.Pod),
node: "machine1",
node: &node1,
fits: true,
labels: []string{"region"},
test: "nothing scheduled",
},
{
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
node: "machine1",
node: &node1,
fits: true,
labels: []string{"region"},
test: "pod with region label match",
},
{
pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
node: "machine1",
node: &node1,
fits: false,
labels: []string{"region"},
test: "pod with region label mismatch",
@ -1133,7 +1143,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
@ -1142,7 +1152,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
@ -1151,7 +1161,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region"},
@ -1160,7 +1170,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}},
fits: true,
labels: []string{"region"},
@ -1169,7 +1179,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: true,
labels: []string{"region"},
@ -1178,7 +1188,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
fits: false,
labels: []string{"region"},
@ -1187,7 +1197,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine1",
node: &node1,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region", "zone"},
@ -1196,7 +1206,7 @@ func TestServiceAffinity(t *testing.T) {
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
node: "machine4",
node: &node4,
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region", "zone"},
@ -1207,7 +1217,9 @@ func TestServiceAffinity(t *testing.T) {
for _, test := range tests {
nodes := []api.Node{node1, node2, node3, node4, node5}
serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, test.node, schedulercache.NewNodeInfo())
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(test.node)
fits, err := serviceAffinity.CheckServiceAffinity(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrServiceAffinityViolated) && err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1430,7 +1442,7 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
for _, test := range tests {
pred := NewMaxPDVolumeCountPredicate(filter, test.maxVols, pvInfo, pvcInfo)
fits, err := pred(test.newPod, "some-node", schedulercache.NewNodeInfo(test.existingPods...))
fits, err := pred(test.newPod, schedulercache.NewNodeInfo(test.existingPods...))
if err != nil && !reflect.DeepEqual(err, ErrMaxVolumeCountExceeded) {
t.Errorf("unexpected error: %v", err)
}
@ -1505,7 +1517,6 @@ func newPodWithPort(hostPorts ...int) *api.Pod {
func TestRunGeneralPredicates(t *testing.T) {
resourceTests := []struct {
pod *api.Pod
nodeName string
nodeInfo *schedulercache.NodeInfo
node *api.Node
fits bool
@ -1513,21 +1524,25 @@ func TestRunGeneralPredicates(t *testing.T) {
wErr error
}{
{
pod: &api.Pod{},
nodeName: "machine1",
pod: &api.Pod{},
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}},
node: &api.Node{
ObjectMeta: api.ObjectMeta{Name: "machine1"},
Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)},
},
fits: true,
wErr: nil,
test: "no resources/port/host requested always fits",
},
{
pod: newResourcePod(resourceRequest{milliCPU: 8, memory: 10}),
nodeName: "machine1",
pod: newResourcePod(resourceRequest{milliCPU: 8, memory: 10}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}},
node: &api.Node{
ObjectMeta: api.ObjectMeta{Name: "machine1"},
Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)},
},
fits: false,
wErr: newInsufficientResourceError("CPU", 8, 5, 10),
test: "not enough cpu resource",
@ -1538,26 +1553,30 @@ func TestRunGeneralPredicates(t *testing.T) {
NodeName: "machine2",
},
},
nodeName: "machine1",
nodeInfo: schedulercache.NewNodeInfo(),
node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}},
fits: false,
wErr: ErrPodNotMatchHostName,
test: "host not match",
node: &api.Node{
ObjectMeta: api.ObjectMeta{Name: "machine1"},
Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)},
},
fits: false,
wErr: ErrPodNotMatchHostName,
test: "host not match",
},
{
pod: newPodWithPort(123),
nodeName: "machine1",
nodeInfo: schedulercache.NewNodeInfo(newPodWithPort(123)),
node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}},
fits: false,
wErr: ErrPodNotFitsHostPorts,
test: "hostport conflict",
node: &api.Node{
ObjectMeta: api.ObjectMeta{Name: "machine1"},
Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)},
},
fits: false,
wErr: ErrPodNotFitsHostPorts,
test: "hostport conflict",
},
}
for _, test := range resourceTests {
test.nodeInfo.SetNode(test.node)
fits, err := GeneralPredicates(test.pod, test.nodeName, test.nodeInfo)
fits, err := GeneralPredicates(test.pod, test.nodeInfo)
if !reflect.DeepEqual(err, test.wErr) {
t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
}
@ -1578,13 +1597,13 @@ func TestInterPodAffinity(t *testing.T) {
tests := []struct {
pod *api.Pod
pods []*api.Pod
node api.Node
node *api.Node
fits bool
test string
}{
{
pod: new(api.Pod),
node: node1,
node: &node1,
fits: true,
test: "A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods",
},
@ -1610,7 +1629,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: true,
test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
},
@ -1636,7 +1655,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: true,
test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
},
@ -1662,7 +1681,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel, Namespace: "ns"}}},
node: node1,
node: &node1,
fits: false,
test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
},
@ -1687,7 +1706,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: false,
test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
},
@ -1730,7 +1749,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: true,
test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
},
@ -1773,7 +1792,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: false,
test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node becasue one of the matchExpression item don't match.",
},
@ -1811,7 +1830,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: true,
test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod",
},
@ -1855,7 +1874,7 @@ func TestInterPodAffinity(t *testing.T) {
// },
// },
// pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podlabel}}},
// node: node1,
// node: &node1,
// fits: true,
// test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ",
//},
@ -1910,7 +1929,7 @@ func TestInterPodAffinity(t *testing.T) {
}}`,
}},
}},
node: node1,
node: &node1,
fits: true,
test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
},
@ -1948,7 +1967,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: false,
test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
},
@ -2003,7 +2022,7 @@ func TestInterPodAffinity(t *testing.T) {
}}`,
}},
}},
node: node1,
node: &node1,
fits: false,
test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
},
@ -2029,7 +2048,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}},
node: node1,
node: &node1,
fits: false,
test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
},
@ -2044,11 +2063,13 @@ func TestInterPodAffinity(t *testing.T) {
}
fit := PodAffinityChecker{
info: FakeNodeInfo(node),
info: FakeNodeInfo(*node),
podLister: algorithm.FakePodLister(test.pods),
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
}
fits, err := fit.InterPodAffinityMatches(test.pod, test.node.Name, schedulercache.NewNodeInfo(podsOnNode...))
nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(test.node)
fits, err := fit.InterPodAffinityMatches(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
t.Errorf("%s: unexpected error %v", test.test, err)
}
@ -2212,7 +2233,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
podLister: algorithm.FakePodLister(test.pods),
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
}
fits, err := testFit.InterPodAffinityMatches(test.pod, node.Name, schedulercache.NewNodeInfo(podsOnNode...))
nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(&node)
fits, err := testFit.InterPodAffinityMatches(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
t.Errorf("%s: unexpected error %v", test.test, err)
}
@ -2223,7 +2246,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
if affinity.NodeAffinity != nil {
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(&node)
fits2, err := PodSelectorMatches(test.pod, node.Name, nodeInfo)
fits2, err := PodSelectorMatches(test.pod, nodeInfo)
if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
t.Errorf("unexpected error: %v", err)
}


@ -24,7 +24,7 @@ import (
// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error)
type FitPredicate func(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error)
type PriorityFunction func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister NodeLister) (schedulerapi.HostPriorityList, error)
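Third-party predicates have to adopt the same shape, since the name parameter is simply gone from the type. A sketch of a minimal custom FitPredicate (the disktype label is invented for illustration; the final line is a compile-time check against the new type):

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func nodeHasSSD(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, fmt.Errorf("node not found")
	}
	if node.Labels["disktype"] == "ssd" { // hypothetical label
		return true, nil
	}
	return false, fmt.Errorf("node %q lacks disktype=ssd", node.Name)
}

var _ algorithm.FitPredicate = nodeHasSSD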


@ -116,11 +116,11 @@ func TestCreateFromEmptyConfig(t *testing.T) {
factory.CreateFromConfig(policy)
}
func PredicateOne(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PredicateOne(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
return true, nil
}
func PredicateTwo(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func PredicateTwo(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
return true, nil
}


@ -144,7 +144,7 @@ func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
checkNode := func(i int) {
nodeName := nodes.Items[i].Name
fits, failedPredicate, err := podFitsOnNode(pod, nodeName, nodeNameToInfo[nodeName], predicateFuncs)
fits, failedPredicate, err := podFitsOnNode(pod, nodeNameToInfo[nodeName], predicateFuncs)
predicateResultLock.Lock()
defer predicateResultLock.Unlock()
@ -179,9 +179,9 @@ func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
}
// Checks whether the node described by the given NodeInfo satisfies all predicateFuncs.
func podFitsOnNode(pod *api.Pod, nodeName string, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, string, error) {
func podFitsOnNode(pod *api.Pod, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, string, error) {
for _, predicate := range predicateFuncs {
fit, err := predicate(pod, nodeName, info)
fit, err := predicate(pod, info)
if err != nil {
switch e := err.(type) {
case *predicates.InsufficientResourceError:


@ -23,32 +23,37 @@ import (
"reflect"
"strconv"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
algorithmpredicates "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
)
func falsePredicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func falsePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
return false, algorithmpredicates.ErrFakePredicate
}
func truePredicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func truePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
return true, nil
}
func matchesPredicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
if pod.Name == nodeName {
func matchesPredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found")
}
if pod.Name == node.Name {
return true, nil
}
return false, algorithmpredicates.ErrFakePredicate
}
func hasNoPodsPredicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
func hasNoPodsPredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
if len(nodeInfo.Pods()) == 0 {
return true, nil
}
@ -273,7 +278,14 @@ func TestGenericScheduler(t *testing.T) {
}
for _, test := range tests {
random := rand.New(rand.NewSource(0))
scheduler := NewGenericScheduler(schedulertesting.PodsToCache(test.pods), test.predicates, test.prioritizers, []algorithm.SchedulerExtender{}, random)
cache := schedulercache.New(time.Duration(0), wait.NeverStop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
for _, name := range test.nodes {
cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
}
scheduler := NewGenericScheduler(cache, test.predicates, test.prioritizers, []algorithm.SchedulerExtender{}, random)
machine, err := scheduler.Schedule(test.pod, algorithm.FakeNodeLister(makeNodeList(test.nodes)))
if test.expectsErr {
if err == nil {
@ -328,6 +340,9 @@ func TestFindFitSomeError(t *testing.T) {
"2": schedulercache.NewNodeInfo(),
"1": schedulercache.NewNodeInfo(pod),
}
for name := range nodeNameToInfo {
nodeNameToInfo[name].SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
}
_, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, predicates, makeNodeList(nodes), nil)
if err != nil && !reflect.DeepEqual(err, algorithmpredicates.ErrFakePredicate) {


@ -35,14 +35,14 @@ var (
// It automatically starts a go routine that manages expiration of assumed pods.
// "ttl" is how long the assumed pod will get expired.
// "stop" is the channel that would close the background goroutine.
func New(ttl time.Duration, stop chan struct{}) Cache {
func New(ttl time.Duration, stop <-chan struct{}) Cache {
cache := newSchedulerCache(ttl, cleanAssumedPeriod, stop)
cache.run()
return cache
}
type schedulerCache struct {
stop chan struct{}
stop <-chan struct{}
ttl time.Duration
period time.Duration
@ -62,7 +62,7 @@ type podState struct {
deadline *time.Time
}
func newSchedulerCache(ttl, period time.Duration, stop chan struct{}) *schedulerCache {
func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedulerCache {
return &schedulerCache{
ttl: ttl,
period: period,
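The stop parameter's widening from chan struct{} to <-chan struct{} is what lets the updated TestGenericScheduler above hand schedulercache.New the package-level wait.NeverStop channel, which is declared receive-only; the cache only ever receives on stop, so the receive-only type is the more permissive, and more honest, signature. A small sketch of both call styles, assuming only packages this diff already imports:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func main() {
	// A bidirectional channel still satisfies <-chan struct{} ...
	stop := make(chan struct{})
	defer close(stop)
	_ = schedulercache.New(30*time.Second, stop)

	// ... and wait.NeverStop (a <-chan struct{}) now compiles too, which was
	// impossible while New demanded a bidirectional chan struct{}.
	_ = schedulercache.New(30*time.Second, wait.NeverStop)
}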