Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)
Merge pull request #28670 from wojtek-t/scheduler_metadata

Automatic merge from submit-queue.

Add meta field to predicate signature to avoid computing the same things multiple times.

This PR only uses it to avoid computing the QOS of a pod for every node from scratch.

Ref #28590
This commit is contained in: commit a261776f3e
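The diff below threads a new meta argument through every FitPredicate. To make the idea easier to follow, here is a minimal, self-contained sketch of that pattern, using simplified stand-in types (Pod, NodeInfo, Requests, MemoryPressure and this isPodBestEffort are illustrative only, not the real Kubernetes API): per-pod facts are computed once by PredicateMetadata and reused for every node, with a fallback to recomputing them when no metadata is supplied, mirroring the predicateMetadata/podBestEffort change in the diff.

// Sketch only: simplified types, not the scheduler's real API.
package main

import "fmt"

type Pod struct {
	Name     string
	Requests map[string]int64 // empty requests ~ BestEffort in this sketch
}

type NodeInfo struct {
	Name           string
	MemoryPressure bool
}

// predicateMetadata carries values that are identical for every node a pod is
// checked against, so they are computed only once per scheduling attempt.
type predicateMetadata struct {
	podBestEffort bool
}

func isPodBestEffort(pod *Pod) bool {
	return len(pod.Requests) == 0
}

// PredicateMetadata mirrors the new helper: a nil pod means "no metadata".
func PredicateMetadata(pod *Pod) interface{} {
	if pod == nil {
		return nil
	}
	return &predicateMetadata{podBestEffort: isPodBestEffort(pod)}
}

// CheckNodeMemoryPressure shows the new predicate shape (pod, meta, nodeInfo):
// it uses the precomputed answer when available and recomputes it otherwise.
func CheckNodeMemoryPressure(pod *Pod, meta interface{}, nodeInfo *NodeInfo) (bool, error) {
	var podBestEffort bool
	if m, ok := meta.(*predicateMetadata); ok {
		podBestEffort = m.podBestEffort
	} else {
		// No (or unexpected) metadata - fall back to computing it from scratch.
		podBestEffort = isPodBestEffort(pod)
	}
	if !podBestEffort {
		return true, nil
	}
	return !nodeInfo.MemoryPressure, nil
}

func main() {
	pod := &Pod{Name: "best-effort-pod"}
	meta := PredicateMetadata(pod) // computed once ...
	nodes := []*NodeInfo{{Name: "n1"}, {Name: "n2", MemoryPressure: true}}
	for _, n := range nodes { // ... reused for every node
		fit, _ := CheckNodeMemoryPressure(pod, meta, n)
		fmt.Printf("%s fits on %s: %v\n", pod.Name, n.Name, fit)
	}
}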
@@ -2232,7 +2232,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
     }
     nodeInfo := schedulercache.NewNodeInfo(otherPods...)
     nodeInfo.SetNode(node)
-    fit, err := predicates.GeneralPredicates(pod, nodeInfo)
+    fit, err := predicates.GeneralPredicates(pod, nil, nodeInfo)
     if !fit {
         if re, ok := err.(*predicates.PredicateFailureError); ok {
             reason := re.PredicateName
@@ -24,11 +24,27 @@ import (
     "io/ioutil"
     "strings"
     "unicode"
+
+    go2idlparser "k8s.io/kubernetes/cmd/libs/go2idl/parser"
+    "k8s.io/kubernetes/cmd/libs/go2idl/types"
 )

 // GetPublicFunctions lists all public functions (not methods) from a golang source file.
-func GetPublicFunctions(filePath string) ([]string, error) {
-    var functionNames []string
+func GetPublicFunctions(pkg, filePath string) ([]*types.Type, error) {
+    builder := go2idlparser.New()
+    data, err := ioutil.ReadFile(filePath)
+    if err != nil {
+        return nil, err
+    }
+    if err := builder.AddFile(pkg, filePath, data); err != nil {
+        return nil, err
+    }
+    universe, err := builder.FindTypes()
+    if err != nil {
+        return nil, err
+    }
+
+    var functions []*types.Type

     // Create the AST by parsing src.
     fset := token.NewFileSet() // positions are relative to fset
@@ -45,13 +61,13 @@ func GetPublicFunctions(filePath string) ([]string, error) {
             s = x.Name.Name
             // It's a function (not method), and is public, record it.
             if x.Recv == nil && isPublic(s) {
-                functionNames = append(functionNames, s)
+                functions = append(functions, universe[pkg].Function(x.Name.Name))
             }
         }
         return true
     })

-    return functionNames, nil
+    return functions, nil
 }

 // isPublic checks if a given string is a public function name.
@@ -65,6 +65,22 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
     return node.(*api.Node), nil
 }

+// podMetadata defines a type, that is an expected type that is passed
+// as metadata for predicate functions
+type predicateMetadata struct {
+    podBestEffort bool
+}
+
+func PredicateMetadata(pod *api.Pod) interface{} {
+    if pod == nil {
+        // We cannot compute metadata, just return nil
+        return nil
+    }
+    return &predicateMetadata{
+        podBestEffort: isPodBestEffort(pod),
+    }
+}
+
 func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
     // fast path if there is no conflict checking targets.
     if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
@@ -106,7 +122,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
 // - AWS EBS forbids any two pods mounting the same volume ID
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func NoDiskConflict(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     for _, v := range pod.Spec.Volumes {
         for _, ev := range nodeInfo.Pods() {
             if isVolumeConflict(v, ev) {
@@ -203,7 +219,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace
     return nil
 }

-func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -307,7 +323,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
     return c.predicate
 }

-func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -450,7 +466,7 @@ func podName(pod *api.Pod) string {
     return pod.Namespace + "/" + pod.Name
 }

-func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
@@ -556,7 +572,7 @@ func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
     return nodeAffinityMatches
 }

-func PodSelectorMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
@@ -567,7 +583,7 @@ func PodSelectorMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool,
     return false, ErrNodeSelectorNotMatch
 }

-func PodFitsHost(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsHost(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     if len(pod.Spec.NodeName) == 0 {
         return true, nil
     }
@@ -606,7 +622,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
@@ -649,7 +665,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 // - L is listed in the ServiceAffinity object that is passed into the function
 // - the pod does not have any NodeSelector for L
 // - some other pod from the same service is already scheduled onto a node that has value V for label L
-func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     var affinitySelector labels.Selector

     // check if the pod being scheduled has the affinity labels specified in its NodeSelector
@@ -721,7 +737,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeInfo *scheduler
     return false, ErrServiceAffinityViolated
 }

-func PodFitsHostPorts(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     wantPorts := getUsedPorts(pod)
     if len(wantPorts) == 0 {
         return true, nil
@@ -767,21 +783,21 @@ func haveSame(a1, a2 []string) bool {
     return false
 }

-func GeneralPredicates(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
-    fit, err := PodFitsResources(pod, nodeInfo)
+func GeneralPredicates(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+    fit, err := PodFitsResources(pod, meta, nodeInfo)
     if !fit {
         return fit, err
     }

-    fit, err = PodFitsHost(pod, nodeInfo)
+    fit, err = PodFitsHost(pod, meta, nodeInfo)
     if !fit {
         return fit, err
     }
-    fit, err = PodFitsHostPorts(pod, nodeInfo)
+    fit, err = PodFitsHostPorts(pod, meta, nodeInfo)
     if !fit {
         return fit, err
     }
-    fit, err = PodSelectorMatches(pod, nodeInfo)
+    fit, err = PodSelectorMatches(pod, meta, nodeInfo)
     if !fit {
         return fit, err
     }
@@ -803,7 +819,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu
     return checker.InterPodAffinityMatches
 }

-func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
@@ -994,7 +1010,7 @@ func NewTolerationMatchPredicate(info NodeInfo) algorithm.FitPredicate {
     return tolerationMatch.PodToleratesNodeTaints
 }

-func (t *TolerationMatch) PodToleratesNodeTaints(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (t *TolerationMatch) PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()

     taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
@@ -1045,14 +1061,24 @@ func isPodBestEffort(pod *api.Pod) bool {

 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
 // reporting memory pressure condition.
-func CheckNodeMemoryPressurePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
     }

+    var podBestEffort bool
+
+    predicateMeta, ok := meta.(*predicateMetadata)
+    if ok {
+        podBestEffort = predicateMeta.podBestEffort
+    } else {
+        // We couldn't parse metadata - fallback to computing it.
+        podBestEffort = isPodBestEffort(pod)
+    }
+
     // pod is not BestEffort pod
-    if !isPodBestEffort(pod) {
+    if !podBestEffort {
         return true, nil
     }

@@ -19,10 +19,13 @@ package predicates
 import (
     "fmt"
     "os/exec"
+    "path/filepath"
     "reflect"
     "strings"
     "testing"

+    "k8s.io/kubernetes/cmd/libs/go2idl/parser"
+    "k8s.io/kubernetes/cmd/libs/go2idl/types"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/util/codeinspector"
@@ -226,7 +229,7 @@ func TestPodFitsResources(t *testing.T) {
         node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)}}
         test.nodeInfo.SetNode(&node)

-        fits, err := PodFitsResources(test.pod, test.nodeInfo)
+        fits, err := PodFitsResources(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, test.wErr) {
             t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
         }
@@ -279,7 +282,7 @@ func TestPodFitsResources(t *testing.T) {
         node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}
         test.nodeInfo.SetNode(&node)

-        fits, err := PodFitsResources(test.pod, test.nodeInfo)
+        fits, err := PodFitsResources(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, test.wErr) {
             t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
         }
@@ -335,7 +338,7 @@ func TestPodFitsHost(t *testing.T) {
     for _, test := range tests {
         nodeInfo := schedulercache.NewNodeInfo()
         nodeInfo.SetNode(test.node)
-        result, err := PodFitsHost(test.pod, nodeInfo)
+        result, err := PodFitsHost(test.pod, PredicateMetadata(test.pod), nodeInfo)
         if !reflect.DeepEqual(err, ErrPodNotMatchHostName) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -408,7 +411,7 @@ func TestPodFitsHostPorts(t *testing.T) {
         },
     }
     for _, test := range tests {
-        fits, err := PodFitsHostPorts(test.pod, test.nodeInfo)
+        fits, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, ErrPodNotFitsHostPorts) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -493,7 +496,7 @@ func TestDiskConflicts(t *testing.T) {
     }

     for _, test := range tests {
-        ok, err := NoDiskConflict(test.pod, test.nodeInfo)
+        ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -545,7 +548,7 @@ func TestAWSDiskConflicts(t *testing.T) {
     }

     for _, test := range tests {
-        ok, err := NoDiskConflict(test.pod, test.nodeInfo)
+        ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -603,7 +606,7 @@ func TestRBDDiskConflicts(t *testing.T) {
     }

     for _, test := range tests {
-        ok, err := NoDiskConflict(test.pod, test.nodeInfo)
+        ok, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, ErrDiskConflict) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -1082,7 +1085,7 @@ func TestPodFitsSelector(t *testing.T) {
         nodeInfo := schedulercache.NewNodeInfo()
         nodeInfo.SetNode(&node)

-        fits, err := PodSelectorMatches(test.pod, nodeInfo)
+        fits, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod), nodeInfo)
         if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -1147,7 +1150,7 @@ func TestNodeLabelPresence(t *testing.T) {
         nodeInfo.SetNode(&node)

         labelChecker := NodeLabelChecker{test.labels, test.presence}
-        fits, err := labelChecker.CheckNodeLabelPresence(test.pod, nodeInfo)
+        fits, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod), nodeInfo)
         if !reflect.DeepEqual(err, ErrNodeLabelPresenceViolated) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -1292,7 +1295,7 @@ func TestServiceAffinity(t *testing.T) {
         serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
         nodeInfo := schedulercache.NewNodeInfo()
         nodeInfo.SetNode(test.node)
-        fits, err := serviceAffinity.CheckServiceAffinity(test.pod, nodeInfo)
+        fits, err := serviceAffinity.CheckServiceAffinity(test.pod, PredicateMetadata(test.pod), nodeInfo)
         if !reflect.DeepEqual(err, ErrServiceAffinityViolated) && err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -1573,7 +1576,7 @@ func TestEBSVolumeCountConflicts(t *testing.T) {

     for _, test := range tests {
         pred := NewMaxPDVolumeCountPredicate(filter, test.maxVols, pvInfo, pvcInfo)
-        fits, err := pred(test.newPod, schedulercache.NewNodeInfo(test.existingPods...))
+        fits, err := pred(test.newPod, PredicateMetadata(test.newPod), schedulercache.NewNodeInfo(test.existingPods...))
         if err != nil && !reflect.DeepEqual(err, ErrMaxVolumeCountExceeded) {
             t.Errorf("unexpected error: %v", err)
         }
@@ -1584,8 +1587,26 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
     }
 }

+func getPredicateSignature() (*types.Signature, error) {
+    filePath := "./../types.go"
+    pkgName := filepath.Dir(filePath)
+    builder := parser.New()
+    if err := builder.AddDir(pkgName); err != nil {
+        return nil, err
+    }
+    universe, err := builder.FindTypes()
+    if err != nil {
+        return nil, err
+    }
+    result, ok := universe[pkgName].Types["FitPredicate"]
+    if !ok {
+        return nil, fmt.Errorf("FitPredicate type not defined")
+    }
+    return result.Signature, nil
+}
+
 func TestPredicatesRegistered(t *testing.T) {
-    var functionNames []string
+    var functions []*types.Type

     // Files and directories which predicates may be referenced
     targetFiles := []string{
@@ -1603,27 +1624,42 @@ func TestPredicatesRegistered(t *testing.T) {

     // Get all public predicates in files.
     for _, filePath := range files {
-        functions, err := codeinspector.GetPublicFunctions(filePath)
+        fileFunctions, err := codeinspector.GetPublicFunctions("k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", filePath)
         if err == nil {
-            functionNames = append(functionNames, functions...)
+            functions = append(functions, fileFunctions...)
         } else {
             t.Errorf("unexpected error when parsing %s", filePath)
         }
     }

+    predSignature, err := getPredicateSignature()
+    if err != nil {
+        t.Fatalf("Couldn't get predicates signature")
+    }
+
     // Check if all public predicates are referenced in target files.
-    for _, functionName := range functionNames {
-        args := []string{"-rl", functionName}
+    for _, function := range functions {
+        // Ignore functions that doesn't match FitPredicate signature.
+        signature := function.Underlying.Signature
+        if len(predSignature.Parameters) != len(signature.Parameters) {
+            continue
+        }
+        if len(predSignature.Results) != len(signature.Results) {
+            continue
+        }
+        // TODO: Check exact types of parameters and results.
+
+        args := []string{"-rl", function.Name.Name}
         args = append(args, targetFiles...)

         err := exec.Command("grep", args...).Run()
         if err != nil {
             switch err.Error() {
             case "exit status 2":
-                t.Errorf("unexpected error when checking %s", functionName)
+                t.Errorf("unexpected error when checking %s", function.Name)
             case "exit status 1":
                 t.Errorf("predicate %s is implemented as public but seems not registered or used in any other place",
-                    functionName)
+                    function.Name)
             }
         }
     }
@@ -1734,7 +1770,7 @@ func TestRunGeneralPredicates(t *testing.T) {
     }
     for _, test := range resourceTests {
         test.nodeInfo.SetNode(test.node)
-        fits, err := GeneralPredicates(test.pod, test.nodeInfo)
+        fits, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
         if !reflect.DeepEqual(err, test.wErr) {
             t.Errorf("%s: unexpected error: %v, want: %v", test.test, err, test.wErr)
         }
@@ -2227,7 +2263,7 @@ func TestInterPodAffinity(t *testing.T) {
         }
         nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
         nodeInfo.SetNode(test.node)
-        fits, err := fit.InterPodAffinityMatches(test.pod, nodeInfo)
+        fits, err := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod), nodeInfo)
         if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
             t.Errorf("%s: unexpected error %v", test.test, err)
         }
@@ -2393,7 +2429,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
            }
            nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
            nodeInfo.SetNode(&node)
-           fits, err := testFit.InterPodAffinityMatches(test.pod, nodeInfo)
+           fits, err := testFit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod), nodeInfo)
            if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil {
                t.Errorf("%s: unexpected error %v", test.test, err)
            }
@@ -2404,7 +2440,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
            if affinity.NodeAffinity != nil {
                nodeInfo := schedulercache.NewNodeInfo()
                nodeInfo.SetNode(&node)
-               fits2, err := PodSelectorMatches(test.pod, nodeInfo)
+               fits2, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod), nodeInfo)
                if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil {
                    t.Errorf("unexpected error: %v", err)
                }
@@ -2691,7 +2727,7 @@ func TestPodToleratesTaints(t *testing.T) {
        tolerationMatch := TolerationMatch{FakeNodeInfo(test.node)}
        nodeInfo := schedulercache.NewNodeInfo()
        nodeInfo.SetNode(&test.node)
-       fits, err := tolerationMatch.PodToleratesNodeTaints(test.pod, nodeInfo)
+       fits, err := tolerationMatch.PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod), nodeInfo)
        if fits == false && !reflect.DeepEqual(err, ErrTaintsTolerationsNotMatch) {
            t.Errorf("%s, unexpected error: %v", test.test, err)
        }
@@ -2797,7 +2833,7 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
    }

    for _, test := range tests {
-       fits, err := CheckNodeMemoryPressurePredicate(test.pod, test.nodeInfo)
+       fits, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod), test.nodeInfo)
        if fits != test.fits {
            t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
        }
@@ -53,7 +53,8 @@ func calculateResourceOccupancy(pod *api.Pod, node *api.Node, nodeInfo *schedule

     // Add the resources requested by the current pod being scheduled.
     // This also helps differentiate between differently sized, but empty, nodes.
-    for _, container := range pod.Spec.Containers {
+    for i := range pod.Spec.Containers {
+        container := &pod.Spec.Containers[i]
         cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
         totalMilliCPU += cpu
         totalMemory += memory
@@ -156,11 +157,11 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
         return nil, err
     }

-    for _, container := range pod.Spec.Containers {
-        for i := range nodes.Items {
-            node := &nodes.Items[i]
+    for i := range pod.Spec.Containers {
+        for j := range nodes.Items {
+            node := &nodes.Items[j]
             // Check if this container's image is present and get its size.
-            imageSize := checkContainerImageOnNode(node, container)
+            imageSize := checkContainerImageOnNode(node, &pod.Spec.Containers[i])
             // Add this size to the total result of this node.
             sumSizeMap[node.Name] += imageSize
         }
@@ -177,7 +178,7 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
 }

 // checkContainerImageOnNode checks if a container image is present on a node and returns its size.
-func checkContainerImageOnNode(node *api.Node, container api.Container) int64 {
+func checkContainerImageOnNode(node *api.Node, container *api.Container) int64 {
     for _, image := range node.Status.Images {
         for _, name := range image.Names {
             if container.Image == name {
@@ -236,7 +237,8 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node *api.Node, nodeInfo
     score := int(0)
     // Add the resources requested by the current pod being scheduled.
     // This also helps differentiate between differently sized, but empty, nodes.
-    for _, container := range pod.Spec.Containers {
+    for i := range pod.Spec.Containers {
+        container := &pod.Spec.Containers[i]
         cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
         totalMilliCPU += cpu
         totalMemory += memory
@@ -23,6 +23,7 @@ import (
     "strconv"
     "testing"

+    "k8s.io/kubernetes/cmd/libs/go2idl/types"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/apis/extensions"
@@ -887,7 +888,7 @@ func makeImageNode(node string, status api.NodeStatus) api.Node {
 }

 func TestPrioritiesRegistered(t *testing.T) {
-    var functionNames []string
+    var functions []*types.Type

     // Files and directories which priorities may be referenced
     targetFiles := []string{
@@ -904,27 +905,27 @@ func TestPrioritiesRegistered(t *testing.T) {

     // Get all public priorities in files.
     for _, filePath := range files {
-        functions, err := codeinspector.GetPublicFunctions(filePath)
+        fileFunctions, err := codeinspector.GetPublicFunctions("k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", filePath)
         if err == nil {
-            functionNames = append(functionNames, functions...)
+            functions = append(functions, fileFunctions...)
         } else {
             t.Errorf("unexpected error when parsing %s", filePath)
         }
     }

     // Check if all public priorities are referenced in target files.
-    for _, functionName := range functionNames {
-        args := []string{"-rl", functionName}
+    for _, function := range functions {
+        args := []string{"-rl", function.Name.Name}
         args = append(args, targetFiles...)

         err := exec.Command("grep", args...).Run()
         if err != nil {
             switch err.Error() {
             case "exit status 2":
-                t.Errorf("unexpected error when checking %s", functionName)
+                t.Errorf("unexpected error when checking %s", function.Name)
             case "exit status 1":
                 t.Errorf("priority %s is implemented as public but seems not registered or used in any other place",
-                    functionName)
+                    function.Name)
             }
         }
     }
@@ -24,7 +24,7 @@ import (

 // FitPredicate is a function that indicates if a pod fits into an existing node.
 // The failure information is given by the error.
-type FitPredicate func(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error)
+type FitPredicate func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error)

 type PriorityFunction func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister NodeLister) (schedulerapi.HostPriorityList, error)

@@ -116,11 +116,11 @@ func TestCreateFromEmptyConfig(t *testing.T) {
     factory.CreateFromConfig(policy)
 }

-func PredicateOne(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PredicateOne(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     return true, nil
 }

-func PredicateTwo(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PredicateTwo(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     return true, nil
 }

@@ -146,9 +146,10 @@ func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
     } else {
         predicateResultLock := sync.Mutex{}
         errs := []error{}
+        meta := predicates.PredicateMetadata(pod)
         checkNode := func(i int) {
             nodeName := nodes.Items[i].Name
-            fits, failedPredicate, err := podFitsOnNode(pod, nodeNameToInfo[nodeName], predicateFuncs)
+            fits, failedPredicate, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs)

             predicateResultLock.Lock()
             defer predicateResultLock.Unlock()
@@ -184,9 +185,9 @@ func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
 }

 // Checks whether node with a given name and NodeInfo satisfies all predicateFuncs.
-func podFitsOnNode(pod *api.Pod, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, string, error) {
+func podFitsOnNode(pod *api.Pod, meta interface{}, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, string, error) {
     for _, predicate := range predicateFuncs {
-        fit, err := predicate(pod, info)
+        fit, err := predicate(pod, meta, info)
         if err != nil {
             switch e := err.(type) {
             case *predicates.InsufficientResourceError:
@@ -33,15 +33,15 @@ import (
     "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-func falsePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func falsePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     return false, algorithmpredicates.ErrFakePredicate
 }

-func truePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func truePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     return true, nil
 }

-func matchesPredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, fmt.Errorf("node not found")
@@ -52,7 +52,7 @@ func matchesPredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, er
     return false, algorithmpredicates.ErrFakePredicate
 }

-func hasNoPodsPredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func hasNoPodsPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
     if len(nodeInfo.Pods()) == 0 {
         return true, nil
     }