Merge pull request #71978 from denkensk/move-predicate-types
Move predicate types from algorithm to predicates
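For orientation before the diff: the commit moves the scheduler's core predicate types (FitPredicate, PredicateMetadata, PredicateMetadataProducer, PredicateFailureReason) out of pkg/scheduler/algorithm and into the predicates package itself, so predicate code refers to them unqualified. A minimal before/after sketch from a caller's point of view (illustrative only, not part of the diff):

```go
// Before the move, the predicate function type lived in the algorithm package:
//
//	import "k8s.io/kubernetes/pkg/scheduler/algorithm"
//	var pred algorithm.FitPredicate = predicates.PodFitsHost
//
// After the move, the type lives next to the predicates themselves:
import "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"

var pred predicates.FitPredicate = predicates.PodFitsHost
```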
@@ -59,7 +59,6 @@ go_test(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
@@ -23,7 +23,6 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -36,7 +35,7 @@ type CSIMaxVolumeLimitChecker struct {
 
 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-    pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+    pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
     c := &CSIMaxVolumeLimitChecker{
         pvInfo:  pvInfo,
         pvcInfo: pvcInfo,
@@ -45,7 +44,7 @@ func NewCSIMaxVolumeLimitPredicate(
 }
 
 func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
-    pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+    pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 
     // if feature gate is disable we return
     if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
@@ -101,7 +100,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
         if ok {
             currentVolumeCount := attachedVolumeCount[volumeLimitKey]
             if currentVolumeCount+count > int(maxVolumeLimit) {
-                return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+                return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
             }
         }
     }
@@ -25,7 +25,6 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/kubernetes/pkg/features"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
 )
 
 func TestCSIVolumeCountPredicate(t *testing.T) {
@@ -105,12 +104,12 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
     }
 
     defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-    expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+    expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
     // running attachable predicate tests with feature gate and limit present on nodes
     for _, test := range tests {
         node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
         pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
         if err != nil {
             t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }
@@ -134,6 +134,11 @@ func (e *PredicateFailureError) GetReason() string {
     return e.PredicateDesc
 }
 
+// PredicateFailureReason interface represents the failure reason of a predicate.
+type PredicateFailureReason interface {
+    GetReason() string
+}
+
 // FailureReason describes a failure reason.
 type FailureReason struct {
     reason string
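Any type with a GetReason() string method now satisfies the interface added above; a hedged sketch of a custom failure reason (the type name is hypothetical, not part of the commit):

```go
// exampleReason is a hypothetical PredicateFailureReason, shown only to
// illustrate the interface added in this hunk.
type exampleReason struct{ msg string }

// GetReason returns the human-readable failure message.
func (r exampleReason) GetReason() string { return r.msg }

// A predicate could then fail with:
//	return false, []PredicateFailureReason{exampleReason{msg: "node is cordoned"}}, nil
```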
@@ -30,7 +30,6 @@ import (
     utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -742,13 +741,13 @@ func TestVolumeCountConflicts(t *testing.T) {
         },
     }
 
-    expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+    expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
 
     // running attachable predicate tests without feature gate and no limit present on nodes
     for _, test := range tests {
         os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
         pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
         if err != nil {
             t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }
@@ -766,7 +765,7 @@ func TestVolumeCountConflicts(t *testing.T) {
     for _, test := range tests {
         node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
         pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
         if err != nil {
             t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }
@@ -34,6 +34,16 @@ import (
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
 
+// PredicateMetadata interface represents anything that can access a predicate metadata.
+type PredicateMetadata interface {
+    ShallowCopy() PredicateMetadata
+    AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
+    RemovePod(deletedPod *v1.Pod) error
+}
+
+// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
+type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
+
 // PredicateMetadataFactory defines a factory of predicate metadata.
 type PredicateMetadataFactory struct {
     podLister algorithm.PodLister
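The exported PredicateMetadataProducer moved here is what the scheduler invokes once per scheduling cycle; EmptyPredicateMetadataProducer further down in this diff is the canonical trivial implementation. A sketch of a conforming producer (illustrative, assuming this package's types):

```go
// nilProducer satisfies the new exported PredicateMetadataProducer type.
// Returning nil metadata is legal: predicates fall back to recomputing
// whatever they need, exactly as EmptyPredicateMetadataProducer does.
var nilProducer PredicateMetadataProducer = func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
	return nil
}
```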
@@ -91,21 +101,27 @@ type predicateMetadata struct {
 }
 
 // Ensure that predicateMetadata implements algorithm.PredicateMetadata.
-var _ algorithm.PredicateMetadata = &predicateMetadata{}
+var _ PredicateMetadata = &predicateMetadata{}
 
-// PredicateMetadataProducer function produces predicate metadata.
-type PredicateMetadataProducer func(pm *predicateMetadata)
+// predicateMetadataProducer function produces predicate metadata. It is stored in a global variable below
+// and used to modify the return values of PredicateMetadataProducer
+type predicateMetadataProducer func(pm *predicateMetadata)
 
 var predicateMetaProducerRegisterLock sync.Mutex
-var predicateMetadataProducers = make(map[string]PredicateMetadataProducer)
+var predicateMetadataProducers = make(map[string]predicateMetadataProducer)
 
 // RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
-func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer) {
+func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) {
     predicateMetaProducerRegisterLock.Lock()
     defer predicateMetaProducerRegisterLock.Unlock()
     predicateMetadataProducers[predicateName] = precomp
 }
 
+// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
+func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
+    return nil
+}
+
 // RegisterPredicateMetadataProducerWithExtendedResourceOptions registers a
 // PredicateMetadataProducer that creates predicate metadata with the provided
 // options for extended resources.
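Note the deliberate split above: the exported PredicateMetadataProducer (declared earlier in this file) computes metadata for a pod, while the now-unexported predicateMetadataProducer is a per-predicate precompute hook that mutates a *predicateMetadata. A sketch of registering such a hook (the predicate name and cached field are hypothetical):

```go
func init() {
	// Register a precompute hook under a hypothetical predicate name; the hook
	// runs whenever predicate metadata is produced and can cache results on pm.
	RegisterPredicateMetadataProducer("ExamplePredicate", func(pm *predicateMetadata) {
		// e.g. pm.podBestEffort = isPodBestEffort(pm.pod)
	})
}
```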
@@ -118,7 +134,7 @@ func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtende
 }
 
 // NewPredicateMetadataFactory creates a PredicateMetadataFactory.
-func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer {
+func NewPredicateMetadataFactory(podLister algorithm.PodLister) PredicateMetadataProducer {
     factory := &PredicateMetadataFactory{
         podLister,
     }
@@ -126,7 +142,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Predic
 }
 
 // GetMetadata returns the predicateMetadata used which will be used by various predicates.
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
     // If we cannot compute metadata, just return nil
     if pod == nil {
         return nil
@@ -285,7 +301,7 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodei
 
 // ShallowCopy copies a metadata struct into a new struct and creates a copy of
 // its maps and slices, but it does not copy the contents of pointer values.
-func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
+func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
     newPredMeta := &predicateMetadata{
         pod:           meta.pod,
         podBestEffort: meta.podBestEffort,
@@ -304,7 +320,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
         meta.serviceAffinityMatchingPodServices...)
     newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
         meta.serviceAffinityMatchingPodList...)
-    return (algorithm.PredicateMetadata)(newPredMeta)
+    return (PredicateMetadata)(newPredMeta)
 }
 
 type affinityTermProperties struct {
@@ -137,6 +137,10 @@
     CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )
 
+// FitPredicate is a function that indicates if a pod fits into an existing node.
+// The failure information is given by the error.
+type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
+
 // NodeInfo interface represents anything that can get node object from node ID.
 type NodeInfo interface {
     GetNodeInfo(nodeID string) (*v1.Node, error)
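With FitPredicate now defined in this package, a predicate is just a function with this signature; a minimal conforming sketch (the function is illustrative, not from the commit):

```go
// alwaysFits is a hypothetical predicate: it rejects unknown nodes and
// otherwise reports that the pod fits.
func alwaysFits(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}
	return true, nil, nil
}

// The compile-time assertion below only builds if the signature matches.
var _ FitPredicate = alwaysFits
```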
@@ -274,11 +278,11 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // - ISCSI forbids if any two pods share at least same IQN, LUN and Target
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     for _, v := range pod.Spec.Volumes {
         for _, ev := range nodeInfo.Pods() {
             if isVolumeConflict(v, ev) {
-                return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
+                return false, []PredicateFailureReason{ErrDiskConflict}, nil
             }
         }
     }
@@ -313,7 +317,7 @@ type VolumeFilter struct {
 // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
 // the maximum.
 func NewMaxPDVolumeCountPredicate(
-    filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+    filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
     var filter VolumeFilter
     var volumeLimitKey v1.ResourceName
 
@@ -447,7 +451,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
     return nil
 }
 
-func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -492,7 +496,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
 
     if numExistingVolumes+numNewVolumes > maxAttachLimit {
         // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
-        return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+        return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
     }
     if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
         nodeInfo.TransientInfo.TransientLock.Lock()
@@ -575,7 +579,7 @@ type VolumeZoneChecker struct {
 // determining the zone of a volume during scheduling, and that is likely to
 // require calling out to the cloud provider. It seems that we are moving away
 // from inline volume declarations anyway.
-func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate {
+func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) FitPredicate {
     c := &VolumeZoneChecker{
         pvInfo:  pvInfo,
         pvcInfo: pvcInfo,
@@ -584,7 +588,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
     return c.predicate
 }
 
-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -671,7 +675,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
 
             if !volumeVSet.Has(nodeV) {
                 klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
-                return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
+                return false, []PredicateFailureReason{ErrVolumeZoneConflict}, nil
             }
         }
     }
@@ -725,13 +729,13 @@ func podName(pod *v1.Pod) string {
 // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
 // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
 // predicate failure reasons if the node has insufficient resources to run the pod.
-func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
     }
 
-    var predicateFails []algorithm.PredicateFailureReason
+    var predicateFails []PredicateFailureReason
     allowedPodNumber := nodeInfo.AllowedPodNumber()
     if len(nodeInfo.Pods())+1 > allowedPodNumber {
         predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
@@ -850,7 +854,7 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
 }
 
 // PodMatchNodeSelector checks if a pod node selector matches the node label.
-func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
@@ -858,11 +862,11 @@ func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInf
     if podMatchesNodeSelectorAndAffinityTerms(pod, node) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
+    return false, []PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
 }
 
 // PodFitsHost checks if a pod spec node name matches the current node.
-func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if len(pod.Spec.NodeName) == 0 {
         return true, nil, nil
     }
@@ -873,7 +877,7 @@ func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedu
     if pod.Spec.NodeName == node.Name {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
+    return false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil
 }
 
 // NodeLabelChecker contains information to check node labels for a predicate.
@@ -884,7 +888,7 @@ type NodeLabelChecker struct {
 
 // NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
 // node labels which match a filter that it requests.
-func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
+func NewNodeLabelPredicate(labels []string, presence bool) FitPredicate {
     labelChecker := &NodeLabelChecker{
         labels:   labels,
         presence: presence,
@@ -904,7 +908,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
@@ -915,7 +919,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.Pr
     for _, label := range n.labels {
         exists = nodeLabels.Has(label)
         if (exists && !n.presence) || (!exists && n.presence) {
-            return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
+            return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
         }
     }
     return true, nil, nil
@@ -952,7 +956,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 }
 
 // NewServiceAffinityPredicate creates a ServiceAffinity.
-func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
+func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (FitPredicate, predicateMetadataProducer) {
     affinity := &ServiceAffinity{
         podLister:     podLister,
         serviceLister: serviceLister,
@@ -989,7 +993,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 //
 // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
 // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     var services []*v1.Service
     var pods []*v1.Pod
     if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
@@ -1024,11 +1028,11 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi
     if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
+    return false, []PredicateFailureReason{ErrServiceAffinityViolated}, nil
 }
 
 // PodFitsHostPorts checks if a node has free ports for the requested pod ports.
-func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     var wantPorts []*v1.ContainerPort
     if predicateMeta, ok := meta.(*predicateMetadata); ok {
         wantPorts = predicateMeta.podPorts
@@ -1044,7 +1048,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 
     // try to see whether existingPorts and wantPorts will conflict or not
     if portsConflict(existingPorts, wantPorts) {
-        return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
+        return false, []PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
     }
 
     return true, nil, nil
@@ -1068,8 +1072,8 @@ func haveOverlap(a1, a2 []string) bool {
 
 // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
 // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need
-func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1090,8 +1094,8 @@ func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *
 }
 
 // noncriticalPredicates are the predicates that only non-critical pods need
-func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1104,8 +1108,8 @@ func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeIn
 }
 
 // EssentialPredicates are the predicates that all pods, including critical pods, need
-func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := PodFitsHost(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1141,7 +1145,7 @@ type PodAffinityChecker struct {
 }
 
 // NewPodAffinityPredicate creates a PodAffinityChecker.
-func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
+func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPredicate {
     checker := &PodAffinityChecker{
         info:      info,
         podLister: podLister,
@@ -1152,13 +1156,13 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algor
 // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
 // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
 // predicate failure reasons if the pod cannot be scheduled on the specified node.
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
     }
     if failedPredicates, error := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicates != nil {
-        failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+        failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
         return false, failedPredicates, error
     }
 
@@ -1168,7 +1172,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
         return true, nil, nil
     }
     if failedPredicates, error := c.satisfiesPodsAffinityAntiAffinity(pod, meta, nodeInfo, affinity); failedPredicates != nil {
-        failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+        failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
         return false, failedPredicates, error
     }
 
@@ -1290,7 +1294,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
 
 // Checks if scheduling the pod onto this node would break any anti-affinity
 // terms indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
@@ -1365,8 +1369,8 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai
 
 // Checks if scheduling the pod onto this node would break any term of this pod.
 func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
-    meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
-    affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
+    meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
+    affinity *v1.Affinity) (PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil")
@@ -1466,9 +1470,9 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 }
 
 // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
-func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
     }
 
     // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
@@ -1479,16 +1483,16 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
 
     // TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
     if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
+        return false, []PredicateFailureReason{ErrNodeUnschedulable}, nil
     }
 
     return true, nil, nil
 }
 
 // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
-func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
     }
 
     return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
@@ -1498,13 +1502,13 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
 }
 
 // PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints
-func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
         return t.Effect == v1.TaintEffectNoExecute
     })
 }
 
-func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
+func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []PredicateFailureReason, error) {
     taints, err := nodeInfo.Taints()
     if err != nil {
         return false, nil, err
@@ -1513,7 +1517,7 @@ func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, f
     if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
+    return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
 }
 
 // isPodBestEffort checks if pod is scheduled with best-effort QoS
@@ -1523,7 +1527,7 @@ func isPodBestEffort(pod *v1.Pod) bool {
 
 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
 // reporting memory pressure condition.
-func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     var podBestEffort bool
     if predicateMeta, ok := meta.(*predicateMetadata); ok {
         podBestEffort = predicateMeta.podBestEffort
@@ -1538,38 +1542,37 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetad
 
     // check if node is under memory pressure
     if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
+        return false, []PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
     }
     return true, nil, nil
 }
 
 // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
 // reporting disk pressure condition.
-func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // check if node is under disk pressure
     if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
+        return false, []PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
     }
     return true, nil, nil
 }
 
 // CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
 // reporting pid pressure condition.
-func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodePIDPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // check if node is under pid pressure
     if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
+        return false, []PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
     }
     return true, nil, nil
 }
 
 // CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting
 // network unavailable and not ready condition. Only node conditions are accounted in this predicate.
-func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    reasons := []algorithm.PredicateFailureReason{}
-
+func CheckNodeConditionPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    reasons := []PredicateFailureReason{}
     if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
     }
 
     node := nodeInfo.Node()
@@ -1607,14 +1610,14 @@ type VolumeBindingChecker struct {
 //
 // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
 // PVCs can be matched with an available and node-compatible PV.
-func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate {
+func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) FitPredicate {
     c := &VolumeBindingChecker{
         binder: binder,
     }
     return c.predicate
 }
 
-func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
         return true, nil, nil
     }
@@ -1629,7 +1632,7 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
     return false, nil, err
     }
 
-    failReasons := []algorithm.PredicateFailureReason{}
+    failReasons := []PredicateFailureReason{}
     if !boundSatisfied {
         klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
         failReasons = append(failReasons, ErrVolumeNodeConflict)
@@ -34,7 +34,6 @@ import (
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -91,7 +90,7 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
     return pod
 }
 
-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
     pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
     return pm.GetMetadata(p, nodeInfo)
 }
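The test helper is renamed from PredicateMetadata to GetPredicateMetadata because the old name now collides with the PredicateMetadata interface moved into this package; every test call site below changes mechanically, e.g. (snippet for illustration):

```go
// Old: meta := PredicateMetadata(test.pod, nil) // name now taken by the interface
meta := GetPredicateMetadata(test.pod, nil)
fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
```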
@@ -102,7 +101,7 @@ func TestPodFitsResources(t *testing.T) {
     nodeInfo                 *schedulernodeinfo.NodeInfo
     fits                     bool
     name                     string
-    reasons                  []algorithm.PredicateFailureReason
+    reasons                  []PredicateFailureReason
     ignoredExtendedResources sets.String
 }{
     {
@@ -118,7 +117,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
         fits: false,
         name: "too many resources fails",
-        reasons: []algorithm.PredicateFailureReason{
+        reasons: []PredicateFailureReason{
             NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
             NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20),
         },
@@ -129,7 +128,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
         fits: false,
         name: "too many resources fails due to init container cpu",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
@@ -137,7 +136,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
         fits: false,
         name: "too many resources fails due to highest init container cpu",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
@@ -145,7 +144,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
         fits: false,
         name: "too many resources fails due to init container memory",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@@ -153,7 +152,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
         fits: false,
         name: "too many resources fails due to highest init container memory",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -182,7 +181,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
         fits: false,
         name: "one resource memory fits",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
     },
     {
         pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@@ -190,7 +189,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
         fits: false,
         name: "one resource cpu fits",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
     },
     {
         pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -225,7 +224,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
         fits: false,
         name: "extended resource capacity enforced",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -234,7 +233,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
         fits: false,
         name: "extended resource capacity enforced for init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
     },
     {
         pod: newResourcePod(
@@ -243,7 +242,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
         fits: false,
         name: "extended resource allocatable enforced",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -252,7 +251,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
         fits: false,
         name: "extended resource allocatable enforced for init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
     },
     {
         pod: newResourcePod(
@@ -262,7 +261,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
         fits: false,
         name: "extended resource allocatable enforced for multiple containers",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -281,7 +280,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
         fits: false,
         name: "extended resource allocatable enforced for multiple init containers",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
     },
     {
         pod: newResourcePod(
@@ -290,7 +289,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
         fits: false,
         name: "extended resource allocatable enforced for unknown resource",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -299,7 +298,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
         fits: false,
         name: "extended resource allocatable enforced for unknown resource for init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
     },
     {
         pod: newResourcePod(
@@ -308,7 +307,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
         fits: false,
         name: "kubernetes.io resource capacity enforced",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -317,7 +316,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
         fits: false,
         name: "kubernetes.io resource capacity enforced for init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
     },
     {
         pod: newResourcePod(
@@ -326,7 +325,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
         fits: false,
         name: "hugepages resource capacity enforced",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@@ -335,7 +334,7 @@ func TestPodFitsResources(t *testing.T) {
            newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
         fits: false,
         name: "hugepages resource capacity enforced for init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
     },
     {
         pod: newResourcePod(
@@ -345,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
            newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
         fits: false,
         name: "hugepages resource allocatable enforced for multiple containers",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
     },
     {
         pod: newResourcePod(
@@ -363,7 +362,7 @@ func TestPodFitsResources(t *testing.T) {
         node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
         test.nodeInfo.SetNode(&node)
         RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources)
-        meta := PredicateMetadata(test.pod, nil)
+        meta := GetPredicateMetadata(test.pod, nil)
         fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
         if err != nil {
             t.Errorf("unexpected error: %v", err)
@@ -382,7 +381,7 @@ func TestPodFitsResources(t *testing.T) {
     nodeInfo *schedulernodeinfo.NodeInfo
     fits     bool
     name     string
-    reasons  []algorithm.PredicateFailureReason
+    reasons  []PredicateFailureReason
 }{
     {
         pod: &v1.Pod{},
@@ -390,7 +389,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
         fits: false,
         name: "even without specified resources predicate fails when there's no space for additional pod",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
     },
     {
         pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -398,7 +397,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
         fits: false,
         name: "even if both resources fit predicate fails when there's no space for additional pod",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
     },
     {
         pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -406,7 +405,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
         fits: false,
         name: "even for equal edge case predicate fails when there's no space for additional pod",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
     },
     {
         pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@@ -414,14 +413,14 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
         fits: false,
         name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
-        reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
+        reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
     },
 }
 for _, test := range notEnoughPodsTests {
     t.Run(test.name, func(t *testing.T) {
         node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
         test.nodeInfo.SetNode(&node)
-        fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+        fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
         if err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -439,7 +438,7 @@ func TestPodFitsResources(t *testing.T) {
     nodeInfo *schedulernodeinfo.NodeInfo
     fits     bool
     name     string
-    reasons  []algorithm.PredicateFailureReason
+    reasons  []PredicateFailureReason
 }{
     {
         pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@@ -447,7 +446,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
         fits: false,
         name: "due to container scratch disk",
-        reasons: []algorithm.PredicateFailureReason{
+        reasons: []PredicateFailureReason{
             NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
         },
     },
@@ -464,7 +463,7 @@ func TestPodFitsResources(t *testing.T) {
             newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
         fits: false,
         name: "storage ephemeral local storage request exceeds allocatable",
-        reasons: []algorithm.PredicateFailureReason{
+        reasons: []PredicateFailureReason{
             NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20),
         },
     },
@@ -481,7 +480,7 @@ func TestPodFitsResources(t *testing.T) {
     t.Run(test.name, func(t *testing.T) {
         node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
         test.nodeInfo.SetNode(&node)
-        fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+        fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
         if err != nil {
             t.Errorf("unexpected error: %v", err)
         }
@@ -538,13 +537,13 @@ func TestPodFitsHost(t *testing.T) {
|
||||
name: "host doesn't match",
|
||||
},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrPodNotMatchHostName}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nodeInfo := schedulernodeinfo.NewNodeInfo()
|
||||
nodeInfo.SetNode(test.node)
|
||||
fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
|
||||
fits, reasons, err := PodFitsHost(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -680,11 +679,11 @@ func TestPodFitsHostPorts(t *testing.T) {
|
||||
name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
|
||||
},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrPodNotFitsHostPorts}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fits, reasons, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
fits, reasons, err := PodFitsHostPorts(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -732,11 +731,11 @@ func TestGCEDiskConflicts(t *testing.T) {
|
||||
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
|
||||
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -787,11 +786,11 @@ func TestAWSDiskConflicts(t *testing.T) {
|
||||
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
|
||||
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -848,11 +847,11 @@ func TestRBDDiskConflicts(t *testing.T) {
|
||||
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
|
||||
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -909,11 +908,11 @@ func TestISCSIDiskConflicts(t *testing.T) {
|
||||
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
|
||||
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
@@ -1603,7 +1602,7 @@ func TestPodFitsSelector(t *testing.T) {
|
||||
name: "Pod with two terms: both matchFields and matchExpressions do not match",
|
||||
},
|
||||
}
|
||||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
|
||||
expectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
@@ -1614,7 +1613,7 @@ func TestPodFitsSelector(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)

fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -1674,7 +1673,7 @@ func TestNodeLabelPresence(t *testing.T) {
name: "all labels match, presence false",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}
expectedFailureReasons := []PredicateFailureReason{ErrNodeLabelPresenceViolated}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -1683,7 +1682,7 @@ func TestNodeLabelPresence(t *testing.T) {
nodeInfo.SetNode(&node)

labelChecker := NodeLabelChecker{test.labels, test.presence}
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -1823,7 +1822,7 @@ func TestServiceAffinity(t *testing.T) {
name: "service pod on different node, multiple labels, all match",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}
expectedFailureReasons := []PredicateFailureReason{ErrServiceAffinityViolated}
for _, test := range tests {
testIt := func(skipPrecompute bool) {
t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
@@ -1839,7 +1838,7 @@ func TestServiceAffinity(t *testing.T) {
precompute(pm)
}
})
if pmeta, ok := (PredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
if pmeta, ok := (GetPredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
fits, reasons, err := predicate(test.pod, pmeta, nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -1885,7 +1884,7 @@ func TestRunGeneralPredicates(t *testing.T) {
fits bool
name string
wErr error
reasons []algorithm.PredicateFailureReason
reasons []PredicateFailureReason
}{
{
pod: &v1.Pod{},
@@ -1909,7 +1908,7 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{
reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10),
NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20),
},
@@ -1928,7 +1927,7 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotMatchHostName},
reasons: []PredicateFailureReason{ErrPodNotMatchHostName},
name: "host not match",
},
{
@@ -1940,14 +1939,14 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts},
reasons: []PredicateFailureReason{ErrPodNotFitsHostPorts},
name: "hostport conflict",
},
}
for _, test := range resourceTests {
t.Run(test.name, func(t *testing.T) {
test.nodeInfo.SetNode(test.node)
fits, reasons, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := GeneralPredicates(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -1976,7 +1975,7 @@ func TestInterPodAffinity(t *testing.T) {
node *v1.Node
fits bool
name string
expectFailureReasons []algorithm.PredicateFailureReason
expectFailureReasons []PredicateFailureReason
}{
{
pod: new(v1.Pod),
@@ -2076,7 +2075,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2107,7 +2106,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2204,7 +2203,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2371,7 +2370,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2444,7 +2443,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2476,7 +2475,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2512,7 +2511,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2612,7 +2611,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@@ -2675,7 +2674,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@@ -2739,7 +2738,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@@ -2814,7 +2813,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@@ -2889,7 +2888,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms",
},
}
@@ -2911,7 +2910,7 @@ func TestInterPodAffinity(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(test.node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
}
@@ -2944,7 +2943,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []v1.Node
nodesExpectAffinityFailureReasons [][]algorithm.PredicateFailureReason
nodesExpectAffinityFailureReasons [][]PredicateFailureReason
fits map[string]bool
name string
nometa bool
@@ -2985,7 +2984,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
"machine2": true,
"machine3": false,
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules",
},
{
@@ -3034,7 +3033,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{
"nodeA": false,
"nodeB": true,
@@ -3087,7 +3086,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@@ -3125,7 +3124,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
fits: map[string]bool{
"nodeA": false,
"nodeB": false,
@@ -3174,7 +3173,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@@ -3215,7 +3214,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
fits: map[string]bool{
"nodeA": false,
"nodeB": false,
@@ -3279,7 +3278,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
@@ -3359,7 +3358,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
nil,
@@ -3403,7 +3402,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@@ -3444,7 +3443,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@@ -3507,7 +3506,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@@ -3568,7 +3567,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@@ -3621,7 +3620,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
fits: map[string]bool{
@@ -3675,7 +3674,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
fits: map[string]bool{
@@ -3727,7 +3726,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@@ -3782,7 +3781,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@@ -3869,7 +3868,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@@ -3925,7 +3924,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
@@ -3986,7 +3985,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
@@ -3998,7 +3997,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
}

selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
selectorExpectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}

for indexTest, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -4023,9 +4022,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
podLister: schedulertesting.FakePodLister(test.pods),
}

var meta algorithm.PredicateMetadata
var meta PredicateMetadata
if !test.nometa {
meta = PredicateMetadata(test.pod, nodeInfoMap)
meta = GetPredicateMetadata(test.pod, nodeInfoMap)
}

fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, nodeInfoMap[node.Name])
@@ -4037,7 +4036,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -4238,13 +4237,13 @@ func TestPodToleratesTaints(t *testing.T) {
"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}
expectedFailureReasons := []PredicateFailureReason{ErrTaintsTolerationsNotMatch}

for _, test := range podTolerateTaintsTests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&test.node)
fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := PodToleratesNodeTaints(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -4352,11 +4351,11 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
name: "non best-effort pod schedulable on node without memory pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderMemoryPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -4426,11 +4425,11 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderDiskPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -4486,11 +4485,11 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderPIDPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, PredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, GetPredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -4673,7 +4672,7 @@ func TestVolumeZonePredicate(t *testing.T) {
},
}

expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -4767,7 +4766,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
},
}

expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {

@@ -31,10 +31,6 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
}

// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
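
For orientation, a minimal FitPredicate only needs to report whether the pod fits plus any failure reasons; a sketch (hypothetical example, not part of this diff, assuming the types defined in this file):

// alwaysFits is a hypothetical FitPredicate that reports a fit for every
// pod/node pair and never returns failure reasons or an error.
func alwaysFits(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
	return true, nil, nil
}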

// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
@@ -46,9 +42,6 @@ type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
// TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error

// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
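
A producer is meant to be invoked once per pod being scheduled, with the result shared across predicates; a hedged sketch of the wiring (hypothetical helper, assuming the FitPredicate and PredicateMetadataProducer types above):

// runOnNode is a hypothetical helper: it builds metadata with the given
// producer, then evaluates a single predicate against one named node.
func runOnNode(produce PredicateMetadataProducer, pred FitPredicate, pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodeName string) (bool, []PredicateFailureReason, error) {
	meta := produce(pod, nodeNameToInfo)
	return pred(pod, meta, nodeNameToInfo[nodeName])
}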

// PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used only for priority functions. For predicates please use PredicateMetadataProducer.
type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{}
@@ -69,21 +62,11 @@ type PriorityConfig struct {
Weight int
}

// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
return nil
}

// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} {
return nil
}

// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
GetReason() string
}
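
Any type with a GetReason method satisfies this interface; a minimal sketch (hypothetical, not part of this diff):

// stringReason is a hypothetical PredicateFailureReason backed by a plain string.
type stringReason string

// GetReason returns the human-readable explanation of the predicate failure.
func (r stringReason) GetReason() string { return string(r) }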

// NodeLister interface represents anything that can list nodes for a scheduler.
type NodeLister interface {
// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
@@ -172,10 +155,3 @@ type EmptyStatefulSetLister struct{}
func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
return nil, nil
}

// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
ShallowCopy() PredicateMetadata
AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
RemovePod(deletedPod *v1.Pod) error
}
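
ShallowCopy plus AddPod/RemovePod let callers such as the preemption logic re-evaluate predicates against hypothetical node states without recomputing metadata from scratch; a hedged sketch (hypothetical helper, assuming only the interface above):

// withoutVictim is a hypothetical helper: it clones the metadata and removes
// one pod, so predicates can be re-run as if that pod had been evicted.
func withoutVictim(meta PredicateMetadata, victim *v1.Pod) (PredicateMetadata, error) {
	clone := meta.ShallowCopy()
	if err := clone.RemovePod(victim); err != nil {
		return nil, err
	}
	return clone, nil
}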