Merge pull request #85012 from alculquicondor/feat/predicate_factory

Add MetadataProducerFactory for predicates

Commit 01e014c7d6
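In short: this PR renames the scheduler's predicate metadata types (PredicateMetadata becomes Metadata, PredicateMetadataProducer becomes MetadataProducer) and moves the package-level GetPredicateMetadata function onto a new MetadataProducerFactory struct. A minimal sketch of the call-site migration, assuming a pod and shared lister are already in hand (variable names are illustrative, not from the diff):

    // Before: package-level producer function.
    meta := predicates.GetPredicateMetadata(pod, sharedLister)

    // After: method on the new, stateless factory struct.
    factory := &predicates.MetadataProducerFactory{}
    meta = factory.GetPredicateMetadata(pod, sharedLister)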
@@ -1318,7 +1318,7 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
 // - PodFitsHost: checks pod's NodeName against node
 // - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
 // - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
-func checkNodeFitness(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+func checkNodeFitness(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 	var predicateFails []predicates.PredicateFailureReason
 	fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
 	if err != nil {

@@ -87,7 +87,7 @@ func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1bet
 }

 func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
-	pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+	pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	// If the new pod doesn't have any volume attached to it, the predicate will always be true
 	if len(pod.Spec.Volumes) == 0 {
 		return true, nil, nil

@@ -458,7 +458,8 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 				getFakeCSIPVCLister(test.filterName, "csi-sc", test.driverNames...),
 				getFakeCSIStorageClassLister("csi-sc", test.driverNames[0]))

-			fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := pred(test.newPod, factory.GetPredicateMetadata(test.newPod, nil), node)
 			if err != nil {
 				t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
 			}

@@ -858,7 +858,8 @@ func TestVolumeCountConflicts(t *testing.T) {
 				getFakePVLister(test.filterName),
 				getFakePVCLister(test.filterName))

-			fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := pred(test.newPod, factory.GetPredicateMetadata(test.newPod, nil), node)
 			if err != nil {
 				t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
 			}

@@ -880,7 +881,8 @@ func TestVolumeCountConflicts(t *testing.T) {
 				getFakeStorageClassLister(test.filterName),
 				getFakePVLister(test.filterName),
 				getFakePVCLister(test.filterName))
-			fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := pred(test.newPod, factory.GetPredicateMetadata(test.newPod, nil), node)
 			if err != nil {
 				t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
 			}
@@ -35,15 +35,15 @@ import (
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

-// PredicateMetadata interface represents anything that can access a predicate metadata.
-type PredicateMetadata interface {
-	ShallowCopy() PredicateMetadata
+// Metadata interface represents anything that can access a predicate metadata.
+type Metadata interface {
+	ShallowCopy() Metadata
 	AddPod(addedPod *v1.Pod, node *v1.Node) error
 	RemovePod(deletedPod *v1.Pod, node *v1.Node) error
 }

-// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
-type PredicateMetadataProducer func(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) PredicateMetadata
+// MetadataProducer is a function that computes predicate metadata for a given pod.
+type MetadataProducer func(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata

 // AntiAffinityTerm's topology key value used in predicate metadata
 type topologyPair struct {
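For context, the renamed Metadata interface above is what preemption code copies and mutates while simulating pod additions and removals. A hedged sketch of a do-nothing implementation (noopMetadata is hypothetical, not part of this PR):

    // noopMetadata satisfies Metadata without tracking anything.
    type noopMetadata struct{}

    func (m *noopMetadata) ShallowCopy() Metadata                             { return m }
    func (m *noopMetadata) AddPod(addedPod *v1.Pod, node *v1.Node) error      { return nil }
    func (m *noopMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) error { return nil }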
@@ -300,27 +300,27 @@ type predicateMetadata struct {
 	podFitsHostPortsMetadata *podFitsHostPortsMetadata
 }

-// Ensure that predicateMetadata implements algorithm.PredicateMetadata.
-var _ PredicateMetadata = &predicateMetadata{}
+// Ensure that predicateMetadata implements algorithm.Metadata.
+var _ Metadata = &predicateMetadata{}

 // predicateMetadataProducer function produces predicate metadata. It is stored in a global variable below
-// and used to modify the return values of PredicateMetadataProducer
+// and used to modify the return values of MetadataProducer
 type predicateMetadataProducer func(pm *predicateMetadata)

 var predicateMetadataProducers = make(map[string]predicateMetadataProducer)

-// RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
+// RegisterPredicateMetadataProducer registers a MetadataProducer.
 func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) {
 	predicateMetadataProducers[predicateName] = precomp
 }

-// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
-func EmptyPredicateMetadataProducer(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) PredicateMetadata {
+// EmptyMetadataProducer returns a no-op MetadataProducer type.
+func EmptyMetadataProducer(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata {
 	return nil
 }

 // RegisterPredicateMetadataProducerWithExtendedResourceOptions registers a
-// PredicateMetadataProducer that creates predicate metadata with the provided
+// MetadataProducer that creates predicate metadata with the provided
 // options for extended resources.
 //
 // See the comments in "predicateMetadata" for the explanation of the options.
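Note that the per-predicate registration hook above keeps its old name; only the doc comment's type reference changes. Usage stays as in the tests later in this diff:

    _, precompute := NewServiceAffinityPredicate(s.NodeInfos(), s.Pods(), serviceLister, nil)
    RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)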
@@ -330,8 +330,11 @@ func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtende
 	})
 }

+// MetadataProducerFactory is a factory to produce Metadata.
+type MetadataProducerFactory struct{}
+
 // GetPredicateMetadata returns the predicateMetadata which will be used by various predicates.
-func GetPredicateMetadata(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) PredicateMetadata {
+func (f *MetadataProducerFactory) GetPredicateMetadata(pod *v1.Pod, sharedLister schedulerlisters.SharedLister) Metadata {
 	// If we cannot compute metadata, just return nil
 	if pod == nil {
 		return nil
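Because MetadataProducerFactory is an empty struct, the method value factory.GetPredicateMetadata satisfies the MetadataProducer function type directly, which is how the tests and scheduler wiring in this PR consume it. A short sketch:

    factory := &MetadataProducerFactory{}
    var producer MetadataProducer = factory.GetPredicateMetadata // method value as producer
    meta := producer(pod, sharedLister)                          // nil when pod is nil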
@@ -675,7 +678,7 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, node *v1.Node) error {

 // ShallowCopy copies a metadata struct into a new struct and creates a copy of
 // its maps and slices, but it does not copy the contents of pointer values.
-func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
+func (meta *predicateMetadata) ShallowCopy() Metadata {
 	newPredMeta := &predicateMetadata{
 		pod:           meta.pod,
 		podBestEffort: meta.podBestEffort,

@@ -685,7 +688,7 @@ func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
 	newPredMeta.evenPodsSpreadMetadata = meta.evenPodsSpreadMetadata.clone()
 	newPredMeta.serviceAffinityMetadata = meta.serviceAffinityMetadata.clone()
 	newPredMeta.podFitsResourcesMetadata = meta.podFitsResourcesMetadata.clone()
-	return (PredicateMetadata)(newPredMeta)
+	return (Metadata)(newPredMeta)
 }

 type affinityTermProperties struct {

@@ -358,7 +358,8 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
 		s := nodeinfosnapshot.NewSnapshot(pods, test.nodes)
 		_, precompute := NewServiceAffinityPredicate(s.NodeInfos(), s.Pods(), fakelisters.ServiceLister(test.services), nil)
 		RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
-		meta := GetPredicateMetadata(test.pendingPod, s)
+		factory := &MetadataProducerFactory{}
+		meta := factory.GetPredicateMetadata(test.pendingPod, s)
 		return meta.(*predicateMetadata), s.NodeInfoMap
 	}
@@ -151,7 +151,7 @@ func Ordering() []string {

 // FitPredicate is a function that indicates if a pod fits into an existing node.
 // The failure information is given by the error.
-type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
+type FitPredicate func(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

 func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 	// fast path if there is no conflict checking targets.
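All predicates in the package now use the shorter Metadata name in this signature. A hedged sketch of a custom predicate under the new FitPredicate type (alwaysFits is illustrative only, mirroring the truePredicate helper in the tests below):

    // alwaysFits reports that any pod fits any node.
    func alwaysFits(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    	return true, nil, nil
    }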
@@ -209,7 +209,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only
 // - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	for _, v := range pod.Spec.Volumes {
 		for _, ev := range nodeInfo.Pods() {
 			if isVolumeConflict(v, ev) {

@@ -418,7 +418,7 @@ func (c *MaxPDVolumeCountChecker) matchProvisioner(pvc *v1.PersistentVolumeClaim
 	return c.filter.MatchProvisioner(storageClass)
 }

-func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	// If a pod doesn't have any volume attached to it, the predicate will always be true.
 	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
 	if len(pod.Spec.Volumes) == 0 {

@@ -640,7 +640,7 @@ func NewVolumeZonePredicate(pvLister corelisters.PersistentVolumeLister, pvcList
 	return c.predicate
 }

-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	// If a pod doesn't have any volume attached to it, the predicate will always be true.
 	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
 	if len(pod.Spec.Volumes) == 0 {

@@ -787,7 +787,7 @@ func podName(pod *v1.Pod) string {
 // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
 // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
 // predicate failure reasons if the node has insufficient resources to run the pod.
-func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -912,7 +912,7 @@ func PodMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
 }

 // PodMatchNodeSelector checks if a pod node selector matches the node label.
-func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodMatchNodeSelector(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -924,7 +924,7 @@ func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedul
 }

 // PodFitsHost checks if a pod spec node name matches the current node.
-func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	if len(pod.Spec.NodeName) == 0 {
 		return true, nil, nil
 	}

@@ -968,7 +968,7 @@ func NewNodeLabelPredicate(presentLabels []string, absentLabels []string) FitPre
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node.
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -1063,7 +1063,7 @@ func NewServiceAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister,
 //
 // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
 // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	var services []*v1.Service
 	var pods []*v1.Pod
 	if pm, ok := meta.(*predicateMetadata); ok && pm.serviceAffinityMetadata != nil && (pm.serviceAffinityMetadata.matchingPodList != nil || pm.serviceAffinityMetadata.matchingPodServices != nil) {

@@ -1102,7 +1102,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada
 }

 // PodFitsHostPorts checks if a node has free ports for the requested pod ports.
-func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	var wantPorts []*v1.ContainerPort
 	if predicateMeta, ok := meta.(*predicateMetadata); ok && predicateMeta.podFitsHostPortsMetadata != nil {
 		wantPorts = predicateMeta.podFitsHostPortsMetadata.podPorts

@@ -1145,7 +1145,7 @@ func haveOverlap(a1, a2 []string) bool {

 // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
 // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need.
-func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	var predicateFails []PredicateFailureReason
 	for _, predicate := range []FitPredicate{noncriticalPredicates, EssentialPredicates} {
 		fit, reasons, err := predicate(pod, meta, nodeInfo)
@@ -1161,7 +1161,7 @@ func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulern
 }

 // noncriticalPredicates are the predicates that only non-critical pods need.
-func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func noncriticalPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	var predicateFails []PredicateFailureReason
 	fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
 	if err != nil {

@@ -1175,7 +1175,7 @@ func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedu
 }

 // EssentialPredicates are the predicates that all pods, including critical pods, need.
-func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func EssentialPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	var predicateFails []PredicateFailureReason
 	// TODO: PodFitsHostPorts is essential for now, but kubelet should ideally
 	// preempt pods to free up host ports too

@@ -1210,7 +1210,7 @@ func NewPodAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister, pod
 // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
 // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
 // predicate failure reasons if the pod cannot be scheduled on the specified node.
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -1345,7 +1345,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.

 // Checks if scheduling the pod onto this node would break any anti-affinity
 // terms indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("node not found")

@@ -1420,7 +1420,7 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai

 // satisfiesPodsAffinityAntiAffinity checks if scheduling the pod onto this node would break any term of this pod.
 func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
-	meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
+	meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo,
 	affinity *v1.Affinity) (PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {

@@ -1521,7 +1521,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 }

 // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
-func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	if nodeInfo == nil || nodeInfo.Node() == nil {
 		return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
 	}

@@ -1541,7 +1541,7 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeIn
 }

 // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints.
-func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodToleratesNodeTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	if nodeInfo == nil || nodeInfo.Node() == nil {
 		return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
 	}

@@ -1553,7 +1553,7 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *sched
 }

 // PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints.
-func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
 		return t.Effect == v1.TaintEffectNoExecute
 	})

@@ -1603,7 +1603,7 @@ func podHasPVCs(pod *v1.Pod) bool {
 	return false
 }

-func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	// If pod does not request any PVC, we don't need to do anything.
 	if !podHasPVCs(pod) {
 		return true, nil, nil

@@ -1641,7 +1641,7 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, no

 // EvenPodsSpreadPredicate checks if a pod can be scheduled on a node which satisfies
 // its topologySpreadConstraints.
-func EvenPodsSpreadPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+func EvenPodsSpreadPredicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -390,7 +390,8 @@ func TestPodFitsResources(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 			RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources)
-			meta := GetPredicateMetadata(test.pod, nil)
+			factory := &MetadataProducerFactory{}
+			meta := factory.GetPredicateMetadata(test.pod, nil)
 			fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)

@@ -448,7 +449,8 @@ func TestPodFitsResources(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 			test.nodeInfo.SetNode(&node)
-			fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodFitsResources(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -508,7 +510,8 @@ func TestPodFitsResources(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
-			fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodFitsResources(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -571,7 +574,8 @@ func TestPodFitsHost(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			nodeInfo := schedulernodeinfo.NewNodeInfo()
 			nodeInfo.SetNode(test.node)
-			fits, reasons, err := PodFitsHost(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodFitsHost(test.pod, factory.GetPredicateMetadata(test.pod, nil), nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -711,7 +715,8 @@ func TestPodFitsHostPorts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			fits, reasons, err := PodFitsHostPorts(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodFitsHostPorts(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -763,7 +768,8 @@ func TestGCEDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			ok, reasons, err := NoDiskConflict(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -818,7 +824,8 @@ func TestAWSDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			ok, reasons, err := NoDiskConflict(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -879,7 +886,8 @@ func TestRBDDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			ok, reasons, err := NoDiskConflict(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -940,7 +948,8 @@ func TestISCSIDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			ok, reasons, err := NoDiskConflict(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
@@ -1641,7 +1650,8 @@ func TestPodFitsSelector(t *testing.T) {
 			nodeInfo := schedulernodeinfo.NewNodeInfo()
 			nodeInfo.SetNode(&node)

-			fits, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodMatchNodeSelector(test.pod, factory.GetPredicateMetadata(test.pod, nil), nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -1704,7 +1714,8 @@ func TestNodeLabelPresence(t *testing.T) {
 			nodeInfo.SetNode(&node)

 			labelChecker := NodeLabelChecker{test.presentLabels, test.absentLabels}
-			fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, factory.GetPredicateMetadata(test.pod, nil), nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -1859,7 +1870,8 @@ func TestServiceAffinity(t *testing.T) {
 					precompute(pm)
 				}
 			})
-			if pmeta, ok := (GetPredicateMetadata(test.pod, s)).(*predicateMetadata); ok {
+			factory := &MetadataProducerFactory{}
+			if pmeta, ok := (factory.GetPredicateMetadata(test.pod, s)).(*predicateMetadata); ok {
 				fits, reasons, err := predicate(test.pod, pmeta, s.NodeInfoMap[test.node.Name])
 				if err != nil {
 					t.Errorf("unexpected error: %v", err)

@@ -1967,7 +1979,8 @@ func TestRunGeneralPredicates(t *testing.T) {
 	for _, test := range resourceTests {
 		t.Run(test.name, func(t *testing.T) {
 			test.nodeInfo.SetNode(test.node)
-			fits, reasons, err := GeneralPredicates(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := GeneralPredicates(test.pod, factory.GetPredicateMetadata(test.pod, nil), test.nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -2922,7 +2935,8 @@ func TestInterPodAffinity(t *testing.T) {
 				nodeInfoLister: s.NodeInfos(),
 				podLister:      fakelisters.PodLister(test.pods),
 			}
-			fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, GetPredicateMetadata(test.pod, s), s.NodeInfoMap[test.node.Name])
+			factory := &MetadataProducerFactory{}
+			fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, factory.GetPredicateMetadata(test.pod, s), s.NodeInfoMap[test.node.Name])
 			if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
 				t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
 			}

@@ -4018,9 +4032,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 				podLister:      snapshot.Pods(),
 			}

-			var meta PredicateMetadata
+			var meta Metadata
 			if !test.nometa {
-				meta = GetPredicateMetadata(test.pod, snapshot)
+				factory := &MetadataProducerFactory{}
+				meta = factory.GetPredicateMetadata(test.pod, snapshot)
 			}

 			fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, snapshot.NodeInfoMap[node.Name])

@@ -4030,7 +4045,8 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 			affinity := test.pod.Spec.Affinity
 			if affinity != nil && affinity.NodeAffinity != nil {
 				s := nodeinfosnapshot.NewSnapshot(nil, []*v1.Node{node})
-				fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, s), s.NodeInfoMap[node.Name])
+				factory := &MetadataProducerFactory{}
+				fits2, reasons, err := PodMatchNodeSelector(test.pod, factory.GetPredicateMetadata(test.pod, s), s.NodeInfoMap[node.Name])
 				if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}

@@ -4238,7 +4254,8 @@ func TestPodToleratesTaints(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			nodeInfo := schedulernodeinfo.NewNodeInfo()
 			nodeInfo.SetNode(&test.node)
-			fits, reasons, err := PodToleratesNodeTaints(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
+			factory := &MetadataProducerFactory{}
+			fits, reasons, err := PodToleratesNodeTaints(test.pod, factory.GetPredicateMetadata(test.pod, nil), nodeInfo)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}

@@ -4951,7 +4968,8 @@ func TestEvenPodsSpreadPredicate_SingleConstraint(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
-			meta := GetPredicateMetadata(tt.pod, s)
+			factory := &MetadataProducerFactory{}
+			meta := factory.GetPredicateMetadata(tt.pod, s)
 			for _, node := range tt.nodes {
 				fits, _, _ := EvenPodsSpreadPredicate(tt.pod, meta, s.NodeInfoMap[node.Name])
 				if fits != tt.fits[node.Name] {

@@ -5144,7 +5162,8 @@ func TestEvenPodsSpreadPredicate_MultipleConstraints(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
-			meta := GetPredicateMetadata(tt.pod, s)
+			factory := &MetadataProducerFactory{}
+			meta := factory.GetPredicateMetadata(tt.pod, s)
 			for _, node := range tt.nodes {
 				fits, _, _ := EvenPodsSpreadPredicate(tt.pod, meta, s.NodeInfoMap[node.Name])
 				if fits != tt.fits[node.Name] {

@@ -26,8 +26,8 @@ import (
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

-// PriorityMetadataFactory is a factory to produce PriorityMetadata.
-type PriorityMetadataFactory struct {
+// MetadataFactory is a factory to produce PriorityMetadata.
+type MetadataFactory struct {
 	serviceLister    corelisters.ServiceLister
 	controllerLister corelisters.ReplicationControllerLister
 	replicaSetLister appslisters.ReplicaSetLister
@@ -35,15 +35,15 @@ type PriorityMetadataFactory struct {
 	hardPodAffinityWeight int32
 }

-// NewPriorityMetadataFactory creates a PriorityMetadataFactory.
-func NewPriorityMetadataFactory(
+// NewMetadataFactory creates a MetadataFactory.
+func NewMetadataFactory(
 	serviceLister corelisters.ServiceLister,
 	controllerLister corelisters.ReplicationControllerLister,
 	replicaSetLister appslisters.ReplicaSetLister,
 	statefulSetLister appslisters.StatefulSetLister,
 	hardPodAffinityWeight int32,
-) PriorityMetadataProducer {
-	factory := &PriorityMetadataFactory{
+) MetadataProducer {
+	factory := &MetadataFactory{
 		serviceLister:    serviceLister,
 		controllerLister: controllerLister,
 		replicaSetLister: replicaSetLister,
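On the priorities side, only the constructor and struct names change; the argument list is untouched. A sketch of building the producer from informer listers, roughly as the tests below do (the StatefulSets lister path and the weight value are assumed for illustration):

    metadataProducer := priorities.NewMetadataFactory(
    	informerFactory.Core().V1().Services().Lister(),
    	informerFactory.Core().V1().ReplicationControllers().Lister(),
    	informerFactory.Apps().V1().ReplicaSets().Lister(),
    	informerFactory.Apps().V1().StatefulSets().Lister(),
    	1, // hardPodAffinityWeight, illustrative value
    )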
@@ -66,8 +66,8 @@ type priorityMetadata struct {
 	topologyScore topologyPairToScore
 }

-// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil.
-func (pmf *PriorityMetadataFactory) PriorityMetadata(
+// PriorityMetadata is a MetadataProducer. Node info can be nil.
+func (pmf *MetadataFactory) PriorityMetadata(
 	pod *v1.Pod,
 	filteredNodes []*v1.Node,
 	sharedLister schedulerlisters.SharedLister,

@@ -165,7 +165,7 @@ func TestPriorityMetadata(t *testing.T) {
 	client := clientsetfake.NewSimpleClientset()
 	informerFactory := informers.NewSharedInformerFactory(client, 0)

-	metaDataProducer := NewPriorityMetadataFactory(
+	metaDataProducer := NewMetadataFactory(
 		informerFactory.Core().V1().Services().Lister(),
 		informerFactory.Core().V1().ReplicationControllers().Lister(),
 		informerFactory.Apps().V1().ReplicaSets().Lister(),

@@ -346,7 +346,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 				statefulSetLister: fakelisters.StatefulSetLister(test.sss),
 			}

-			metaDataProducer := NewPriorityMetadataFactory(
+			metaDataProducer := NewMetadataFactory(
 				fakelisters.ServiceLister(test.services),
 				fakelisters.ControllerLister(test.rcs),
 				fakelisters.ReplicaSetLister(test.rss),

@@ -584,7 +584,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				statefulSetLister: fakelisters.StatefulSetLister(test.sss),
 			}

-			metaDataProducer := NewPriorityMetadataFactory(
+			metaDataProducer := NewMetadataFactory(
 				fakelisters.ServiceLister(test.services),
 				fakelisters.ControllerLister(test.rcs),
 				fakelisters.ReplicaSetLister(test.rss),

@@ -773,7 +773,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
 			zoneSpread := ServiceAntiAffinity{podLister: snapshot.Pods(), serviceLister: fakelisters.ServiceLister(test.services), labels: []string{"zone"}}

-			metaDataProducer := NewPriorityMetadataFactory(
+			metaDataProducer := NewMetadataFactory(
 				fakelisters.ServiceLister(test.services),
 				fakelisters.ControllerLister(rcs),
 				fakelisters.ReplicaSetLister(rss),

@@ -34,9 +34,9 @@ type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
 // TODO: Change interface{} to a specific type.
 type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, result framework.NodeScoreList) error

-// PriorityMetadataProducer is a function that computes metadata for a given pod. This
+// MetadataProducer is a function that computes metadata for a given pod. This
 // is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
-type PriorityMetadataProducer func(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{}
+type MetadataProducer func(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{}

 // PriorityConfig is a config used for a priority function.
 type PriorityConfig struct {

@@ -46,7 +46,7 @@ type PriorityConfig struct {
 	Weight int64
 }

-// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
-func EmptyPriorityMetadataProducer(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{} {
+// EmptyMetadataProducer returns a no-op MetadataProducer type.
+func EmptyMetadataProducer(pod *v1.Pod, filteredNodes []*v1.Node, sharedLister schedulerlisters.SharedLister) interface{} {
 	return nil
 }

@@ -26,15 +26,15 @@ import (
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

-// EmptyPriorityMetadataProducer should return a no-op PriorityMetadataProducer type.
+// EmptyMetadataProducer should return a no-op MetadataProducer type.
 func TestEmptyPriorityMetadataProducer(t *testing.T) {
 	fakePod := st.MakePod().Name("p1").Node("node2").Obj()
 	fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"})
 	fakeNodes := []*v1.Node{st.MakeNode().Name("node1").Obj(), st.MakeNode().Name("node-a").Obj()}

 	snapshot := nodeinfosnapshot.NewSnapshot([]*v1.Pod{fakePod}, fakeNodes)
-	// Test EmptyPriorityMetadataProducer
-	metadata := EmptyPriorityMetadataProducer(fakePod, fakeNodes, snapshot)
+	// Test EmptyMetadataProducer
+	metadata := EmptyMetadataProducer(fakePod, fakeNodes, snapshot)
 	if metadata != nil {
 		t.Errorf("failed to produce empty metadata: got %v, expected nil", metadata)
 	}

@@ -61,11 +61,11 @@ type PluginFactoryArgs struct {
 	HardPodAffinitySymmetricWeight int32
 }

-// PriorityMetadataProducerFactory produces PriorityMetadataProducer from the given args.
-type PriorityMetadataProducerFactory func(PluginFactoryArgs) priorities.PriorityMetadataProducer
+// PriorityMetadataProducerFactory produces MetadataProducer from the given args.
+type PriorityMetadataProducerFactory func(PluginFactoryArgs) priorities.MetadataProducer

-// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
-type PredicateMetadataProducerFactory func(PluginFactoryArgs) predicates.PredicateMetadataProducer
+// PredicateMetadataProducerFactory produces MetadataProducer from the given args.
+type PredicateMetadataProducerFactory func(PluginFactoryArgs) predicates.MetadataProducer

 // FitPredicateFactory produces a FitPredicate from the given args.
 type FitPredicateFactory func(PluginFactoryArgs) predicates.FitPredicate

@@ -90,8 +90,8 @@ var (
 	algorithmProviderMap = make(map[string]AlgorithmProviderConfig)

 	// Registered metadata producers
-	priorityMetadataProducer  PriorityMetadataProducerFactory
-	predicateMetadataProducer predicates.PredicateMetadataProducer
+	priorityMetadataProducerFactory  PriorityMetadataProducerFactory
+	predicateMetadataProducerFactory PredicateMetadataProducerFactory
 )

 const (

@@ -335,17 +335,17 @@ func IsFitPredicateRegistered(name string) bool {
 }

 // RegisterPriorityMetadataProducerFactory registers a PriorityMetadataProducerFactory.
-func RegisterPriorityMetadataProducerFactory(factory PriorityMetadataProducerFactory) {
+func RegisterPriorityMetadataProducerFactory(f PriorityMetadataProducerFactory) {
 	schedulerFactoryMutex.Lock()
 	defer schedulerFactoryMutex.Unlock()
-	priorityMetadataProducer = factory
+	priorityMetadataProducerFactory = f
 }

-// RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
-func RegisterPredicateMetadataProducer(producer predicates.PredicateMetadataProducer) {
+// RegisterPredicateMetadataProducerFactory registers a MetadataProducer.
+func RegisterPredicateMetadataProducerFactory(f PredicateMetadataProducerFactory) {
 	schedulerFactoryMutex.Lock()
 	defer schedulerFactoryMutex.Unlock()
-	predicateMetadataProducer = producer
+	predicateMetadataProducerFactory = f
 }

 // RegisterPriorityMapReduceFunction registers a priority function with the algorithm registry. Returns the name,
@@ -561,24 +561,24 @@ func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[st
 	return fitPredicates, nil
 }

-func getPriorityMetadataProducer(args PluginFactoryArgs) (priorities.PriorityMetadataProducer, error) {
+func getPriorityMetadataProducer(args PluginFactoryArgs) (priorities.MetadataProducer, error) {
 	schedulerFactoryMutex.Lock()
 	defer schedulerFactoryMutex.Unlock()

-	if priorityMetadataProducer == nil {
-		return priorities.EmptyPriorityMetadataProducer, nil
+	if priorityMetadataProducerFactory == nil {
+		return priorities.EmptyMetadataProducer, nil
 	}
-	return priorityMetadataProducer(args), nil
+	return priorityMetadataProducerFactory(args), nil
 }

-func getPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
+func getPredicateMetadataProducer(args PluginFactoryArgs) (predicates.MetadataProducer, error) {
 	schedulerFactoryMutex.Lock()
 	defer schedulerFactoryMutex.Unlock()

-	if predicateMetadataProducer == nil {
-		return predicates.EmptyPredicateMetadataProducer, nil
+	if predicateMetadataProducerFactory == nil {
+		return predicates.EmptyMetadataProducer, nil
 	}
-	return predicateMetadataProducer, nil
+	return predicateMetadataProducerFactory(args), nil
 }

 func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]priorities.PriorityConfig, error) {
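Both getters above now fall back to the corresponding no-op producer when no factory was registered, so callers never receive a nil producer. A sketch of the consuming side, mirroring the CreateFromKeys change further down (error handling abbreviated):

    predicateMetaProducer, err := getPredicateMetadataProducer(c.factoryArgs)
    if err != nil {
    	return nil, err
    }
    meta := predicateMetaProducer(pod, sharedLister) // meta itself may still be nil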
@@ -23,7 +23,11 @@ import (

 func init() {
 	// Register functions that extract metadata used by predicates computations.
-	scheduler.RegisterPredicateMetadataProducer(predicates.GetPredicateMetadata)
+	scheduler.RegisterPredicateMetadataProducerFactory(
+		func(args scheduler.PluginFactoryArgs) predicates.MetadataProducer {
+			f := &predicates.MetadataProducerFactory{}
+			return f.GetPredicateMetadata
+		})

 	// IMPORTANT NOTES for predicate developers:
 	// Registers predicates and priorities that are not enabled by default, but user can pick when creating their
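Registration now passes a factory closure instead of the producer itself, deferring construction until PluginFactoryArgs is available (this particular producer happens to ignore args). An equivalent standalone sketch of the init wiring above:

    scheduler.RegisterPredicateMetadataProducerFactory(
    	func(args scheduler.PluginFactoryArgs) predicates.MetadataProducer {
    		f := &predicates.MetadataProducerFactory{}
    		return f.GetPredicateMetadata
    	})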
@@ -25,8 +25,8 @@ import (
 func init() {
 	// Register functions that extract metadata used by priorities computations.
 	scheduler.RegisterPriorityMetadataProducerFactory(
-		func(args scheduler.PluginFactoryArgs) priorities.PriorityMetadataProducer {
-			return priorities.NewPriorityMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister, args.HardPodAffinitySymmetricWeight)
+		func(args scheduler.PluginFactoryArgs) priorities.MetadataProducer {
+			return priorities.NewMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister, args.HardPodAffinitySymmetricWeight)
 		})

 // ServiceSpreadingPriority is a priority config factory that spreads pods by minimizing

@@ -544,9 +544,9 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			cache,
 			queue,
 			test.predicates,
-			predicates.EmptyPredicateMetadataProducer,
+			predicates.EmptyMetadataProducer,
 			test.prioritizers,
-			priorities.EmptyPriorityMetadataProducer,
+			priorities.EmptyMetadataProducer,
 			emptySnapshot,
 			emptyFramework,
 			extenders,

@@ -133,7 +133,7 @@ type ScheduleAlgorithm interface {
 	// GetPredicateMetadataProducer returns the predicate metadata producer. This is needed
 	// for cluster autoscaler integration.
 	// TODO(ahg-g): remove this once CA migrates to creating a Framework instead of a full scheduler.
-	PredicateMetadataProducer() predicates.PredicateMetadataProducer
+	PredicateMetadataProducer() predicates.MetadataProducer
 }

 // ScheduleResult represents the result of one pod scheduled. It will contain

@@ -151,8 +151,8 @@ type genericScheduler struct {
 	cache                 internalcache.Cache
 	schedulingQueue       internalqueue.SchedulingQueue
 	predicates            map[string]predicates.FitPredicate
-	priorityMetaProducer  priorities.PriorityMetadataProducer
-	predicateMetaProducer predicates.PredicateMetadataProducer
+	priorityMetaProducer  priorities.MetadataProducer
+	predicateMetaProducer predicates.MetadataProducer
 	prioritizers          []priorities.PriorityConfig
 	framework             framework.Framework
 	extenders             []algorithm.SchedulerExtender

@@ -176,7 +176,7 @@ func (g *genericScheduler) snapshot() error {

 // GetPredicateMetadataProducer returns the predicate metadata producer. This is needed
 // for cluster autoscaler integration.
-func (g *genericScheduler) PredicateMetadataProducer() predicates.PredicateMetadataProducer {
+func (g *genericScheduler) PredicateMetadataProducer() predicates.MetadataProducer {
 	return g.predicateMetaProducer
 }

@@ -578,8 +578,8 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 // addNominatedPods adds pods with equal or greater priority which are nominated
 // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
 // any pod was added, 2) augmented metadata, 3) augmented CycleState 4) augmented nodeInfo.
-func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.PredicateMetadata, state *framework.CycleState,
-	nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.PredicateMetadata,
+func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.Metadata, state *framework.CycleState,
+	nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.Metadata,
 	*framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
 	if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
 		// This may happen only in tests.

@@ -590,7 +590,7 @@ func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, me
 		return false, meta, state, nodeInfo, nil
 	}
 	nodeInfoOut := nodeInfo.Clone()
-	var metaOut predicates.PredicateMetadata
+	var metaOut predicates.Metadata
 	if meta != nil {
 		metaOut = meta.ShallowCopy()
 	}

@@ -629,7 +629,7 @@ func (g *genericScheduler) podFitsOnNode(
 	ctx context.Context,
 	state *framework.CycleState,
 	pod *v1.Pod,
-	meta predicates.PredicateMetadata,
+	meta predicates.Metadata,
 	info *schedulernodeinfo.NodeInfo,
 	alwaysCheckAllPredicates bool,
 ) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {

@@ -1012,7 +1012,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 			return
 		}
 		nodeInfoCopy := g.nodeInfoSnapshot.NodeInfoMap[nodeName].Clone()
-		var metaCopy predicates.PredicateMetadata
+		var metaCopy predicates.Metadata
 		if meta != nil {
 			metaCopy = meta.ShallowCopy()
 		}

@@ -1091,7 +1091,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 	ctx context.Context,
 	state *framework.CycleState,
 	pod *v1.Pod,
-	meta predicates.PredicateMetadata,
+	meta predicates.Metadata,
 	nodeInfo *schedulernodeinfo.NodeInfo,
 	pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {

@@ -1268,9 +1268,9 @@ func NewGenericScheduler(
 	cache internalcache.Cache,
 	podQueue internalqueue.SchedulingQueue,
 	predicates map[string]predicates.FitPredicate,
-	predicateMetaProducer predicates.PredicateMetadataProducer,
+	predicateMetaProducer predicates.MetadataProducer,
 	prioritizers []priorities.PriorityConfig,
-	priorityMetaProducer priorities.PriorityMetadataProducer,
+	priorityMetaProducer priorities.MetadataProducer,
 	nodeInfoSnapshot *nodeinfosnapshot.Snapshot,
 	framework framework.Framework,
 	extenders []algorithm.SchedulerExtender,
@@ -57,15 +57,15 @@ var (
 	order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
 )

-func falsePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+func falsePredicate(pod *v1.Pod, meta algorithmpredicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 	return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }

-func truePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+func truePredicate(pod *v1.Pod, meta algorithmpredicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 	return true, nil, nil
 }

-func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -76,7 +76,7 @@ func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, n
 	return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }

-func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 	if len(nodeInfo.Pods()) == 0 {
 		return true, nil, nil
 	}

@@ -655,9 +655,10 @@ func TestGenericScheduler(t *testing.T) {

 			pvcLister := fakelisters.PersistentVolumeClaimLister(pvcs)

-			predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
+			predMetaProducer := algorithmpredicates.EmptyMetadataProducer
 			if test.buildPredMeta {
-				predMetaProducer = algorithmpredicates.GetPredicateMetadata
+				f := &algorithmpredicates.MetadataProducerFactory{}
+				predMetaProducer = f.GetPredicateMetadata
 			}
 			scheduler := NewGenericScheduler(
 				cache,

@@ -665,7 +666,7 @@ func TestGenericScheduler(t *testing.T) {
 				test.predicates,
 				predMetaProducer,
 				test.prioritizers,
-				priorities.EmptyPriorityMetadataProducer,
+				priorities.EmptyMetadataProducer,
 				emptySnapshot,
 				filterFramework,
 				[]algorithm.SchedulerExtender{},

@@ -703,9 +704,9 @@ func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes
 		cache,
 		internalqueue.NewSchedulingQueue(nil, nil),
 		predicates,
-		algorithmpredicates.EmptyPredicateMetadataProducer,
+		algorithmpredicates.EmptyMetadataProducer,
 		nil,
-		priorities.EmptyPriorityMetadataProducer,
+		priorities.EmptyMetadataProducer,
 		emptySnapshot,
 		emptyFramework,
 		nil, nil, nil, nil, false, false,

@@ -782,7 +783,7 @@ type predicateCallCounter struct {
 }

 func (c *predicateCallCounter) truePredicate() algorithmpredicates.FitPredicate {
-	return func(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+	return func(pod *v1.Pod, meta algorithmpredicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 		c.count++
 		return truePredicate(pod, meta, nodeInfo)
 	}

@@ -823,9 +824,9 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 		cache,
 		queue,
 		predicates,
-		algorithmpredicates.EmptyPredicateMetadataProducer,
+		algorithmpredicates.EmptyMetadataProducer,
 		nil,
-		priorities.EmptyPriorityMetadataProducer,
+		priorities.EmptyMetadataProducer,
 		emptySnapshot,
 		emptyFramework,
 		nil, nil, nil, nil, false, false,

@@ -997,7 +998,7 @@ func TestZeroRequest(t *testing.T) {

 			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)

-			metaDataProducer := priorities.NewPriorityMetadataFactory(
+			metaDataProducer := priorities.NewMetadataFactory(
 				informerFactory.Core().V1().Services().Lister(),
 				informerFactory.Core().V1().ReplicationControllers().Lister(),
 				informerFactory.Apps().V1().ReplicaSets().Lister(),

@@ -1407,14 +1408,15 @@ func TestSelectNodesForPreemption(t *testing.T) {
 		cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}})
 	}

+	factory := &algorithmpredicates.MetadataProducerFactory{}
 	filterPlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
 	scheduler := NewGenericScheduler(
 		nil,
 		internalqueue.NewSchedulingQueue(nil, nil),
 		test.predicates,
-		algorithmpredicates.GetPredicateMetadata,
+		factory.GetPredicateMetadata,
 		nil,
-		priorities.EmptyPriorityMetadataProducer,
+		priorities.EmptyMetadataProducer,
 		emptySnapshot,
 		filterFramework,
 		[]algorithm.SchedulerExtender{},
@ -1667,18 +1669,19 @@ func TestPickOneNodeForPreemption(t *testing.T) {
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nodes := []*v1.Node{}
|
||||
var nodes []*v1.Node
|
||||
for _, n := range test.nodes {
|
||||
nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
|
||||
}
|
||||
snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
|
||||
fwk, _ := framework.NewFramework(emptyPluginRegistry, nil, []schedulerapi.PluginConfig{}, framework.WithSnapshotSharedLister(snapshot))
|
||||
|
||||
factory := algorithmpredicates.MetadataProducerFactory{}
|
||||
g := &genericScheduler{
|
||||
framework: fwk,
|
||||
nodeInfoSnapshot: snapshot,
|
||||
predicates: test.predicates,
|
||||
predicateMetaProducer: algorithmpredicates.GetPredicateMetadata,
|
||||
predicateMetaProducer: factory.GetPredicateMetadata,
|
||||
}
|
||||
assignDefaultStartTime(test.pods)
|
||||
|
||||
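One drive-by cleanup in the hunk above: `var nodes []*v1.Node` replaces `nodes := []*v1.Node{}`. Both work with `append`, but the `var` form declares a nil slice and skips creating an empty slice value, which is the form Go linters prefer. A self-contained illustration of why the two are interchangeable here (all names illustrative):

```go
package main

import "fmt"

func main() {
	// Declared-nil slice: nil until the first append.
	var nodes []string
	// Empty-literal slice: a non-nil, zero-length slice value.
	empty := []string{}

	fmt.Println(nodes == nil, empty == nil) // true false

	// append treats both identically, so the cheaper nil form wins.
	nodes = append(nodes, "n1")
	empty = append(empty, "n1")
	fmt.Println(len(nodes), len(empty)) // 1 1
}
```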
@@ -2150,9 +2153,10 @@ func TestPreempt(t *testing.T) {
     if test.predicate != nil {
         predicate = test.predicate
     }
-    predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
+    predMetaProducer := algorithmpredicates.EmptyMetadataProducer
     if test.buildPredMeta {
-        predMetaProducer = algorithmpredicates.GetPredicateMetadata
+        f := &algorithmpredicates.MetadataProducerFactory{}
+        predMetaProducer = f.GetPredicateMetadata
     }
     scheduler := NewGenericScheduler(
         cache,
@@ -2160,7 +2164,7 @@ func TestPreempt(t *testing.T) {
         map[string]algorithmpredicates.FitPredicate{"matches": predicate},
         predMetaProducer,
         []priorities.PriorityConfig{{Map: numericMapPriority, Weight: 1}},
-        priorities.EmptyPriorityMetadataProducer,
+        priorities.EmptyMetadataProducer,
         emptySnapshot,
         emptyFramework,
         extenders,

@@ -177,7 +177,7 @@ type Configurator struct {
     pluginConfigProducerRegistry *plugins.ConfigProducerRegistry
     nodeInfoSnapshot             *nodeinfosnapshot.Snapshot

-    factoryArgs        *PluginFactoryArgs
+    factoryArgs        PluginFactoryArgs
     configProducerArgs *plugins.ConfigProducerArgs
 }

@@ -265,7 +265,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) *Configurator {
         pluginConfigProducerRegistry: args.PluginConfigProducerRegistry,
         nodeInfoSnapshot:             nodeinfosnapshot.NewEmptySnapshot(),
     }
-    c.factoryArgs = &PluginFactoryArgs{
+    c.factoryArgs = PluginFactoryArgs{
         NodeInfoLister: c.nodeInfoSnapshot.NodeInfos(),
         PodLister:      c.nodeInfoSnapshot.Pods(),
         ServiceLister:  c.serviceLister,
@@ -405,12 +405,12 @@ func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, e
         return nil, err
     }

-    priorityMetaProducer, err := getPriorityMetadataProducer(*c.factoryArgs)
+    priorityMetaProducer, err := getPriorityMetadataProducer(c.factoryArgs)
     if err != nil {
         return nil, err
     }

-    predicateMetaProducer, err := c.GetPredicateMetadataProducer()
+    predicateMetaProducer, err := getPredicateMetadataProducer(c.factoryArgs)
     if err != nil {
         return nil, err
     }
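Alongside the renames, `Configurator` now holds `PluginFactoryArgs` by value instead of by pointer, so the helper calls above pass `c.factoryArgs` directly rather than dereferencing with `*c.factoryArgs`. A small sketch of the shape of that change; the struct here is a reduced stand-in, not the real `PluginFactoryArgs`:

```go
package main

import "fmt"

// pluginFactoryArgs is an illustrative stub; the real struct carries listers
// and other scheduler wiring.
type pluginFactoryArgs struct {
	name string
}

type configurator struct {
	factoryArgs pluginFactoryArgs // was *pluginFactoryArgs before this PR
}

// Helpers take the args by value, so callers no longer write *c.factoryArgs.
func getPriorityFunctionConfigs(args pluginFactoryArgs) string {
	return fmt.Sprintf("configured from %s", args.name)
}

func main() {
	c := configurator{factoryArgs: pluginFactoryArgs{name: "snapshot-backed args"}}
	fmt.Println(getPriorityFunctionConfigs(c.factoryArgs))
}
```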
@@ -510,7 +510,7 @@ func getBinderFunc(client clientset.Interface, extenders []algorithm.SchedulerEx
 // as framework plugins. Specifically, a priority will run as a framework plugin if a plugin config producer was
 // registered for that priority.
 func (c *Configurator) getPriorityConfigs(priorityKeys sets.String) ([]priorities.PriorityConfig, *schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
-    allPriorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, *c.factoryArgs)
+    allPriorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, c.factoryArgs)
     if err != nil {
         return nil, nil, nil, err
     }
@@ -537,19 +537,13 @@ func (c *Configurator) getPriorityConfigs(priorityKeys sets.String) ([]prioritie
     return priorityConfigs, &plugins, pluginConfig, nil
 }

-// GetPredicateMetadataProducer returns a function to build Predicate Metadata.
-// It is used by the scheduler and other components, such as k8s.io/autoscaler/cluster-autoscaler.
-func (c *Configurator) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
-    return getPredicateMetadataProducer()
-}
-
 // getPredicateConfigs returns predicates configuration: ones that will run as fitPredicates and ones that will run
 // as framework plugins. Specifically, a predicate will run as a framework plugin if a plugin config producer was
 // registered for that predicate.
 // Note that the framework executes plugins according to their order in the Plugins list, and so predicates run as plugins
 // are added to the Plugins list according to the order specified in predicates.Ordering().
 func (c *Configurator) getPredicateConfigs(predicateKeys sets.String) (map[string]predicates.FitPredicate, *schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
-    allFitPredicates, err := getFitPredicateFunctions(predicateKeys, *c.factoryArgs)
+    allFitPredicates, err := getFitPredicateFunctions(predicateKeys, c.factoryArgs)
     if err != nil {
         return nil, nil, nil, err
     }

@@ -297,7 +297,7 @@ func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
     }
 }

-func PredicateFunc(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+func PredicateFunc(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
     return true, nil, nil
 }

@@ -352,7 +352,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) {
     fakelisters.StatefulSetLister(test.sss),
 )

-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     fakelisters.ServiceLister(test.services),
     fakelisters.ControllerLister(test.rcs),
     fakelisters.ReplicaSetLister(test.rss),
@@ -610,7 +610,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
     fakelisters.ReplicaSetLister(test.rss),
     fakelisters.StatefulSetLister(test.sss),
 )
-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     fakelisters.ServiceLister(test.services),
     fakelisters.ControllerLister(test.rcs),
     fakelisters.ReplicaSetLister(test.rss),

@@ -193,7 +193,7 @@ func TestImageLocalityPriority(t *testing.T) {
 client := clientsetfake.NewSimpleClientset()
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     informerFactory.Core().V1().Services().Lister(),
     informerFactory.Core().V1().ReplicationControllers().Lister(),
     informerFactory.Apps().V1().ReplicaSets().Lister(),

@@ -48,9 +48,9 @@ func (pl *InterPodAffinity) Name() string {

 // Filter invoked at the filter extension point.
 func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-    meta, ok := migration.PredicateMetadata(cycleState).(predicates.PredicateMetadata)
+    meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata)
     if !ok {
-        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.PredicateMetadata error", cycleState))
+        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState))
     }
     _, reasons, err := pl.predicate(pod, meta, nodeInfo)
     return migration.PredicateResultToFrameworkStatus(reasons, err)
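The `InterPodAffinity` hunk above shows the migration pattern that repeats across the framework plugins in this PR: pull the scheduler-injected metadata out of `CycleState`, assert the renamed `predicates.Metadata` type, and delegate to a legacy predicate function. A hedged sketch of that pattern as a standalone plugin skeleton, assuming the import paths used in this diff; `examplePlugin` and its `predicate` field are illustrative:

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	predicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// examplePlugin wraps a legacy FitPredicate behind a framework Filter.
type examplePlugin struct {
	predicate predicates.FitPredicate
}

func (pl *examplePlugin) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// Retrieve the metadata the scheduler stashed in CycleState and assert
	// the renamed interface; failure here means nothing was injected.
	meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata)
	if !ok {
		return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState))
	}
	_, reasons, err := pl.predicate(pod, meta, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

func main() {
	_ = &examplePlugin{} // wired into the framework registry in real code
}
```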
@@ -736,7 +736,8 @@ func TestSingleNode(t *testing.T) {
 for _, test := range tests {
     t.Run(test.name, func(t *testing.T) {
         snapshot := nodeinfosnapshot.NewSnapshot(test.pods, []*v1.Node{test.node})
-        meta := predicates.GetPredicateMetadata(test.pod, snapshot)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(test.pod, snapshot)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -1437,7 +1438,8 @@ func TestMultipleNodes(t *testing.T) {
     t.Run(test.name, func(t *testing.T) {
         snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
         for indexNode, node := range test.nodes {
-            meta := predicates.GetPredicateMetadata(test.pod, snapshot)
+            factory := &predicates.MetadataProducerFactory{}
+            meta := factory.GetPredicateMetadata(test.pod, snapshot)
             state := framework.NewCycleState()
             state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -1947,7 +1949,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 client := clientsetfake.NewSimpleClientset()
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     informerFactory.Core().V1().Services().Lister(),
     informerFactory.Core().V1().ReplicationControllers().Lister(),
     informerFactory.Apps().V1().ReplicaSets().Lister(),
@@ -2062,7 +2064,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 client := clientsetfake.NewSimpleClientset()
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     informerFactory.Core().V1().Services().Lister(),
     informerFactory.Core().V1().ReplicationControllers().Lister(),
     informerFactory.Apps().V1().ReplicaSets().Lister(),

@@ -57,18 +57,18 @@ func ErrorToFrameworkStatus(err error) *framework.Status {
     return nil
 }

-// PredicatesStateData is a pointer to PredicateMetadata. In the normal case, StateData is supposed to
+// PredicatesStateData is a pointer to Metadata. In the normal case, StateData is supposed to
 // be generated and stored in CycleState by a framework plugin (like a PreFilter pre-computing data for
 // its corresponding Filter). However, during migration, the scheduler will inject a pointer to
-// PredicateMetadata into CycleState. This "hack" is necessary because during migration Filters that implement
+// Metadata into CycleState. This "hack" is necessary because during migration Filters that implement
 // predicates functionality will be calling into the existing predicate functions, and need
-// to pass PredicateMetadata.
+// to pass Metadata.
 type PredicatesStateData struct {
     Reference interface{}
 }

 // Clone is supposed to make a copy of the data, but since this is just a pointer, we are practically
-// just copying the pointer. This is ok because the actual reference to the PredicateMetadata
+// just copying the pointer. This is ok because the actual reference to the Metadata
 // copy that is made by generic_scheduler during preemption cycle will be injected again outside
 // the framework.
 func (p *PredicatesStateData) Clone() framework.StateData {
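Putting the two halves of the migration together, the tests above inject metadata into `CycleState` exactly the way a migrated `Filter` later reads it back. A hedged end-to-end sketch, assuming the snapshot and migration package paths shown in this diff; the pod and node objects are illustrative fixtures:

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	predicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}
	nodes := []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}}
	snapshot := nodeinfosnapshot.NewSnapshot(nil, nodes)

	// Inject: build metadata through the new factory and stash the pointer
	// in CycleState under the migration key, as the tests above do.
	factory := &predicates.MetadataProducerFactory{}
	meta := factory.GetPredicateMetadata(pod, snapshot)
	state := framework.NewCycleState()
	state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

	// Retrieve: a migrated Filter reads it back and asserts the new type.
	if m, ok := migration.PredicateMetadata(state).(predicates.Metadata); ok {
		_ = m // handed to a legacy predicate function
	}
}
```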
@@ -43,9 +43,9 @@ func (f *Fit) Name() string {

 // Filter invoked at the filter extension point.
 func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-    meta, ok := migration.PredicateMetadata(cycleState).(predicates.PredicateMetadata)
+    meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata)
     if !ok {
-        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.PredicateMetadata error", cycleState))
+        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState))
     }
     _, reasons, err := predicates.PodFitsResources(pod, meta, nodeInfo)
     return migration.PredicateResultToFrameworkStatus(reasons, err)

@@ -343,7 +343,8 @@ func TestNodeResourcesFit(t *testing.T) {

 for _, test := range enoughPodsTests {
     t.Run(test.name, func(t *testing.T) {
-        meta := predicates.GetPredicateMetadata(test.pod, nil)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(test.pod, nil)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -396,7 +397,8 @@ func TestNodeResourcesFit(t *testing.T) {
 }
 for _, test := range notEnoughPodsTests {
     t.Run(test.name, func(t *testing.T) {
-        meta := predicates.GetPredicateMetadata(test.pod, nil)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(test.pod, nil)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -447,7 +449,8 @@ func TestNodeResourcesFit(t *testing.T) {

 for _, test := range storagePodsTests {
     t.Run(test.name, func(t *testing.T) {
-        meta := predicates.GetPredicateMetadata(test.pod, nil)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(test.pod, nil)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -47,9 +47,9 @@ func (pl *PodTopologySpread) Name() string {

 // Filter invoked at the filter extension point.
 func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-    meta, ok := migration.PredicateMetadata(cycleState).(predicates.PredicateMetadata)
+    meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata)
     if !ok {
-        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.PredicateMetadata error", cycleState))
+        return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.Metadata error", cycleState))
     }
     _, reasons, err := predicates.EvenPodsSpreadPredicate(pod, meta, nodeInfo)
     return migration.PredicateResultToFrameworkStatus(reasons, err)

@@ -270,7 +270,8 @@ func TestPodTopologySpread_Filter_SingleConstraint(t *testing.T) {
 for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
         snapshot := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
-        meta := predicates.GetPredicateMetadata(tt.pod, snapshot)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(tt.pod, snapshot)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
         plugin, _ := New(nil, nil)
@@ -467,7 +468,8 @@ func TestPodTopologySpread_Filter_MultipleConstraints(t *testing.T) {
 for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
         snapshot := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
-        meta := predicates.GetPredicateMetadata(tt.pod, snapshot)
+        factory := &predicates.MetadataProducerFactory{}
+        meta := factory.GetPredicateMetadata(tt.pod, snapshot)
         state := framework.NewCycleState()
         state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
         plugin, _ := New(nil, nil)

@@ -85,9 +85,9 @@ func (pl *ServiceAffinity) Name() string {

 // Filter invoked at the filter extension point.
 func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-    meta, ok := migration.PredicateMetadata(cycleState).(predicates.PredicateMetadata)
+    meta, ok := migration.PredicateMetadata(cycleState).(predicates.Metadata)
     if !ok {
-        return framework.NewStatus(framework.Error, "looking up PredicateMetadata")
+        return framework.NewStatus(framework.Error, "looking up Metadata")
     }
     _, reasons, err := pl.predicate(pod, meta, nodeInfo)
     return migration.PredicateResultToFrameworkStatus(reasons, err)

@@ -172,7 +172,8 @@ func TestServiceAffinity(t *testing.T) {
     predicate: predicate,
 }

-meta := predicates.GetPredicateMetadata(test.pod, snapshot)
+factory := &predicates.MetadataProducerFactory{}
+meta := factory.GetPredicateMetadata(test.pod, snapshot)
 state := framework.NewCycleState()
 state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

@@ -399,7 +400,7 @@ func TestServiceAffinityScore(t *testing.T) {
     priorityMapFunction:    priorityMapFunction,
     priorityReduceFunction: priorityReduceFunction,
 }
-metaDataProducer := priorities.NewPriorityMetadataFactory(
+metaDataProducer := priorities.NewMetadataFactory(
     fakelisters.ServiceLister(test.services),
     fakelisters.ControllerLister(rcs),
     fakelisters.ReplicaSetLister(rss),

@@ -139,7 +139,7 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v
     return pod
 }

-func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
     return true, nil, nil
 }

@@ -152,7 +152,7 @@ type mockScheduler struct {
     err error
 }

-func (es mockScheduler) PredicateMetadataProducer() predicates.PredicateMetadataProducer {
+func (es mockScheduler) PredicateMetadataProducer() predicates.MetadataProducer {
     return nil
 }
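The mock above only needs its return type updated: `predicates.MetadataProducer` is the renamed `PredicateMetadataProducer` type. A minimal stub in the new shape; `fakeAlgorithm` is an illustrative test double, and returning nil is valid for mocks that never run predicates, as in the source:

```go
package main

import (
	predicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

// fakeAlgorithm satisfies an interface that exposes the producer by
// returning nil, mirroring the mockScheduler in the diff above.
type fakeAlgorithm struct{}

func (fakeAlgorithm) PredicateMetadataProducer() predicates.MetadataProducer {
	return nil
}

func main() {
	var a fakeAlgorithm
	_ = a.PredicateMetadataProducer()
}
```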
@@ -647,9 +647,9 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
     scache,
     internalqueue.NewSchedulingQueue(nil, nil),
     predicateMap,
-    predicates.EmptyPredicateMetadataProducer,
+    predicates.EmptyMetadataProducer,
     []priorities.PriorityConfig{},
-    priorities.EmptyPriorityMetadataProducer,
+    priorities.EmptyMetadataProducer,
     emptySnapshot,
     emptyFramework,
     []algorithm.SchedulerExtender{},
@@ -698,9 +698,9 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
     scache,
     internalqueue.NewSchedulingQueue(nil, nil),
     predicateMap,
-    predicates.EmptyPredicateMetadataProducer,
+    predicates.EmptyMetadataProducer,
     []priorities.PriorityConfig{},
-    priorities.EmptyPriorityMetadataProducer,
+    priorities.EmptyMetadataProducer,
     emptySnapshot,
     emptyFramework,
     []algorithm.SchedulerExtender{},

@@ -54,11 +54,11 @@ type nodeStateManager struct {
     makeUnSchedulable nodeMutationFunc
 }

-func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
     return true, nil, nil
 }

-func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+func PredicateTwo(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
     return true, nil, nil
 }